mixtools/0000755000176200001440000000000013617252762012143 5ustar liggesusersmixtools/NAMESPACE0000644000176200001440000000607113616554540013364 0ustar liggesusersuseDynLib(mixtools, .registration = TRUE, .fixes="C_") # Export all names export("aug.x") export("boot.comp") export("boot.se") export("compCDF") export("ddirichlet") export("density.npEM") export("density.spEM") export("depth") export("dmvnorm") export("ellipse") export("expRMM_EM") export("flaremix.init") export("flaremixEM") export("gammamix.init") export("gammamixEM") export("hmeEM") export("ise.npEM") export("lambda") export("lambda.pert") export("ldmult") export("logdmvnorm") export("logisregmix.init") export("logisregmixEM") export("makemultdata") export("matsqrt") export("mixturegram") export("multmix.init") export("multmixEM") export("multmixmodel.sel") export("mvnormalmix.init") export("mvnormalmixEM") export("mvnpEM") export("normalmix.init") export("normalmixEM") export("normalmixEM2comp") export("normalmixMMlc") export("normmix.sim") export("normmixrm.sim") export("npEM") export("npEMindrep") export("npEMindrepbw") export("npMSL") export("parse.constraints") export("perm") export("plot.mixEM") export("plot.mixMCMC") export("plot.mvnpEM") export("plot.npEM") export("plot.spEM") export("plot.spEMN01") export("plotexpRMM") export("plotFDR") export("plotseq.npEM") export("plotseq") export("plotspRMM") export("plotweibullRMM") export("poisregmix.init") export("poisregmixEM") export("post.beta") export("print.mvnpEM") export("print.npEM") export("print.summary.mvnpEM") export("print.summary.npEM") export("regcr") export("regmix.init") export("regmix.lambda.init") export("regmix.mixed.init") export("regmixEM") export("regmixEM.lambda") export("regmixEM.loc") export("regmixEM.mixed") export("regmixMH") export("regmixmodel.sel") export("repnormmix.init") export("repnormmixEM") export("repnormmixmodel.sel") export("rexpmix") export("rmvnorm") export("rmvnormmix") export("rnormmix") 
export("rweibullmix") export("segregmix.init") export("segregmixEM") export("spEM") export("spEMsymloc") export("spEMsymlocN01") export("spregmix") export("spRMM_SEM") export("summary.mixEM") export("summary.mvnpEM") export("summary.npEM") export("summary.spRMM") export("tauequivnormalmixEM") export("test.equality") export("test.equality.mixed") export("try.flare") export("weibullRMM_SEM") export("wIQR") export("wkde") export("wquantile") # Import all packages listed as Imports or Depends import( MASS, segmented, stats, survival ) # S3 methods S3method(density, npEM) S3method(density, spEM) S3method(plot, mixEM) S3method(plot, mixMCMC) S3method(plot, mvnpEM) S3method(plot, npEM) S3method(plot, spEM) S3method(plot, spEMN01) S3method(plotseq, npEM) S3method(print, mvnpEM) S3method(print, npEM) S3method(print, summary.mvnpEM) S3method(print, summary.npEM) S3method(summary, mixEM) S3method(summary, mvnpEM) S3method(summary, npEM) S3method(summary, spRMM) # Import from importFrom("kernlab", "kpca", "pcv", "Q") importFrom("grDevices", "chull", "adjustcolor", "colors") importFrom("graphics", "abline", "barplot", "hist", "layout", "layout.show", "legend", "lines", "par", "plot", "points", "segments", "title", "axis", "polygon", "rect") importFrom("utils", "packageDescription", "tail")mixtools/data/0000755000176200001440000000000013617013707013046 5ustar liggesusersmixtools/data/RodFramedata.RData0000755000176200001440000001375711736707362016343 0ustar liggesusersBZh91AY&SY&O 7Fۭ E'$oȸؠ"<>0o/-ւKX@0LLmOI)@hihޔPm hS 44@%24ڀPR?Rh<ЍM'&TO)jz4@4 @4z=LLP!h!J%4ڦziG@i@4 4d&ѐM1 4hhN!0O{lvp3uyQi*q|PNpHb*P lTt2Y_MD>0gfҪt3 HY1Ci٬z"O _[a#WzQ:g`!M pcDJ`  vU0 )r=$j膒 h(;l^cfpwM12"D"%D  8go{xPPٛyh@v5j '*D0 b_ 'V0>NX%0oDV`MpY ~@XX2+c,ĹޙneQu$8=MR8C8 \bmyߗ$v R/h/f΂KrQ:RfttYxqtWտɞ]: *؎u)؋ X( ?glB XC@Ub2(7KHH8JKG{҉ s =W*-5J}"mIau8;79ly 9mu9D)(9o=n9$5#8";i*mKi_v[pڑb>UgD[9tuiHtT F,WS)?&-›QSJjA"YYbh{9 -YCcpguxhwC~i&C¶#a *U|$Y/0MLسx {ln+{#KP+HAS.L< 
`3_Y|޼R!BtCJ@ni%D":<0ԍLOczB$xsjA!Cްz*ku1D0ٜo9gfff`JR)0- :j߱kD$Z4v1h@ڕVvEW \jL.2EuRB\;kZVJM=P"5< `D=]Ta:-bx8p043(9I\sDHl@n C G@W1R)m םbyǭ;:Ԯw-"si/VmʳׯA!D/,}aM^ ;bx'cMtH ʶR*IMhI18;_Esn3[W5鹨R%H5eY bS y<HxݯNV¯+: ]c/,ʵ2&"-!AT%D*jC{oDy[o.lOYեz-ZB(ZRx(\q0,r߮Ea B([Y9`7PH*3$I; ^iwdɕI1H6wGz"=NO ۺc!+J[cNm1~St_LTɅ\1^*WhJNF)Ű3@T!RР1iQUUP!ZbO fH- 4d_jtK5ʌFsyJx3aR- I&W5 W{J rE[pG>IDZ9)!/ᒙǙPVFs)axv@XdݘVF!ܛrXg`?X |CL[hC6;F*}ϱMEQD"Vnp:eLnv`iBډ[r rm@߂g㏝9rͭ=W '9%'Nrxn,vW`9@ؕD T&o9@Ɓ.[_bC8bTՂu\g\ !5[ZveF~IOnɘZA%0%p"wKSls`PGes_yyWzOx@C{m *B@*D|;;B2ɺc4#)1K3#3-+LLJXW $Zrb0> B" BeQi"7!rbiJ6T 4首4&CTRBB B# ":@vЫ@:%'*9+@) @ SB4 "rG D6Bsg QrU@/@B*u8drM&tD+#@ćf>N̎U_x}O:뭀`j2;4Sֽ mb1M Oz'8rSV !*IЉb9W L{^|6.wKYS$ˋ}֟{P* T$WOVEtK6*!@,B&^ BRCyf@%ZK3V ~ćEP[X} b$xF!\u6tD +VM -0yrZ~Bj+M]Yːw.*|f{I,Fۖܠ2\1b]%pO=.YacǔZ?s#sWT+'%벍YKw1xy2,b#qZrRw(6 'v6dQ"2 ‘MtvrR8oXbo X]7Uߎ]! Cݻ܉­Do8SgjMyNk=]r tv ' G)X\=Y1+lPpdj%qDm;8g.1Af4,y:.ž,z}p6 X6 [ EZ TH؀是tIH/4+ۈޗN(А2}IdifeUjXr۩} y˗ o87Nw5UPg c'O"-F12a[cRRab#ct8M@͇5a`B,,Ӿ -)X'RafscL(Nn(dhյФYmpÁ]yn^ği}ITvB㋛a'qB-Cq(9 >ǽ bŒvZtaUA4moa? *:^-{wm| Q-b2*^-jṼ (牬M0`369 U)=X8H>ѡr]ڃB(ۮJ!MjN=1C"r3oC-4%t dx9|D⣘'I~L6CKM@-+Db5"i3 ݙ)nhy_=ڷm ;QET\dV(֮dIu=RKܑN$1%mixtools/data/Habituationdata.RData0000755000176200001440000000117411736707362017101 0ustar liggesusers r0b```b`bf H01'{$&e&d$$200 30ނ׹ll!׭r;LvK8(>K;(_W*| A,qPffPPꡇiS?PఃB O;"b=<}.{4q 3qn積AAJ5,tqR[t8 ^V:A7k)y aM+VL`uډEdA`SuOM㛃16{ᆈ5!bP ' {-?w9V@MdOVFs ׀ګC'H<,4 uЄ R^ }9&\ Ww+A sw0O賵أ ?Pwj@c>Ɛ3SMvsh|+X?F4 _쥏+?:J 6e ^]{̡kNphٌ5/17f. 
S!e!9'&ʡziE@Дsn @8 mixtools/data/NOdata.RData0000755000176200001440000000222411736707362015143 0ustar liggesusers]TU̎;;.Yj~ȹQ2NJɶ殩!/BKYTDjfvvf7ﵫC;̽9sݲ!'R MӢZ&nƢ'G35]#wiZ)Z}ѡ7趕_ .]k_O@qsgbu$<8/ ,za͒w>nhA'k55A/w ]x0E~߷%,f}߲O|y< /:xUV3a?7X{9E0!jdӯ>WLpC>0u8šX _+em->Sa%r=\`rLsrא?\],T(v ,4D6WXmG^ۢ{N,<٪ʒ\T.9K/X|'8z^8[DΝb ƕu.2<%iRXb]E}2 JwB^s=gw'c Kޅ"_]qpȣNYn:P:UB=DA?(a/b}aP1Eu' 1K 8a=ze1 ƛA}GU?Ϲ F;^b2ՍVO>7=?dB\w qs7ڷ.D:CC #4аB '4XFyYddddYdd9ddC'N :1tbЉC'N 1 b0a A 1Lb0a$I &1Lbİa"E 1,bXİa&M 61lbİa&M 1b8p!C 1\bp%K .1\?2b mixtools/data/CO2data.RData0000755000176200001440000000117311736707362015214 0ustar liggesusers]MlAǗBak?(jCeh mB6 A8a?&=U^IzLzփzj3NB͐iO01^GaKӌ"S""G~ۉoy/}Ϩ>w??ṈZ88;O f)]7/>~I16>ͽ:J/ZC>ዟe(E~;sÓ+>ouƧaނ5~[< a<y8A]\7EG@?10a|ŕN}1e .x%i~Y"dɜ1guFĴ'kE:.˺qEUF,<9F#ޱ! 6^,=ڛcVűpت+6S@p*]Z[imVVѦQݦSکC[pᒔ;3za4^kjA'wZ;ܠ3i_8?^(K 3R+i B\&J1"(EBJ&#F<y0`ȃ˨2IHZqr^_]'mixtools/data/RanEffdata.RData0000755000176200001440000017427411736707362016007 0ustar liggesusers7zXZi"6!X])TW"nRʟXa%>"UGQNȐHRYrNăkGnS܆(%5۩%NCe S֛US{,Ƿk&j"=9F1VjWx<$k 5ة@\y* c"ؑD,>|v;&njJIqA)Pcg ?mouO4pF6@_B fOZLLy/Ҝw>t 3 3d V޿hC!#E@[e$[-ce.tMYBQ}՛`d ͒ɰya @{8*V[;p kp渌Et%2]!m!FGt64+9n{).(itG/r끈(,?Dm]i3Y3<4v+j4 m@Ř}H+fv7Lt7=j 4on~oXI %ItaՃO?JLHo xQΑ߶QX|ruyVw{ʇKY#^ó+1JK43M jM+UCHW$l{>{q&fC1@`γlmׅg4YiZVAL*5!_ź|f.gCuW<#ɰ#5Q}$an P!?ώyJk^5Rh@\ԝR2urfN`E}&. h7f$.@9>zon*p4sܳm/>WRDߖT;L27vRhcwsi1qzP*UȼG_CQ76Ucx/jB AʟYg&l;:#8˜BQ: r֒}jYs /sd?*9t5ĝ!=.},mj#^;TT/r#8X&I0Wm2Mj'%ɇZU~\FrB6čЦ -OrYOJ-Z7,ȃ˻aeȥrsI$exr5]_jzNͷn"p4h /Ts2kZVBCZMqp0FjnSg D0!EHhO2wy@p/ʝЕOlnM"C72~"pсnjsTx坧ɭ(z*TuǵJs3xlLN Xii9'1yG`^|D2IIt͝kXݱyfdz | if[SF,-/1tF2=.h@~JEAp<cq턌<&a49K5 60?p𺍧 ܞQ7X;E>ShѹР@̣gpuK~=]j@4 gA1TwZ#Uek?̋6ޢA`IurqLW}!SL:% wAZ4_Jr1~W@',4ۖl3P3!{)Ykf vcdjya\m]FWXds xWFt <70 dzq9fː|š`óBBLηQȂ@*gϔ 4 $rQAwxr8\vy :d]4)މPy5 :/eΦXbc$q3R ?Lqti54gH潂DKJ}$YOuXiOq &9ky^0D0@Mx!p*nR2Oεv,R[tK3EqԱy(6͕E1PEkпˇJ!yI~lsQk=Bl 8( GY(G05kQLcGT43SJCO=NR}ʉЋ V8{]mSjX{d: aij~&YuV{J,cDZ* 6!4yj - '+<#94Mz/f^(|ju ]\-^$^\S[G%eNd*hBNicӥlXɖۜy\By]`j`!!!( 驳6oiSRxom~. 
#q/)o.@0?d!ju3Sj_2 Sun`tMHkW ~FAngl7ۖd|QyKVBR5MPD| /pF2i@NZmP0Τ4^(ga~y{2m1&q(?/at4{a.:w;p9@/뮬aHzL /S]@x~ۆ#Qy:ˏz$Q^қe`}qqFլg?N دyV[B~ f?ouzw+KNѷ()!M Qkjy $ƫ&uL k)< kn!z䶫`l8|a X}YJ_iw e8yBD(F*[mLLv0>?ďKoNV-ɹ sJlֳQ1Okkx(f1q#8k6Oͧ9ހX>],<33 BYAۅ_ &E#"CUn._qq%n5m17 Ch/1hCu6@Ie`Mb_6:sg3}bwڴAZ.1Q݊IxN%ɮ8uiU8[PkID /g]0Zݨ4S?YU ͣ׬=}}oi^E59ߞIoXS$T6xmqAdp_2 >6΁f9&視sI(crAL~\7WԸegQ>ڔG-Yۃlwe1 y fݦ0.!V$lr+z`w*2 n߯J~ a? Z')Ojݰism׸?97&zjⲀ)e,HP<`⣹m @6 ݘn[u/d}DLSx)l +]˘MXܺ8 tU-Gbr~6*9&IgثL2-^ P̀ݔ3ីKu^ |a֚8(FTRm uL@H0":/42B~}HChLw2L} RSɢ>V6ȇ1͗YtRR2 _\g׺ǠNn`A 'An#嗺KM^C?ylTA 5J _42d@޹% c x;*x-2R᠎xc3q,NRog%*]Uk[ܺ7il+FT I떤x^OZ|jO j6/[zr!lqrSpm{2{›C`DP{>$5(m#-@s[7Z;|] #\b 3Rt2QzFʋ~. A!Җ{fʂǿa w{1NxY]󱭸J Οo^ͫGL(le6Z7 ]moWZ;V+kyö搊XU8:0+-jg+Jv_Co!?pX6;caBϲ ( /Em%)@-mx<\4!4}-NdFY!ڛ_NqAkH K+}evBCY#=8^(}3 h D S$kK( ! 8~.l kB>VcEQ63%GeW;gZx,.Sŵ'Հ.py܏;aUxRk`Lj4yC)m.ޙѕ9ۑ(ua[)sE*pISkfk9 DH|AJȟliy9j'lf梂WG0Yxve HOgO=..>nI+JZJVws 㘖o1*c\Tb+Rhcp+xN1xGhV.iĦ5b: 3ZT5|@5mu{C2HC @KA3}|{CF#[5׾ = _nA"FŗT}ÿ8KRv] VH{dxaNl.x[9vP#UK}0:q/b9ӱ5T7*=UUQDcQlpD 3DfE4{hu{HhUۛ8yh%5V_76M^[VԻR37a*(xoCKu~t^ܦPvZع =a|o6G|ki;za9@X$_yFqvyRjQ@Hsr!6IE3s2_u~٩Ȍ  ^aNE4sw"su[:Ȥs-<Z 40Dtki7$U7. 
|ҧ99.TUt nHG9s>9&6&gȣw2˳?WBb*j|E1ptyW UXEp$lz$Etm`>5㢝cdq'}Q H{>`oPַeQ.ѓ_V.q湋Fb A[aEtXԵb givw@?/Jo' O27 Pǘ3ei0W"Mtf~-Rt0Jc ]oo9f#7 f?]^\/VD2+66hs9k .FTY0wdb-R\:Awx~7bTc,LEI( \ fHkڬh<tN/AJɏ >_|?yXK>Y1?W[B28iIU4BACԊn~Z&( #D;)ZP"4RKꀟ,6T)ZJF;v2EЂzeV(&5UQ-ZőT 0yQ*1aSb`=NxS @qIwX !6>5R^d*X%t&bh׵I>KwB i~0OU =U9%۱5}m:E Yұ~%PVQAKըXg=ʝľV!\ IH`"=&VHq7DO.OF\vMq\DF:ZFK|7嘛rCcn܀ņ>zHEe"|Pj4w]7_&Rdh1UWw1Kp'?O,2Ć1x^VZKZ/ؙxL9ϼ%dW@V|G%.8e tS?!B;G״^RʚwiT" 0]M^dbD X[ B$V|141֨4; U%ӪNog25ُX]JX ?ZeOHx)QFN|boBEѩ\Ά]#9MQ5K dřĉdn3S |8-iO"2ПEs߰qZ~HZ ’"Ry onTl% ݫ*ľKg: 'Sގ?qbyiujXӪj2W0dCiX3F\[`acF mCT>F61 J-alؐT;$|]'nԯ?GR)(>&uU;RI.C|-X"Mל{uɚoKv*>|?i˕:J^]S t;2kXT1w]I4 ۑ4Z!j`vH]8C޷w~,,B:"%M#жa$b"ٴ2_ :m3{Do7Bu_(b!/dX\܁7M1`h?(:' Tb|LpLT愹TthS% ,]<ti/3O}O~,<ܔ^?GXZtƖ3F« tH(1ڼwDzd 6Ney61P&Sڒj+j?)Bh=ݪȖq]=_B܏0;ײ+x*bQ-cV\n"o㒻]^cd%!jݎ )&% j7G4NjW\LRKȜ*+O}v'u0r="J氭<'Gr&ՉS+ ~en\:5ǜELh]`zG$3]_/&7X>@F3(S5J#g>Rڶ' ?YFvadeeS d^:75-DxcuUp:w/_vِVT^ph-y If;a^?f" AE[$s!*M(z!h&;ך)qB[V1@=Ӕp(w7vc <сPHaح۴MxYRUQ;!V pM,f&Bear5 @S17BRE4E AVqu5ie/phLC2gήz(gDޙl+Cquv:/'Dz|]S`` 49\SSEK|P'R&%WlOЌMn3< ;Kv wnU\pR hpahkvZG_kpcVvv8QnQ$uWa¨ ʗ8Vӷ󗋯e2G{w<'^}XIF׺ь< U~,,|u>rPƴAS%I~6BAJ7ӽt{âeU_2U'Q_MqOwCcAڃsj_EPKд dY ~-Bh}|0QI>=$Gm!1QjW>$v4*_k2V!ڀŧ7um޸w4IieS'0Ck h1l7GO8#(1Q%*`"TAv\$鬽 7Gl_6Q7kMk<E%a%B{C {I.tB#QY{弑Tԏ':.Y# 0(ѥ'dCMIYՍ%Hǁ.1YNm @ f"nY%VGԦad  R1-z ߀?c<-cJ.KOfPюrFhYDhis39v_\GCLj}ph hhiy ظݽ(=g3Kd [ߎ3>YŻAYTʬ&@uVMHB~drqt Rg[=}J;8& w8 . D/8j: ʭh>/"l)6ؼ'z_~= SHAtsӱW{D;Du~v;*sI#7C]#XwM[s;t9JҘV:1Ӱv|2+.caG& S{ 8gs!ͫ=h֐~piҞ٬:l:FD^{^=:_ -Wm2{I F[Lq| vO>MLE=CuLs&1Ѻyصs*Jێ ?X\!s!,ѿ;`hcM꟡/Z1yP}6ɮ3#r߰nF9o'3w<1ѿ.yi#t8nd3d}%;lD {R2/3]n^PF4; B_/8Kl=pEfp>T٭=MTyPzR=uLS9JLȋڟc_`.ÿטA0K5G=c[8`a{,Vuo58jhaԼZfOn@`q yXvYNMçU7!Rg bRѧ҇JEܖMLdo*=Ml(UjM@eckYxroțCdcB~#k Rt*IVoL3,& NTuvN3_^Kws/jY#2! 
m'c}պN?}ǚx&>|In ۛ ؓ 2( kUޟxa?b4D{)h `2m.["Ӳ]cA=%>o Qsa e$g??&|>Cm h ^;0&s7URjv}Fc+}3jGe+D]1 Oq#ŬFJ}b4Dw)}RKS2?Vٷ=Y)i9_&eFDfHޠ,9sA:>zB a⇥Ok li agᣒxkQiH~ZFQOǿ/ǜs6ާҟ jѵ)Z?onr)jdY4+„/dac@iʧռ {| vXvi%qj`ͭ?Ը{>I2c :4C ˈVLsGNgo5 # [0CkVE>N ߉ ns$C$ 76J;O#nDЫÀX}dEjGyLk&DJgY (؂lu90}PL_MVʙD4!E^ BBRHƂnbDֹ61z7;BF!w2V<"Aq[|Dϕ#l-PǏǟ",߱< %rf G8I8㟗'D\6U5T:PpM1P=`| Q⮧Z*۝8 Ukc*JZRE>لC>c""wv6/Jm V/j ˙,!:;xuʲz:.;꒡+І a(!xx-D#L"R\"& SHP67F%y+@i7\z%웢G)g=n-7,359|Ig ŒZLh#xY%˟V#L_A]D\L_/=23͌i>32R*=f_/ԕNT+ṴۧP:OWAtdoӚnj/t="Bt7Oal{g)oKO 3'M5|-)|fcF >C PxNlXcNT;S 9eޚ39]Nj omKnk1D/}Uϋ]lŞi~Яσ.hx@h#hݨ~7X6 v14r>1ŰJ{u{wNBw-nz>rg}Oa3Ƨ8}URXq\Z֑wSIV&@˯(Z?{jTl!,&:4=%N`64K;Xc\=ZMGȌNǖ_ ;UGqI1V~vRU&G[j.xqm_f;e$pC]F.b+@,cs}1)(令f|Z/WE~~;-[k7=*z 1U\펪͒jg.:G{BҨu*$ S~t'Vѹ>˞ҋnp@&O29Fǀnu[oԡ}u4fn.F*LcKP-0iEExgߠeHLߢ9nG ZV4jJO> ohh638F|\Kft8B` K= |S@f@|&e(G;KϺ2ZPW=_u&aJb4aU_#&ؽbl!H 7Eo?_G!=瘄WIC9Z[Z,y F4N=C6qYJ^BAuǏ0O_W'YU٤痿0ѽux+Rq~<$m.63QEU$Y$RWft2[YFq$Vm+-|Od^ʥTOuj.wGeWDX0R]͐Zm\_)55K leg]sp5hr0%$x!]d0+Cn6rB[E>VǒNPC Y>vsP(3p vd[%zC(mD(5|c%/Xڜ,yXc[tnd0^"yQ{3]@WhrE)fG4ԹYOfn%)fg*aNjIոl|?zt`Ҳ]2 kS %LR#Rhlhac2^ZRkwԵϲ58?=_ S*ڤ՝sF6|Qջ!`VҗS:5FQܬeUKK6t qPi Py1 M>Glŵ KM8 AgEEQ]|ihh7+"stV e}^,-{z)MG9h0?:w5^nM˹eUfi8MjLּ1 ;T$4''#y$2K֕ +Ht;*\ mf&|]T,͉Fa@{{_|oj Yd؈?b%@MյL:G1qڧx㌗|h[ eN֪rs^I<+mc)(6SCVDxהP+.%<48r;qDues`\>G& ^H/=hl Açyi䷹G5%% ^Maf&,-rm>jO!#]4RgN 朰Et[,j0}wD/ZhqPè d\'>|#,Lُ4@z=҅3C ƌ`q{8y~ ?Zى _w?\J3ˬӼ:Ƒò>|Bw$gpNLS0.N@CǍ(E3VݺȶUΞnl%$V2%dQsP?:V(0i 6w'?|YKu)0U'.-F)T(XKظ0\$!dzrs5U%ڧK8[3eb|l)-meu"4}{piNa2m(ryb@r2O33bYˁbRտ`Qyx9ȅ#2 ) K̼mGph`4̥g;t11ci)FOs_/bmd|psMzO˱+̇{exj#׶ e9A}'NuG>mLgJcx1b%KT^lސ>fj[z@%GSy i3IO0}}ԞC[H0=?Vs< Ҩu߂رY lwY2g6G~t&Zb*:՗2yRd}E{wʈvtywO ]聂2wRh*`@KwN1+-JĊ2JA⁍<(^A&۱ΐSҠֽIu8Xpu.>҂du)6dqE 92T!}-ō&4<ΑFL2j[/KC)LjM5ǝZ'MBJ҅zE^͠M5w%ۢ׭t7Q0ޗ9YvHdvI5xΏ=AHW#ج .mX:a^AqQu>\&ӓk#_7Ca{h" u^=fWjj~8˽_I&@w!{X׉pP!V+&3?LyWu QJq AP#KVhEws~,6#q;&)m]a^3&d1r!zg0_s^@=+kQIB2ga0G^?d+,v= ud{/+g^ةPG}4]A<\U. 
Te+O;侵0d 4R'9 ( 4t¤ ;K; HKiYe 3j)z.V;0*ǚe)@,LT9F+m@TũSF?c!DKuVhȧ˒%KS(#d12@":_l:ZPA[S  `YbZ&Pؽ$:K/to@K50jijH&҂6D$'~,6ItONw/S2@2gidmA'%/-+l,:\]YIc$C3 'liZTXi,61ߔT RrmўMMiO䞸Ї&#lR>7n['xSp*oX] 4!0y@Sj_IȕJZr7pj5/o1ⷉ"͎@X[0CT-ix%1F?p/l}emIB _eMz]UP3 ʥ!*-Ć ai GFY5ٓ`2jbôz ))layȱ"yTa4fD턤on}x҅vL YV#)eTݫ64lքӜݔd8[D;mJ}k!?_(fkN[J Jǘȁ@PQu]lh.}<^E sz_H;O$ZƴX!;oi}a0 x Pzn'yBPiǡOkMTP֜Xs&P-c"^[j"b :rK^k&t=&jIx>W} d.6:'Ўg5 ] 8cw3\Hntiq.Ĝ\R28 8a۾x&r2E{^KXl5k6>1 rm@]-1{$Vb*&.e aYu gvI}tVgg [-$-bY3eF|B6UuZrrf(=@kn~r*x)CZqhz{%P6p=p15w2Լ' pA^5_ρY.ln%b&b^9%иBp,#qs>I*b;/0bdM#e T7\'Xt$3LU: r+DC8WNN3 qݰk iU*qN5q .dzEQX{Z}5^NKCZN.@%!@]%k;lKÉO2#MfvԌi&Jrbg0AHQ/#Kq {RHKP,20=<=jV/w9A{y""!( _M#z|%cԛ޷N.u}(Mp#ƼwL6}>#&{֖M97#)n\G rx4/'1s2y xxXUE+pE$t48 $/CQ1A 孧zT.~ 6^`Kgl̠ah(xM{ 8u"y(\8>:\zyAC)'H*yU/Ծup$82\AZsv|-q M^tfr,mȫ0Lu6ߜf[kpL&]U3.䆹ܸ{?z.V lY`Wr..4`a;o䧋2;@i+C!i7TWgSV$ZCF($OPDrبD"-x47aW gv3Xqv0 buOJd%J ``{ 3{PE]f89y ;TA叀6l0zT3^_ c+^c5gShcqIoj64]~ScŌ HL7bG.1M-$z1KyԠP, ~ϙGn}͍s 1*<(e7v$)l$S` YcMeK,=j-a-ެoR8 ùUs%~1uW;%~%ª+( L 9)T"#%1, I}qu+.lL)hTvʔBr҃$cx~G8)u-vm.>PSBS.ﯖ?54\ ѣ[>*|oGÒ,JPHqڤpГ^=oH>rf gU"ٸU؀٠UF쨻̥T08fcE]GKa^uv R:{d0Ŷ1ʅ5EDu73!#[ʉ&]<{obӱרY}SY"R\?%ǐί2(×!0Dъm';-pS7) NT1\>*Mi;R1 &LNʲC,x=n!e!eֿdD{OWC/Sgs>Nr{8CkﷺH~ig']u㮶ݧȄkSPs Gc)Teè9{'<A۟ѫd)VݑD)}F z[:/bgK;Ʉ2nOF,:1%9 ~ځѱ5VŁY?diUEOz?)c݀<ռnWF w:&+d¤ZV53?ԥWb=xm/[gFcBrйO'uB9o=? QFgV^;)E. ܻr@j";_*)ͥFJEN9/ P8%}˯?xWsk 3U֩8_C8FT`X-]# H _e9eUy[a6~LΛ ])rҜR@.BT>e8Ѡr|5#x [T}. }ߘi])NSЁɂ+>N|;v@g+CM{v H3t6v+ْi&}8193^$cW0#kqg[u cO%/) +R㪼y\Ğ'ʅVBFh%iu1kJ } Dӄ-n[>(.pwFH#ꡧBAg"VgWq (9)s`GbR?wLǵN9eMW] S ['b6[²xP1dMKϲ5 qgn>M8صIFqw3"$Nd 3g;#"PV %$ZQkT U5a;?KgO]lxy=rNC(܇^c];3ӻW*d;HXAr19G1˲sP~$Jb:`-J{9X_o#9G3^.Zג`51+ww_rtcf#޺l-oeB{)0O ,:B;j`'RW{8 %+/ $R+ZFb WVn[zӒԣǔ9"" {~[ϩvZysy^Ӭ ftDn^/#_IWC<&^ 7?-QIR&Ji6B{#X`+|&. 
`C[m$} ~*`U>7˔It&fe_"\/ZcJR$b RÁ-(.FRWz RՋs6s)7vі{6$(癕G%th%_"Q|*oj?B8VDhD?iqS>7%RmzAaQ @ToƆKrޓӃI&LzibjNc-6F v?<6(^e;Uj(uS1Z=dJcIxCF6.3$"(gMc} i#ۃFD8Jv(W2)~B]h&]pVss|QFKWeϵkg~ zڲdJ]5p"|/c8hiգ8@ 8x,\~Y.%~ n3R,Ac4hx._&*]e~OΠ;YS)YpgR][[5,bSԳAޚs'UNJ-n>dx4v6v5$b;%ݾ9b5酘M2C* xNV@j:%tEL6yMsk؜xw13eWȖ(n~d#OG\/]^BVQ.*2 UokcGȵ&\_xv@'u8n"v _\2 HouEH.yJk><Èl~y5A[`\bZ] md}!^Fs.6^l[JԀ7ƞyk{N[͍sq׈#KKxeã ό*`\~6z}eB/aW±~l mII0/9[=ݩ{C qbxg[0 ̔˜ã&ڔnoCOeio-\870=k|"};|~{8khC99MkB7!\Aժ#8U~ǍK4|>,ar$/ jZM($bg&|ZNdo\狵mpWE7& ȰNo2\2,iX6ȧ}q}Ǚy~Q_Sر#gϠYVey93wX$u7U8ᑪe˃nOC[=;_oni8Z)|^z``+Hju 0?S(nUKT>m~)n\.م ]K 6VIrhKB5hGQShOݹ-/+U55'*V1cTf$ 話_f4yIOK m;Dm:Ru*JK䗚t҆0MH]P^0"l -WsOM_KL rw&P)r2qjÅP*Z}̉Im^U1t{0=Clh 尶N<XlKEO_-%X8ҧ9d6=T^a"7/AÕMKQΗLODGSj LXcr|gD >.ۆ6TWA=caz+ 9уU*G&LH: (* +c/]h>E xK?n{޳-B41MwLYۑY}H@qt-!cSeH A|VZjRȝ,PGX6CU6f٠ej5N9B÷Ypga@j}T͝o`2,bЉu2V#w׈ძZGk}ӓLP je:EK-?I*sD9@,mMc FZ;Y(hxb,/ʞqSp>0BZZ!Ko @.hu?$Hq?mk-Ce^M;B `4T[|`^:ъA\Ds(׋;Ch?z#tǩm#d>iJ |8| 0I$55S4zȫypiBD6PJM&B٥ 3\pÔ*RKbgY^qÓHjxG6T[{?停"\ mXxa0zޑ0I܏yoM!gJtM&Մsfl4ka1#E͸ 'x$KVQ\\d) EC1,D^9z5OA[yτPC &9;W ,zwUyfvQNV6o_ɱu$ԛ:QqZ4xQ4ퟛvDjGs݂L$3V|-Knڤlt yH,ζB[fB^놷JP`trӜPEV$;.joPGj($&[;Y$5OV S;'Rcj@RJTy"#$ h’Bqۥ|QFxog=(8H -tHƾZ4$m/Dafo3-:`|(?d)B^cK63x^g݁cK&|8^YMS*{Xk1т`Ƣi`.A.ODzut%$OqnbKrd}=Vs$tW^%l`k!l0dXNF1s%Ȇ.?L$ԢgjtBv.ڧx?v;9@Br }q3nȄWXQk "խCSsyspm Kݬ}O30'sV+qLJ`Kfbe˫e 0"}Ê%YU ݀6 2Db˙%w8خh"SCDӂfOxhx!N}gHg˩/Dyq^UQƱnCzYډ,iuNTknq(f;d `=MΔ!,.[B &WgUl[v;\,U 괣y?3}Rt)#&: uéu^5e.C3V?Qu˾=ڽtXMz-c>3]#U^ OBDƆ7vjQ=J!!X3Ӌ ?%P|#'ױ6X4 Oj!1Ԡ$;}0}boOed}GD|X> \]*\:5kPL5`4iWpQ08DȈr25x.<+crE0L&͓a@ !(ԙ+>/ZOFG.dH&FG+'Ɠ/Bbm]VJƲrvN^[{0T |Ezlez H 0iT}O۔4q4mxUW`,WU#/ou4}4MчSLYhu\牜Ȉ"uJIW`G}/V4ݜXқcݙ*ܫ\:_%U~eX`?m M&Nt'`<2euɦ~-0g7/YW ŒB:Ӹ5UR]'ȷQmE? dz>I[>p!=W V"$E)$(s,< b`8Dg@i҇j!#ᦀ2U+@=5d3jFN@/e3KYc206Մ? 
̈́X9TQYt(Q I~1bj8&=do~R`{S \Qxˬf֭mUWqt,@%cƵpoBg| !}/*R񅼉z "a|M3E_fxвTw])OܩȦvI/7ٖ /*~+(Ą\.eN/jW7l/K߃ș0ĵ\>ڱOِI$bVL>*+ -گ&2,.65<̇ŗ+NNS4DOoAXn3<}u|VOc9ֺgmIQ;*{ׅU5Ŕ;F N3I#e֝D/9":\d 9>$N)l3ך?}B{k6;cK P9,f2Zq3({j*OGT6ϨŬoۨ?Wp8 {5}cQ}|gu-*Tﱂr~;3Fι η*j4sQ LwSjXMwu~U->YYҦ QZ"eUN#qr:,a)GnM<NQ1c؋X&C%Fo3ΈPC0.azԘ0uxYviy\䛸<.N gd;ZliGպiz%3?qFK㌈{(gЩ ˠxdşGWr)9Q-g]֧.RegX|:Fx ڋEaz,آV\s}g@cTRF<:{&YҧNDY ,qRGf#IlGꁗr#{#Ik;{_ _Siq9q30nj̹ 7 "owcyI$${»? #ӑym%vzW3XQw۳N-ԸX*(@rtROKč9pf$subR8@'2]~gQ1PX2zn0S.oBk&C*GՄYG|n?Ky( A*HT3@HNf7=.UhׅR~hF41#'өM4r$:c- 1zO2l$2rƃY6E;2#u6dENү֒,? P#$)mE@V턮*Q+ TK~$$yƎS5zsN <I4rl\zYOi}`΍cXcPh0 i0<@Iɫ'h_G?B9ɥ h[;cS Ŋ8'6p=[<ģVf@җ|u2D^_ _Dْ&`B[XxQ }?A9NW#]a)nq){W{ wʁgLRYUÄ sɑ+C h6e:? JdbYKW( "le{A:yCE${bq#>_k۝}4±=J sfjWl41 H"I~HG:D$g!ms8Nu9G4/3\WrۨC+iw봸ҿ]8xJOJ-^`3’Bw̠tr,YhWĪ歬UĦ*=a趞p`73ʣmwٱ^s㈀˾恓GrZյ d9՗?NM?а˞09CV w*J-]?:='R^{=xEG 'uq[ Ie %&@v~bpQAE3aˮ.'+Kg<kٹj؇"a IO&c4y>hU-*؄g{"Ѭvv7mp l Cf )ex`!IrύtGq9[ksxP=[[C\A;H pɜ(MT$*ubu:h Ks=[jzd‚ rs{b+6x7CC*܊ ,QLfR?.Mm>-ۤ?vQ㎝3P>JUV~벗`UܚJ%dV}_=o70^p =ET+>- i8;+}Qa%0mdkf 9_pxfÿkZ.: u7[ 8.uI=B/M\^ N#Qg~)KObڴms[Nq_ hP9c AOQNݮ'4jj G0B 8"P #s V4j)•cOF:mJuaI(VHz =\"n^X-do_Jz௝g ܌1o9UctRu7  C"oMYux<gz5F(shcC͢B$CI"*cgAvo DRۡr'PRQ{z {I4+*ƚF4冶xs"K]X7m6b~'5`P}$QV&S|H2}mA]=<ܢ-^c+ϼ޲Nc0Ghʻt,P.:mmx-t<]^gX0hrh*yt*Ĕ*7 +BHaS _c[&k? \* MH=X* jd'8G;f-lIn ҸdN<= m߼$.~sS72 CBjUhB5Hvk.MnʷV*/ͧ3%LNƨHU~j28ڊo6QʍZsM•K"bV4$l9w_ &R9<2,FF7T1F\ jz%X,%ꭘjH(k|j?+(f@V㈻O Aʼlzk8_Lu UD4OρbwMԮ7R|,9g"pn'ir/49Aza'kOM@7Cv/ys#ʻK̗ =| "|dJKjTA|-"X&uox< \G3RrW:ϝmmJZ74Jt[yFH]3Ggg0 3\Yb"RYʈ38;Q_j eE,v +&Έx2YWhEc0SuDŖnCX?y4[s5i H{`@Nc;nereRgLW GJĈμEF`J_iӷѺß ;#EU= x#6ox{+nGvrUm\[}HR3!S-Q̅#40鑤yʈưyx죐}æ/.L F,MS^bV{!_ kIԊj 雩p07 ;H/XDR..tRAAh^(|W}j't8˂V o$'D<M>BX!E7O_1*wQ:[! 
š"Sq (:\'RkٹvAQgs82H#k͌v D,=,-Qhui+"oNz Hח?yKUJӂZQތVOOb32X.v:ͨ{WA-<^eoϮj%0]dc^fJmqo+49\qZ oDdbƐYr(O6sYbC^p4\Z⧖Vpock r3%t:Yтa=z%Ydڝb lWJIT窱d /5["Uʢa€WJIQW,;U G4!w.O<"Wfr ')+>osRցOۡP82fUd,4%2r6\mB؈=FRm;zjfv;k2Nnqo޵hՎsڞZW 9:k^;3;Dd˿V'><{X`a~e^^&W[ >'W\/(K]8{ƨ rss'/X0VdLp~H׀ɵQb{@#f)56-P00g2 tvM֣1BB@0E;*Y_Xr3cma` Pl >U\LF;9;22;Teu .h:`:( l#lZ)VDŽͰq޲6C{R-?{XC@a=,'MO:'#ZsݕwճLpx|.u_^U 4 mWL2M(>[xCvozCe/PjeA2_kT*zz-m^dvΊL9xAS]0ꆏ==2OиDLh LtE,Gn $fNd0\ZaIU=:}jMJ?ɢ#R>ys]4+2)JLRtI " h¡dZLF?6icBMMMA )i3cS #swvb]\L-Mj`z^HF/Xydu 5h. tx]\2+@/sdEa>杋Q9LGMֳt]@*ιXmȥJ\vHoq~כ*(aUKE(~YQêa^θMK^=_Ŵ.Kh0eHr+fڬpU>f)WAп6v{|_:%; l6Lh:+'5@KquOײRy'l9=E cH%%֫x׹RէUC3vx9 hr Lާu,Wd^Sm*Ռ0~g Z=H+ 7X=(6-짣iV&fc\; kڑ,W]?U*Rݧj6kxdm)ְu#gEZCI6f`u)WD #1_;BmOcwϐzQ$)/X7jvxS]ҧ'upoB8[tiʔ# _/UE*L6;wMtQQo*`V {"l#,lޛYB?o+= 'K9}2 +yXpUπDT~o 2{OF+vݕ'7H `S]nfZ^~IIWeŏ<Y*;)R@aA]9h#<І )_uR?2mQ9yafwvn}d w%];L5YRm̲+Ц}ti\- ɳe@3E_>/z+X$JƬP۟2 3Yei'jx.aՖd|K@eI鉍KIvG5q[w+şF^3c#WaU*]a;ATqJ s;7FBfcz<78aw, ÆtLj9PBAg|`c@D/a^WhGlrb9?oo \5,J1lKgx>i,e{jaey`/08`Hg%J39?FWqh+B.{(~ pwOX T5g\])/|L!WR#w`s[gTN} p[a-YʋI3+;x%Cc6c>9x(}jWwc =Hkk͉n 捣W6W gXw *=#5W|vӖmP/ցF Hm݃\za-U`_Fe+(1H kM; ,5^}#8nXpV' ;zNP=sT; gvz ^=wY碇žf!lf"6Mj[ޭ!x/!v + i@B̚6KL+`9Q-ag\=J-iq^lɍP:x>a'yU{w%9@% t9x(3Ä1P_PA/YU t/ʃX R;vgïDpSG`_=`]Ppd0"qW@P(6`_ۡv_7\H+Q`#| &R咴:-B*4 cO2q{an MbDV_C%]a_f5i8LiA)/3:"B"Upj9$$Nԁ=K0,.z[(Oख़X"ÐaSmm~M'Hy &Lۑ!bVk~j3%DX8i}4.k\P3'=,ȽZunR5Qp["X!">xG WO [!2b)7iIl55b,bdng-72QD79F?tkTLhB-l~wBB')EGs5 %G| vxlڶgV*\>`` FIDBmCb)h$[(wx-Nٝ;oz1Z?DDe*yqَ{iSD9d _R 7ݎcR0tV`ԷxU%u2a~?/ Qӑg<+YNP[Ȃs{ZC"Ty=Ő{ E[H'ݚھ*>0*{eo%.~PZ?'jKn4?͏*e26iIgj֛j*>+T FӍ #2Eq<ViB7f{ R=%Dzֽs焍?$u؅fդ`56`?xE<{$5o\DZW fRK Rw8K?TzJmq [`ƨ4qrMֈ6h"4GrݿЍD\eS@3y)O^h~qdRh Qާ![\"*e&K!3Ϝ|bE?jI~1s$XT.c ,پhX HɗQNڵ{ aMܴʧJouYiK Xo6Um:$ 6A9^m~{ݜ}i b1^O⡗YŖ\md|S[KckF",>s@,IP w]qσpCsrgML+K@Ep6B e1~ ts/d(*Փ5k8$H\y: Q.񳅦 uu:>{<9A=/Oz; ic(;"Ug?YIYNt Ba)`!}`v5ƇjMlU-3iZdEV>?8%FHcn7e}iL֦a qlP"W:pV2m؁e6ls2ʳ 5IeU'n܄PW72]OIn=T֓ 9s \2{oHyq4B#JIkR]}2Ӈ. 
U"7%Οwu[}c> ۊtc3[DqסIYzjwƾ0~_; |#U%FYGexRNh՚f5ʊ^ lhi m޲L|菗 p R<\$Nf/lZ39w•fѝ- %wwG>55e7?N7vzxTAOCz촠b 8[qLd|ۚ/U$phѠxG+MՊǪb1<}uRIfP Sv2I,v~>o9]s"~-Fd FVj#Y\Ȼ6FPW tƾZEK>WLv#Pح 7Ҩ)dCDR!9REtzmߝ}-d'^8 uHQ8-/ uuf,c-.GʑvvAdj&#ѓۮΛG1LiFJ'p58Ŷӿӂ}۹>P;?[%:ȣ`~%aɶ19Sj^ -] c芠}7/Y=NjmXHRAMڨp+wF4\ƌDÆzM7SE?ws8Ö*2 h dc(M%?ݮz+䫢`걣ۿgsBHs? LtJ9Ca g~XhS RΖu,0+!R4$JI"(%D:| Jeuנ,Mm1@m]=VM\R6t%Atr)n2Q;He_Jj$ SE8gU_Uʻv$%6f<ڥOt6X[!ů jxj׺"h :yzt7.)lFGqP؝aʊS _&g,`` >p喩/H#ڣ?@mS+_<\.#S+z6BSro(:TݛQqWVڬ\=韼zc@ :dI]2T+gf&m m3@c`^]J+ /dAt&ti {'ۃd}H[EmkǠ7L7zZ!aG1I7:݅`k'߸mX#n~=Zϵ)v"ՖUNba-s`2kX;x$dgsWnwqTb:0A2Fl `ϑβ@?c~_!Bet|) 5Mpb:t\Ǩ8lv&8Nr%v'j9dNI`P6kUNzpB4u;+9AWJP.e,t #754|VZ|=ƻHL~1!6„U/$9q 9zTIq$kf2<)݄{o=[[-f Ce Ez%'g][=y(PvM<#y+P ^Y;GŁ Hn!Wuƫ1K"@G}^z2^\FGuFB~Kfw#Rt~񅥷iE1"RwU\D٦sr*᫵*ٮ=WQ Ft)~i6R #,=^Q#a+/`%4޿*L`!u]1`/x&Јs bqCIYJލ٣".TckDx\Y)Vìꌍg8\>c(aC.R# 2!ge{&Ue_S Hv,ep75MdNd|HToGvŠkku)~2&J+nͷVq6*ȷJ> òh^16D GۂWթ ?z3 &ɵӉDLt``#Ó G`qmH37R=lζ9۝JBZz3.8~]=7dMЈ[f<9U+]zN-C1*"k`oyYͳDcBg$@}P WLS\!XZ:tfO+k==-yr.jGՆvH'`bSmvɕei[F9"NUtJ@%\B π.YA&k!3St>$V|c7NK_p >ouQ#i3ATPxPГq?oI3W+fF:>4KI IkM_:\ePcUr)"J\\{:L D%^vg ieS(Boմ]A޿aX&aXj{jʺ<9쥣A-A`@nmaݪZߨ>n<035 !i7H!"_aݛΆ*;+qPLR9 6F''fKOPMen:Yl-~2r|c[& |ɎwGB  5<#b-Bv?](gHHmƲt8V:XH O撿TaI%Aɸ۴P>4ѫlsE;|P"62إ6]$EhqLFT,!{^ulg#bJal; AcԍcbuSP5\~WdL0vX/a1[ZRS4lׯs[ nU;5cǥ+ D# 8gJ?lT\3z}b<(WNڌGe~ a kyl0kpݩ >: Qs +EisXy79Oa.!1U v[y Tm Ԟ;ʿ3_{) aSO'r-k> ]/rr^iyD%|HWѴ"QH] *# 1I3F?eƕY/f%_:@l<䠒eh[$V!& El6h~ܒ[ʏP3uuQ.Bdt6qxi84)J\0SvL#C!)lHא#iyb!]5b@zȭ EAYnFr Un7sCg 80AZh@ٯ߽P9^) :QЖjU1nɑ*[wARVQX'еL9uJbme~85aN>bb9X:*ixɏ&uyh}yٟ/uW4zÜG$Ud 5)I2S90Y֧J5(@9a #*I/PPovT#d!-L E#1VڑԀ d$M$?,;aZ3'Kҩn0)hCeӑ~L4hdiR*8bIwqbhYh `/D ڝk ZН,z\0Um׺:XA 1vK?ǏN/`^nyцR17$袅Cz$}6f||&z:M*j-@LeW㦀\L>5Ha~%m10HsdQ3nZWdf'(պ,򛦋ܗ@KB$J̨i2>Yy ggxԳ|?axcٟ~ECL{PU$tNά31N،UW|ah10ݹ[n]:]L]xGϟi"qZkk7%:҆R ".0=MOK0T\|B`[jєyWhHt hxׂt"AL2_r1܌^DWMWv3IPl56M}%Hnxՠ]<^5ꅋwq|F sc3(]Ϩʚ2| [D>If9㿑epW)vl57Vϔ?ZǧO$XOSO0و`J m;[ x\W v^A @ǦjmKLE_ņn}LI71+)d?*Cw웎))r|2KSxqѢm*mc#seEo\o?FRApw҅v"t-)_q^1 `ڜ}`]&t@Ꜯ!4\((4d?(}'zXej%N2_dk?ztƶ/Oïo 
*}5$H5жS֒<$J?fmF<:уp^KdIv% =;VEF͠-a j\]WoA7ps*1ZMS3Löv҇ᛙÓ1&Af&X s1v:͎"cS32r yok(opk"Mh3b˅rKw _OOaa nN: s"kxT΍Qn]ыCVj~Ө9~‚L0KBLDww`hMz$lC21*)l[cdn94K_!F awm=<9ϥ1KJvԓ3/H)f@yU}>1;#9ض ]*_ b'u5~ҧ^uCC{?L2ͽyU;Ase0@6}^'ۨ=m*v}L$)G[yOR%Tfb^ ='z(nvp5ܔ7ɲ#k/uVXX 1HwxⰗ}k=AmYAVZ4{Jᒤ//g#o9>q\UC9 5z{@|2 qD궴Yj)&6y꘷UPڟz#v%x7;6xܳT͋ e8%_Gy&Y[*dYB$AE7 cXMWPe6{vD2SY/MI줴/vcͰ=$ٟY"W |h^^FAgNMMuÕ??HEEѿcq< 2_Y]< Yu{" Y5aבr΢i-qCFaɂxꁒrȚWyqu7>0 K˸w ZY'ځwEMC3 R!82|0%b=Rj"H.³<{jU˛ !Kə}EUΕ~Re epw_$qA>!-3^3tzet`줤pKBBU*j"zHƓ2KcMp4{>d?kjdw/Л࿨|OE-̛*4I#W6X)ƥ< )b0۟+$=E'Te/[sUNɰ'aonYbԋʟCty8_USpmE6wӇ3pT 񴥪'~8$E(q#C,3(#\HDn~ ?A w ոީ BȄUw'ܡؐ -,ׅ` $ `nW#NWΎL>v֦AB$n} LbTl6t˜u({\s}-<@?*+caiZ1m#F 9eKՔ.wåV8`̢ OJ ?F{?p1Ly HV7F~B3WT"!;ZCEk 1M,Iҍ<8GiL4ʛ(&o"<Aǰng%K= %6 C+bX$ pm 6sٵNv_ ?&'p.JM!O~uݠM؍N^KMAfus%]e,S >F#*z*u ǹ=Dl<_h?{uV+Ƕ`)sALgœλ n)KY6-5ܿFՅ#$u(WD-6tًO|2K Z%aK؇7@bg|AmdNkTifIVɋ4^C#UaK`|S"):![ُF(\eYѡQڅ~;2 Q$pzK:07?C{[X/gcֳTh-?Zi?紽4Dpn[[Sj@{=]9vɺ(@jpFZKK=mz0) iPslO7k"K'D=WSZó_YmJPO:hT\TM<,!M='iv jT_ԀOfۑ?Tm+jRGk8gUs9ČX7Ik6`Jlk,J*MX%ڰ )N+2a@_$yUTDg"+՞\ ۪"Db5sD@ѥ?.Av멯+GKSٳĿE T(N^npbTuR[19Bp2ʱc >Ą:~I.qoٹI[ncKfl#')~;S:v _C !0MZ6-W-.,Myjx! 
x hC_kL%NDUsYaS7,ʐWjX"t$: t|+ Ͻy.cȮlu"`(_BDƫt<%"N"¤;2M) _^nbgLuuN0y'^HROp}u_,\ ؇a\Bmwi\eK4˟SmQU֌rA٬d!84ܕ1e)eA^խo ̍k vZDp9@N "y'v8|(⚷*{o!,xIJ̈w)V1YWv藇+ Z ٖYp`XBUduf+T1`e갲` z*t7X'3R@^*-Ԑ8޽`^4xupX8 QHP~Ҿj5/ZycZCTڄ^rI; (ά !mb'ׄj#oN95 ހ4;+BxY}1p-NrrBؠX*;Cف!l yu2u3e}43f"#>槎 4:/vS4Y+#>{4_P[Q_d|gT ^sCLȿ_h=nPl1CD| dc!3~0ׂ6TOՒa*PJي &Eh5&qۘ>.$%^꾾@>hXH~&:0>Mg ǩ`ٖpE lW"O}$ӼIrobߧz?t$KmB뢊 Ju;ܪ]GqM/{1kF:"iZ*p 6g}{- 5ju%£;R؋>aMhP*{ e^M $RjdIBG hc JSքmzёcyzoO7?XfXU3y/V̞CBl58B\cYGo9fS\hOJXoR0mfX?BJҋJJr:B[HNXoqd1 S*/s@~%n-> [dQ.}Q&MwJZ(Q&2X ջ 9GYj<+cx*ĬVwabF"{"mļoOĖ&t_+aЯ?nKU)Y&!wcPR#kcz'֕%Zq;Cn9=#'."+ߞQDt\xqv%esq J v䈰AwO_\I}x(y̰1O'ln/<a2 G (mɺH,^pIDݕ@7 "Ϯ[咪7ܟGQ q#Hae$DͲWF^ (D.1:$B›mpt[j7mExt\m(P$9s"=h6fFu`|q^k.A`f?U糕z>;w8yqiUPFBSC"ګJ@tMIޣfE*_] ܸp ͭ9C5,jhymv&- Dx8lxV5S+T?N 6b#2Ϲ<RQ7BIם>~I1.ajmN-:dxtUQM|)gJ牯!]Ɓ'|^ue%WE3]?(*β Z]("x[ORP/dHz,YՎIJt Vsg|څ+P;F&"LqU` |ЮԆC"6O^r #1s 9S~{{#DHýr'Cd]~:%Q*7_!A9mj 1? l{Ho 2Q[D>Q{59P,,/閵 d0R nգ$aKa=W2U{ؙP–^U7R p#?VNiL4_ LW@DL;/ϗz&_RbrxeXlr,fPf7DvM7݄69!ܾ)?ݵ S=tUvꗥɱq %to_ ث;- )"!c(V8G-;l3Jc.J"ᥥՅ/7[^, ͆/52K7M3p2{ֶ2A#^ĺzh-'S>`p Eu$~qhA!!%Zw^P_hU5\#)EyN1E!N!dOr?s|cYH򗮃$ᫍׄ39=z"SG6隂u@l+x4(GDS1 ]Vޅg(bkZ`oye,mCN8y#ZbJHEƹ;2/MF.}ZMH. }hC`aH6sr&YjIWZЧʃxFGh 1گa;*7Et+df; 5]sKwc#S1 ^}!8r*ju7@j%X/rW9to!%랂/'b9V7!~FKɛD8_;)ijۦ9}LcDZޔI1Vjj˅U@DJ}#qqWVJYm1]~jHZ>! j xSǍ˿iIɄUc}e.\bִT TmQ[ѧ8 (Iz=a5?*qH#` تJ~d BzZ.|.ҸvQ,WwDF~AZ(V琖HkzAy%PWX[e:YlΰMQգ(gl hFPU;-F7Ya6`$$mbh(5x'}施1$hMXo`9'+"pM,\DaU"S~|&m AWGb̄Qo~.㠾<]oC~x &lE kȁq>[® d:L HD5K_@@Eo^9|iI5%N3pA_s\lqOXyE&N_$]gûtJmאTO Lg _t9?E> *M:aU5K.#bRֻGʩ-.-`nݨj`# Q;l1^:G)TK'E 6D'-*x'?r]HK`wde16gTGpU Sk*{H _[t;x!+`dqMP\msO@Å/I:,Mby Fwj gc$nkșmX9‚= kaFO|0ي0䤊ELD .4|"};b[3'It׋_y[&!HϦSU&ml+e( ()%/RU,E Gg Bq{Ԑ)^#<X~D1BE.XP K/Q*d xAbt! -!(6 3C1Ъݸ9 ١ag 'RxͲ#bDq1%|" t׵͍Kޢ*k$tWR|?4 {V;>uȗUEr.I'+`&P(tv*a.*rU ,jTʋwJ,LD;AKU&ޝĶdn~gv#͘{Xo P ez3( S&|ěljhR<[Ss!hH)PrSefBڽꜥx=o.C4;+:h|t(@@^jusk(. 
0!ۣx ɀ>]q[zL|{8h:q, ܢտ8##FeךC>0 YZmixtools/data/WaterdataFull.RData0000644000176200001440000001623712777444063016544 0ustar liggesusers_^Er{Hi# .|9Bkc`۰˥Œ\ KE9i@xM9$bȇU]]]]]]} }L{ٓO9 oշ{[{N2'ߛ_j?U9<sW9 —9| +gsxܞÿC ~~qk-Ɨ'=?><ϓq2ϓ?? .ϓO?IU.4_R|=>~>1h]p.xK_<$ח{K|GsH]]M'8%E=~-:z'm X9zw3m3_'| o4%toVE| |JDŽ7z:״S,o!rVzx͟sF~iZ ﲩ|z^-X.>A {Ѿww~0@>2./'@g>JtϼyGyR=~辬S?7qe<7/rZF^7}y ƛ-/֗G>2.Un*f'[Q?U<3+{ț5O7_nG@Gڽ$ =OWW誂Ṅ#ϡT=`Oh|ouy?NP{bw{<$I{~y"O:hyn(~=OWxh+=}~K=tA+ЫM_ڧ_蛩o$;CEOƋ>]r+ϛ>?x~+[i"Wcu OyUANC\dW/?ҢwVxyrЗ,c}O۫Gn=|auE'e'yh_vzȳ}֗7i[pˣoggv9ґGѵ~>v="쳽\^׳>R }nW=|뷔OUߋ}oi<+.#c^U~}x*]v΂HWaPt-Z?=vv} [_&U?݌sh}ǟdGoO{y>|_v\/QT'@av3|O _x2!((!.T?0T}Ǽ`=AS}ݪ]Od/ǕK.toB>[iߥ/"ЇЏ{3ԣٮo>GcJ /[7`_־)#э={ZY z9{CRD:dVlg6A#}:_Rj'r> >ڱ?$3q˃k/rK >orՑ|߷G{zEtN5;]9w~N?|g^8/o>-v|%ZgB|'G?Irry2ܵ}%EЏZW 8mkRyxc^33-Փ]4ӕ_p>uY? =K|a=z5g\XB_0yW!>tNkD>Wf>Cn(p$O5<6呧w?p>"_>4 W3v?>3;wۃ>e>O)?0#;|޹4m(M't<o9*XŜ_BNDZ`S{*-:ci9??ޢ7噗USkG3g Gx1|n8MOsUgge{!_;@?nESAбUA/c+V~Oe'?O>MJ^!w V~?~y?)}Bno5J|\kV=%kM`ʯ<ʥ}|t܌/<Сa?1(;n8^Vy6Rpz+yUGf~1nyG>oڏ"&i@ oa ;NW"z!g8/(+|GJnSqB#"2Y j>BOg*ϫ_W= [UMO8n=ҟO9ů,@<' 2U`|}8!n{ ;'[M.>AqϾ 8as%'/##t[Wl=CSlo\Qf?+:4G[g[2< ?5zu)YGOMA=GϮyD>XZ>Wʣǿ\!އ r _SoFͼGޱW?@.ԹatCWg௙ Cr9au(Hviec_>k:O)Y??<9g|q}zC+|Q.?'[ߓQ܂ >y>?zdWl*`Ǹ`ۗMt2?΃[<8^b[wLU%r7so Ac9')|^$sѓ}+g/ 8ꠇ >7(>cm;oe#Ǿo} 1+rn>b:O9TN~pӗxɷ}`Wa 8Ժhp_>6Sd>;*_to1 %>VBsҜg@ n*_~ <V|LO=~y,g<2ݨ]Wq"WI{ HcO6=+C\p*hzzig>d>(pY4.Lúq\GZD®cziPs{<7}*ۙp~FOׅBܛ!w3+ve>_I]C/N˹r{z1bo ? >o}]).4[ߦ+{jUqX`_{{g_aI< ;1SF=+~?U^ 9ₙg;O'}u{'C> _>U}9yD/6@ݾ^Ý]_::'_#CWI,e =c(<K1c G_q<}k39%t0o*!_7>{Wcu[2z*&C=s=zs־^{X":g}~1}ГP6g-{}O`s>mtٮ_Rm?JO9}ӬG>ׅ.rle >ߎzzQ܊~\ ~O?pُr9nf!1_#a_{蕊IK_B [nǀckhz;}S zz0~Ɵq}!~4ڹaX8ؿw\I/K{GrI{^KtEw1}ϹxUo+]99^fk?*NtX 3ߒ=SY>~?(^qw*W_ǜvt#y?q(|__ϋ|gxz};Ev*kj]SsNG;\}4(?ve!>T|@WPOS}Ž#? 
G~8W9y̞r{7u3̚>gq|VRx?c?EoErxM__;ABSx=9clzC\>KԷ8 DN{վ=93o7c?Ħw|?z>}o+_Z";{n<Whsč!cnXO =LWm*D;3zاN|c2xOB_a~l[ (~kS=^;B9@_mj{R~؇]aKn~(2mf<d|{+(_md{zF/طQ0|r|A|}r@`Fq^ ^G^?CjN>J]7y{.kSO|~d$GZ/~]xu.ki B}~.{^Ry]({6#4s^J;ةzanx:N)=`'+G{~z>6zS~Mi{0 e~:W<1mToGʸ9U']w~}wyp@c='/uެ|{ݏ;˃wEoIZ/."tpM/ʾcQZ{:g ?5ܟ;øNt>H_^)z8_m/?ߵwu< qOo!g~7꽿m<ﴇ|nho`׿gҬWRQi=HQ&~A:_>眘sį}Cw 2Vw+O[W|? !Ω-[K> b\>}1zxy)ȍ@B|C?v+Rf:|OqVye2.[kzOy:|$9wsh}_?ݗ }য়+>3/'1n+ +)S\Ry4rog>#/W3o,य़7^]_ s}Iy6vo+3_|/~3>^p|.Xx={zV9<.3V|Y^\WS8%re{?+~|K>μFrS*;CH?ufڿ {/ 9>=5;[s'B^|/x'*=owAxA? Sjʭc I9 ~py2}_՟k_?UƅJ_W> $SU(ɿgg}?=2__oO꟤G mixtools/data/RTdata2.RData0000755000176200001440000000666011736707362015246 0ustar liggesusersBZh91AY&SYאl y޽{3VִSws>sXH:20MpI"/[L1ɾy#+ҟͿuh-'"yi\yvdw~]TpY+fYcwFn"|;?s /,4c[VID 2'4˴+Q*o:M3K5#:`BΗ)?ˢigP_mLuU@,F`5b '$le[D u . %s}}*i wϒ'&ߧ_U|$On)Wjx5HBV5gˆE,Ucօbsz%ޠ%=W/;UpQ3< 2C*zV)Z:)9< y\ nob#% SI> ~p8ѼO_P߇=@2(n2f]#9RY[A0@1[zf0C%_M#L[]朵D5d#Yu}RrP*e稱nrYQiV[5>ш.+=}K_l#eב,oKv_HZ"n3yI4Bgrkd ‹˛cU4@^OӀ|wwpgI$AkzaD,Vt^ ۲K WԶ 7j#I#hV!}|Fg+ 0#,pٚZA8NB{I RƳOW68K|& 2`OZV@Gp޻p=.>ts_ DFb"W{Ed!>>{Z?tG8ېg~15ÓE~h_+$z>X]K񛒂Nz$HBնr5ZnU`KRİG6k_˷5 =TXx9}8~=H573KA!s$Di6ȑ5>)PfSό_] NZ+ٌ5yAMA 4u}(x`M1pvgYO ~$xKɓiB*7ڷsuYҼ^Xu)eA9āե7lx.6〮5znF>%0kVe^v<,f@S+[WKr҉blP^Sj龪b bWBZD$~eE>og!Q-o}~`e6|uvB'},nU9=,L1SԸ6f5W@[ $QP}{|4݄Y %#ٯM3ʻo9*ݱyԮ[v5*FJq#"%2S[n3CuGESK8>1K2sb>9*TtbWq?r#]&$(wٶYNmEUceK?>V;h+OQ 5 N+} '(-1bIC([6hknnۛÝ E4 M.c8!iԚ6zw*:e^5i;+nϜ 'N`* x㍋OH9by))lTCj TYx(BpȧN@JyDp.I*KԔJ|rR([LU*T^8RJBMRIF-*FIWJ@%U8SVIW%MjRT1DYfbU Mb&66C-6FDw `v@̔X4u4u2i ܱ!LnhRm11[h#P܌/P!FA51FNZz~OjզBޮM4צ`ٳf͛3ׯnp! 
:tM4x@(H1J$_81cBA$0:Hc16mnR۷f͛2ךg4MYA@Cc@VCpJИ:Є[ ce%46T Ɔ˘9xfQjFݪ*Vr: eE3 RR S?Xu Cʑ!WKv[ܒ}iGYOo׮EnKf&@T*"BhC*iRâ(D""D"V"""$${G\ AR-MW {@-/d9+}۹go~ڴK>"Ywp N“@Z9CGA>c5}&<{&>`'D5 ;[w\x ܿg1ƲˊƜIowӹe>wΥGȞ}0(7=|G`7ޮ5sdEo~m"( ĩN}!xܾ<+H18 w"o*f䯗7)$ͿT2初Nʑ U%$U">z@02\J=Jpyqȶ|5;?H0Q X 5;U!l@%wd| ,Sɯ'dh,u]Jz,q4Ϯ.@&-LA&չ82^G=E?+dYzh]\Q"#MXefy䝒q8]*!U,(_=u$w,v.m=d7&BvU~ITr.B6l`BfyhFwvz߳V?$0k޻Ww>~ ܠJjWs)}OsC;+<ݟuk#7-F1o?Q|#5mo$qf??>j{}Ri?G {~nV)+1ٔєFR:R+1{t?+)uϥ~z>K)ߜҵ~!~Оt!%k7VjJ Lk)5Sڇ:y4l]cҳ}4 l%,h&Oy/hdM>nHi*?5r[QƮaBOg!ٗ2|zz]0^A[=ᾼQRցBvziD[б]u19υt1,ޛm7KwMhv[A~Vчqr m~Gle~?X@lȇi G[Cg\E؞_f/5Ե2~tf#c`{̎hnOz'OfbKÅA\a$~eZ;ח~ & [:e:<w&ݺ1r崫8l6\~8>.@=`BAPʉ:]vR W:{Q~2;(,]_ؤc h7d1H~*%_0ﶤg*ݘuQk|z]Jg1{RM湔mlAŽڦR~RDͯdd9n؊|Z3Q~Dh{6BgMAOʟBwum:7yyl~v#mkR*ُdAl'r$_>]n7?|{N< ʼr}~=B8-zz]q-1ȳg g~/?8d< m7e<Azhlh&ou1Vzc9>rF`YϤbj]}' A]AWaikaClǻv`Mǻ; y {N~%CȷWe!_=Ţ~|2Ypy6CqqN\8P`DnzsZ#wCϻl(xrڟ@?G?L?K}Pq_rzGhEXUA=rpm>0}s 6}GON1!W.6Xom~ϧtj뉨ʿr3)CJjmwulM!Guhc卯&eX%FgQMsv2 7Q۷obƞEnul̾{.eD:=2d:.A]]=ny*VF_ Wq1l3~_Wqb, u60W Յ`9Σ8l0 ߱}̛(Ksju|Ԟ4pM&3_m]Ժs/eCyY/L\! 9e m<,^QU&s {Nq )m>n[CۯjLnKͨg6ٛ#ط9D/|(ɨ/\5/vKM6F&ƅ{2r 8:V!˛Q1ʙ NyZڛnw)# ^~vi-MEnkQYOc~GbT^[yE8˰`3n5ג`=5?c AiԘMX{+tzeg̓>͸ɳ([1NXN^fIYrթuh{|A1jMcϾy,-wQ{ ECӫ%~hq9qjOR:+5Y.BEѯʘ>!^؁r |U~o6{l &']A=;Z?YwuA`]ȩ7bA˅ >MЙ՝ߎU,n38ŕl: C\5?}|(j4܋(dgpl }m ~U/jǗSc+Nqyi[1FQ++Ns5cl~ Տ*2~ D[}}F`o͓;`ë&#|ۆzy~Ϯl>.G@I<_Ƹt laֺ.kZ(_d`^{?di)vUd'r6݂WP1'uޙ܎~lCyXMSQs3݋ze®gy^92l6]*P"Vs}s ׷EM~Ov-a 7X-k=fn|y4C= 㗡y'ި:34ĸbG;z3Ny,;mMF@OMW.:y&Z70VH]Z_FƢk; ~c)F.v:ojwRߐr 8bC |f~G <9yi;zyH⺇,Eq/(-NBmK_GAmvvfWWλ9YsqIE9r1XՙqXg;1Kc5/Dؚ wh0ͧ\ѾXoßaw KQ`>({w!}ñ'/ꓟ?o oA?F^>=,B^~p<_f"|(Ruk"g]g ǎ% ]c1*myvllr?o?CB\,SX?9߼w 6Ef! 
v-R7.;,uVx/@Qڟs]Gz滇&ϦЭu|Y||,w:o%+Xy<,Z;3|*ս\ 㦢OorT6I^{}.u`ڷq0 ]7>DZNUDT'gx<3{oVGYo9jjsÏ?MWMx8tmÏ~|z=9~m]ƺϢ=_,$O۰4oc3>c3>c2F!c2F!c2F=c3F=c3F=c3F=c3F=c3F#c42F#c42F#c42F#c42F#c42F3c43F3c43F3c43F3c43F3c43F+c2F+c2F+c2F+c2F+c2F;c3F;c3F;c3F;c3F;c3F'ct2F'ct2F'ct2F'ct2F'ct14}m֭mm] 6um[BڴЦ6-iM mZhBڴммммммммммЂЂЂЂЂЂЂЂЂЂB .B .B !Bk!Bk!КBk )КBk )КBk %ZBk %ZBk %Bk -Bk -Bk #:B#:B#4qxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqxqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqIqI]\R%uqI]\RO\.cɈN؟?O~K,}^mixtools/data/RTdata.RData0000755000176200001440000000527211736707362015162 0ustar liggesusersBZh91AY&SYq 0^HV[z$ڌ44h 4ɦDazjmFFOA4d3HzM4ѣi6L13DblBz!ɦI䞓G4F='zOQ' )!jz4ځi OSI413(=GiꞦS#hmC4oT 24@i~Ò`]nY mpMDyRq,-YiO I D"15_'eѺ `иnrf[舱Ւ.D{ !6-:wWrnG_ k2)U0(ũ܃4Y)Wn'$')-PF:1!c԰T[\{gy CĈ)@P#1?GxmbZb/ƹ#Pԟo HZr,({bhQC;^XO,Ta[1j<;`@ӠNLJb 52{#'Pu! Kyto e-GMg>jm0(Vx$-]4'h|`RUV)UA#A5SpXfYes![Z\D&S4Z[F4G?;Rvݚe|yQ\N% ig1j%`(fjދh2RR&ΣQnt900l}#JjKB,=M?v&jIU$k2rsr$5YƊoxdplgP΍"OMGSC5ciKd8rۥ-zMK.3x+VӬo!/dzJcFU 3r{XR{-*dC$/9.T}_gfecQ.S9&uaY*f9'kHbo ie]y*+Q]žjMʍ7°UO'|ĚTXd|_]@י.f1~^( e6 H=ϹOm|OeR%> -9p 1+6xqcb8n/J'KջK&bPt/TKM^2c 3h=fb9^j5="y 2րnʯ#F_ū">cAk.UtpZD`\:s;w 14Zq31c<̺[oзD9Xު=8 p  yQRP B` Ѕ(((̉@( (PGd)@>YHLZOTN.b )@ⴥ9jU1j'DRY^'h^ 'J ҙ6aqAAFU\e:| ^B+ctH́),V*1ahIRtġaNρ#`PL̢qP-h$PPAA&g3f( P31@foI<$51cƤI&k"D$̺8H!ad$I) @s:I%Rf3hAAdI%䓘ֵk\I$5tkZ$I(.l1c5$JZי,bŋ,V1$J_3 +ĈC:=DD6W +2EePU[.]`),Lغ 'bBsJ%`LJ5ሔPU9( U"l#C X `:"4<>-6bO3? 
`E%4D;1FH4r֚BTpɈ+ k-Zi 8< Um.-.f:eBuY1vS47=vdLASr^m[%m`O.II 39<HȖ+_+hTٞ[= EX)f#DChLD*$2e)a4_: nNqilƒPLssC7h,*\NlՀAI2zQL|ɮv WC&O]-Dw$S 7mixtools/man/0000755000176200001440000000000013617013707012710 5ustar liggesusersmixtools/man/Habituationdata.Rd0000755000176200001440000000276711736707362016326 0ustar liggesusers\name{Habituationdata} \alias{Habituationdata} \docType{data} \title{Infant habituation data} \author{Hoben Thomas} \description{ From Thomas et al (2011): "Habituation is a standard method of studying infant behaviors. Indeed, much of what is known about infant memory and perception rests on habituation methods. Six-month infants (n = 51) were habituated to a checker-board pattern on two occasions, one week apart. On each occasion, the infant was presented with the checkerboard pattern and the length of time the infant viewed the pattern before disengaging was recorded; this denoted the end of a trial. After disengagement, another trial was presented. The procedure was implemented for eleven trials. The conventional index of habituation performance is the summed observed fixation to the checkerboard pattern over the eleven trials. Thus, an index of reliability focuses on how these fixation times, in seconds, on the two assessment occasions correlate: \eqn{r = .29}."} \usage{data(Habituationdata)} \format{A data frame with two variables, \code{m1} and \code{m2}, and 51 cases. The two variables are the summed observations times for the two occasions described above.} \source{ Original source: Thomas et al. (2011). See references section. } \references{ Thomas, H., Lohaus, A., and Domsch, H. (2011), Extensions of Reliability Theory, in Nonparametric Statistics and Mixture Models: A Festschrift in Honor of Thomas Hettmansperger (Singapore: World Scientific), pp. 309-316. 
} \keyword{datasets} mixtools/man/RTdata2.Rd0000755000176200001440000000242513616576476014466 0ustar liggesusers\name{RTdata2} \docType{data} \title{Reaction Time (RT) Data Set \# 2} \alias{RTdata2} \usage{ data(RTdata2) } \description{ This data set involves normally developing children 9 years of age presented visual simuli on a computer monitor. There are three different experimental conditions, according to the length of the delay after which the stimulus was displayed on the screen. Each subject experienced each condition eight times, and these 24 trials were given in random order. These data give the 82 children for whom there are complete measurements among over 200 total subjects. } \format{This data frame consists of 82 children (the rows) and their 24 responses (the columns) to the stimulus presented. The response is recorded in milliseconds. The columns are not in the order in which the stimuli were presented to the children; rather, they are arranged into three blocks of eight columns each so that each eight-column block contains only trials from one of the three conditions. } \references{ Miller, C. A., Kail, R., Leonard, L. B. and Tomblin, J. B. (2001) Speed of Processing in Children with Specific Language Impairment, \emph{Journal of Speech, Language, and Hearing Research} \bold{44(2)}, 416--433. } \seealso{ \code{\link{RTdata}} } \keyword{datasets} mixtools/man/plotexpRMM.Rd0000644000176200001440000000467013055603106015247 0ustar liggesusers\name{plotexpRMM} \alias{plotexpRMM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Plot sequences from the EM algorithm for censored mixture of exponentials } \description{Function for plotting sequences of estimates along iterations, from an object returned by the \code{\link{expRMM_EM}}, an EM algorithm for mixture of exponential distributions with randomly right censored data (see reference below). } \usage{ plotexpRMM(a, title=NULL, rowstyle=TRUE, subtitle=NULL, ...) 
} %- maybe also 'usage' for other objects documented here. \arguments{ \item{a}{An object returned by \code{\link{expRMM_EM}}.} \item{title}{The title of the plot, set to some default value if \code{NULL}.} \item{rowstyle}{Window organization, for plots in rows (the default) or columns.} \item{subtitle}{A subtitle for the plot, set to some default value if \code{NULL}.} \item{...}{Other parameters (such as \code{lwd}) passed to \code{plot}, \code{lines}, and \code{legend} commands.} } \value{The plot returned} \seealso{ Related functions: \code{\link{expRMM_EM}}, \code{\link{summary.mixEM}}, \code{\link{plot.mixEM}}. Other models and algorithms for censored lifetime data (name convention is model_algorithm): \code{\link{weibullRMM_SEM}}, \code{\link{spRMM_SEM}}. } \references{ \itemize{ \item Bordes, L., and Chauveau, D. (2016), Stochastic EM algorithms for parametric and semiparametric mixture models for right-censored lifetime data, Computational Statistics, Volume 31, Issue 4, pages 1513-1538. \url{http://link.springer.com/article/10.1007/s00180-016-0661-7} } } \author{Didier Chauveau} %% ~Make other sections like Warning with \section{Warning }{....} ~ \examples{ n=300 # sample size m=2 # number of mixture components lambda <- c(1/3,1-1/3); rate <- c(1,1/10) # mixture parameters set.seed(1234) x <- rexpmix(n, lambda, rate) # iid ~ exponential mixture cs=runif(n,0,max(x)) # Censoring (uniform) and incomplete data t <- apply(cbind(x,cs),1,min) # observed or censored data d <- 1*(x <= cs) # censoring indicator ###### EM for RMM, exponential lifetimes l0 <- rep(1/m,m); r0 <- c(1, 0.5) # "arbitrary" initial values a <- expRMM_EM(t, d, lambda=l0, rate=r0, k = m) summary(a) # EM estimates etc plotexpRMM(a, lwd=2) # plot of EM sequences %%\dontrun{ %%} } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. 
\keyword{file} mixtools/man/normalmixEM2comp.Rd0000755000176200001440000000633212122212724016365 0ustar liggesusers\name{normalmixEM2comp} \title{Fast EM Algorithm for 2-Component Mixtures of Univariate Normals} \alias{normalmixEM2comp} \usage{ normalmixEM2comp(x, lambda, mu, sigsqrd, eps= 1e-8, maxit = 1000, verb=FALSE) } \description{ Return EM algorithm output for mixtures of univariate normal distributions for the special case of 2 components, exploiting the simple structure of the problem to speed up the code. } \arguments{ \item{x}{A vector of length \eqn{n} consisting of the data.} \item{lambda}{Initial value of first-component mixing proportion.} \item{mu}{A 2-vector of initial values for the mean parameters.} \item{sigsqrd}{Either a scalar or a 2-vector with initial value(s) for the variance parameters. If a scalar, the algorithm assumes that the two components have equal variances; if a 2-vector, it assumes that the two components do not have equal variances.} \item{eps}{The convergence criterion. Convergence is declared when the change in the observed data log-likelihood increases by less than epsilon.} \item{maxit}{The maximum possible number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \details{ This code is written to be very fast, sometimes more than an order of magnitude faster than \code{\link{normalmixEM}} for the same problem. It is less numerically stable that \code{\link{normalmixEM}} in the sense that it does not safeguard against underflow as carefully. Note that when the two components are assumed to have unequal variances, the loglikelihood is unbounded. However, in practice this is rarely a problem and quite often the algorithm converges to a "nice" local maximum. 
} \value{ \code{normalmixEM2comp} returns a list of class \code{mixEM} with items: \item{x}{The raw data.} \item{lambda}{The final mixing proportions (lambda and 1-lambda).} \item{mu}{The final two mean parameters.} \item{sigma}{The final one or two standard deviations. } \item{loglik}{The final log-likelihood.} \item{posterior}{An nx2 matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood. This vector includes both the initial and the final values; thus, the number of iterations is one less than its length.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values (always zero).} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{mvnormalmixEM}}, \code{\link{normalmixEM}} } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. } \examples{ ##Analyzing the Old Faithful geyser data with a 2-component mixture of normals. 
data(faithful) attach(faithful) set.seed(100) system.time(out <- normalmixEM2comp(waiting, lambda=.5, mu=c(50,80), sigsqrd=100)) out$all.loglik # Note: must be monotone increasing # Compare elapsed time with more general version system.time(out2 <- normalmixEM(waiting, lambda=c(.5,.5), mu=c(50,80), sigma=c(10,10), arbvar=FALSE)) out2$all.loglik # Values should be identical to above } \keyword{file} mixtools/man/regmixEM.loc.Rd0000755000176200001440000000723412122214260015464 0ustar liggesusers\name{regmixEM.loc} \title{Iterative Algorithm Using EM Algorithm for Mixtures of Regressions with Local Lambda Estimates} \alias{regmixEM.loc} \usage{ regmixEM.loc(y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2, addintercept = TRUE, kern.l = c("Gaussian", "Beta", "Triangle", "Cosinus", "Optcosinus"), epsilon = 1e-08, maxit = 10000, kernl.g = 0, kernl.h = 1, verb = FALSE) } \description{ Iterative algorithm returning EM algorithm output for mixtures of multiple regressions where the mixing proportions are estimated locally. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An nxp matrix of predictors. See \code{addintercept} below.} \item{lambda}{An nxk matrix of initial local values of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is simply one over the number of components.} \item{beta}{Initial global values of \code{beta} parameters. Should be a pxk matrix, where p is the number of columns of x and \code{k} is number of components. If NULL, then \code{beta} has uniform standard normal entries. If both \code{lambda} and \code{beta} are NULL, then number of components is determined by \code{sigma}.} \item{sigma}{A k-vector of initial global values of standard deviations. If NULL, then \eqn{1/\code{sigma}^2} has random standard exponential entries. If \code{lambda}, \code{beta}, and \code{sigma} are NULL, then number of components determined by \code{k}.} \item{k}{Number of components. 
Ignored unless all of \code{lambda}, \code{beta}, and \code{sigma} are NULL.} \item{addintercept}{If TRUE, a column of ones is appended to the x matrix before the value of p is calculated.} \item{kern.l}{The type of kernel to use in the local estimation of \code{lambda}.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{kernl.g}{A shape parameter required for the symmetric beta kernel for local estimation of \code{lambda}. The default is g = 0 which yields the uniform kernel. Some common values are g = 1 for the Epanechnikov kernel, g = 2 for the biweight kernel, and g = 3 for the triweight kernel.} \item{kernl.h}{The bandwidth controlling the size of the window used in the local estimation of lambda around x.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{regmixEM.loc} returns a list of class \code{mixEM} with items: \item{x}{The set of predictors (which includes a column of 1's if \code{addintercept} = TRUE).} \item{y}{The response values.} \item{lambda.x}{The final local mixing proportions.} \item{beta}{The final global regression coefficients.} \item{sigma}{The final global standard deviations.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{regmixEM.lambda}} } \examples{ ## Compare a 2-component and 3-component fit to NOdata. 
data(NOdata) attach(NOdata) set.seed(100) out1 <- regmixEM.loc(Equivalence, NO, kernl.h = 2, epsilon = 1e-02, verb = TRUE) out2 <- regmixEM.loc(Equivalence, NO, kernl.h = 2, k = 3, epsilon = 1e-02, verb = TRUE) c(out1$loglik, out2$loglik) } \keyword{file} mixtools/man/ldmult.Rd0000755000176200001440000000145012122211744014472 0ustar liggesusers\name{ldmult} \title{Log-Density for Multinomial Distribution} \alias{ldmult} \usage{ ldmult(y, theta) } \description{ Return the logarithm of the multinomial density function. } \arguments{ \item{y}{A vector of multinomial counts.} \item{theta}{A vector of multinomial probabilities. May have same number of components as or one fewer component than \code{y}. In the latter case, an extra component is appended so that theta sums to one.} } \value{ \code{ldmult} returns the logarithm of the multinomial density with parameter \code{theta}, evaluated at \code{y}. } \details{ This function is called by \code{multmixEM}. } \seealso{ \code{\link{multmixEM}} } \examples{ y <- c(2, 2, 10) theta <- c(0.2, 0.3, 0.5) ldmult(y, theta) } \keyword{internal} mixtools/man/spEM.Rd0000755000176200001440000001464212226007736014055 0ustar liggesusers\name{spEM} \title{Semiparametric EM-like Algorithm for Mixtures of Independent Repeated Measurements} \alias{spEM} \usage{ spEM(x, mu0, blockid = 1:ncol(x), bw = bw.nrd0(as.vector(as.matrix(x))), constbw = TRUE, h = bw, eps = 1e-8, maxiter = 500, stochastic = FALSE, verb = TRUE) } \description{ Returns semiparametric EM algorithm output (Benaglia et al, 2009) for mixtures of multivariate (repeated measures) data where the coordinates of a row (case) in the data matrix are assumed to be independent, conditional on the mixture component (subpopulation) from which they are drawn. For now, this algorithm only implements model (4.7) in Benaglia et al, in which each component and block has exactly the same (nonparametric) shape and they differ only by location and scale. 
} \arguments{ \item{x}{An \eqn{n\times r}{n x r} matrix of data. Each of the \eqn{n} rows is a case, and each case has \eqn{r} repeated measurements. These measurements are assumed to be conditionally independent, conditional on the mixture component (subpopulation) from which the case is drawn.} \item{mu0}{Either an \eqn{m\times r}{m x r} matrix specifying the initial centers for the \link{kmeans} function, or an integer \eqn{m} specifying the number of initial centers, which are then choosen randomly in \link{kmeans}} \item{blockid}{A vector of length \eqn{r} identifying coordinates (columns of \code{x}) that are assumed to be identically distributed (i.e., in the same block). For instance, the default has all distinct elements, indicating that no two coordinates are assumed identically distributed and thus a separate set of \eqn{m} density estimates is produced for each column of \eqn{x}. On the other hand, if \code{blockid=rep(1,ncol(x))}, then the coordinates in each row are assumed conditionally i.i.d.} \item{bw}{Bandwidth for density estimation, equal to the standard deviation of the kernel density. By default, a simplistic application of the default \code{\link{bw.nrd0}} bandwidth used by \code{\link{density}} to the entire dataset.} \item{constbw}{Logical: If \code{TRUE}, use the same bandwidth for each iteration and for each component and block. If \code{FALSE}, use a separate bandwidth for each component and block, and update this bandwidth at each iteration of the algorithm using a suitably modified \code{\link{bw.nrd0}} method as described in Benaglia et al (2011).} \item{h}{Alternative way to specify the bandwidth, to provide backward compatibility.} \item{eps}{Tolerance limit for declaring algorithm convergence. 
Convergence is declared whenever the maximum change in any coordinate of the \code{lambda} vector (of mixing proportion estimates) does not exceed \code{eps}.} \item{maxiter}{The maximum number of iterations allowed, for both stochastic and non-stochastic versions; for non-stochastic algorithms (\code{stochastic = FALSE}), convergence may be declared before \code{maxiter} iterations (see \code{eps} above).} \item{stochastic}{Flag, if FALSE (the default), runs the non-stochastic version of the npEM algorithm, as in Benaglia et al (2009). Set to TRUE to run a stochastic version which simulates the posteriors at each iteration, and runs for \code{maxiter} iterations.} \item{verb}{If TRUE, print updates for every iteration of the algorithm as it runs} } \value{ \code{spEM} returns a list of class \code{spEM} with the following items: \item{data}{The raw data (an \eqn{n\times r}{n x r} matrix).} \item{posteriors}{An \eqn{n\times m}{n x m} matrix of posterior probabilities for observation. If \code{stochastic = TRUE}, this matrix is computed from an average over the \code{maxiter} iterations.} \item{bandwidth}{If \code{constbw==TRUE}, same as the \code{bw} input argument; otherwise, value of \code{bw} matrix at final iteration (since for now this algorithm only implements model (4.7) in Benaglia et al, the bandwidth matrix is reduced to a single bandwith scalar). This information is needed by any method that produces density estimates from the output.} \item{blockid}{Same as the \code{blockid} input argument, but recoded to have positive integer values. 
Also needed by any method that produces density estimates from the output.} \item{lambda}{The sequence of mixing proportions over iterations.} \item{lambdahat}{The final mixing proportions if \code{stochastic = FALSE}, or the average mixing proportions if \code{stochastic = TRUE}.} \item{mu}{The sequence of location parameters over iterations.} \item{muhat}{The final location parameters if \code{stochastic = FALSE}, or the average location parameters if \code{stochastic = TRUE}.} \item{sigma}{The sequence of scale parameters over iterations.} \item{sigmahat}{The final scale parameters if \code{stochastic = FALSE}, or the average scale parameters if \code{stochastic = TRUE}.} \item{loglik}{The sequence of log-likelihoods over iterations.} } \seealso{ \code{\link{plot.spEM}}, \code{\link{normmixrm.sim}}, \code{\link{spEMsymloc}}, \code{\link{npEM}}, \code{\link{plotseq.npEM}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R., An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics, 18, 505-526, 2009. \item Benaglia, T., Chauveau, D. and Hunter, D.R. Bandwidth Selection in an EM-like algorithm for nonparametric multivariate mixtures. Nonparametric Statistics and Mixture Models: A Festschrift in Honor of Thomas P. Hettmansperger. World Scientific Publishing Co., pages 15-27, 2011. \item Bordes, L., Chauveau, D., and Vandekerkhove, P., An EM algorithm for a semiparametric mixture model, Computational Statistics and Data Analysis, 51: 5429-5443, 2007. 
} } \examples{\dontrun{ ## simulate a 2-component gaussian mixture with 3 iid repeated measures set.seed(100) mu <- matrix(c(0, 15), 2, 3) sigma <- matrix(c(1, 5), 2, 3) x <- rmvnormmix(300, lambda = c(.4,.6), mu = mu, sigma = sigma) ## apply spEM with or without an iterative bandwidth selection d <- spEM(x, mu0 = 2, blockid = rep(1,3), constbw = FALSE) d2 <- spEM(x, mu0 = 2, blockid = rep(1,3), constbw = TRUE) plot(d, xlim=c(-10, 40), ylim = c(0, .16), xlab = "", breaks = 30, cex.lab=1.5, cex.axis=1.5, addlegend=FALSE) plot(d2, newplot=FALSE, addlegend=FALSE, lty=2)} } \keyword{file} mixtools/man/ddirichlet.Rd0000755000176200001440000000071711736711146015324 0ustar liggesusers\name{ddirichlet} \alias{ddirichlet} \title{Density Function for the Dirichlet Distribution} \description{ Density function for the Dirichlet distribution. } \usage{ ddirichlet(x, alpha) } \arguments{ \item{x}{A k-dimensional vector of values that sum to 1 for which to calculate the density} \item{alpha}{A k-dimensional vector of the Dirichlet distribution parameters.} } \details{ This is usually not to be called by the user. } \keyword{internal} mixtools/man/parseconstraint.Rd0000755000176200001440000000076012112171410016405 0ustar liggesusers\name{parse.constraints} \title{Constraint Function} \alias{parse.constraints} \usage{ parse.constraints(constr, k = 2, allsame = FALSE) } \description{ Constraint function for some mixture EM algorithms. } \arguments{ \item{constr}{Vector indicating constrained/unconstrained means.} \item{k}{Number of components.} \item{allsame}{Logical indicating for processing the constraints.} } \details{ This function is not intended to be called by the user. 
} \keyword{internal} mixtools/man/regmixMH.Rd0000755000176200001440000000661012122214466014720 0ustar liggesusers\name{regmixMH} \title{Metropolis-Hastings Algorithm for Mixtures of Regressions} \alias{regmixMH} \usage{ regmixMH(y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, addintercept = TRUE, mu = NULL, sig = NULL, lam.hyp = NULL, sampsize = 1000, omega = 0.01, thin = 1) } \description{ Return Metropolis-Hastings (M-H) algorithm output for mixtures of multiple regressions with arbitrarily many components. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An nxp matrix of predictors. See \code{addintercept} below.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is random from uniform Dirichlet and number of components is determined by \code{beta}.} \item{beta}{Initial value of \code{beta} parameters. Should be a pxk matrix, where p is the number of columns of x and k is number of components. If NULL, then \code{beta} has uniform standard normal entries. If both \code{lambda} and \code{beta} are NULL, then number of components is determined by \code{s}.} \item{s}{k-vector of standard deviations. If NULL, then \eqn{1/\code{s}^2} has random standard exponential entries. If \code{lambda}, \code{beta}, and \code{s} are NULL, then number of components determined by \code{k}.} \item{k}{Number of components. Ignored unless all of \code{lambda}, \code{beta}, and \code{s} are NULL.} \item{addintercept}{If TRUE, a column of ones is appended to the x matrix before the value of p is calculated.} \item{mu}{The prior hyperparameter of same size as \code{beta}; the means of \code{beta} components. If NULL, these are set to zero.} \item{sig}{The prior hyperparameter of same size as \code{beta}; the standard deviations of \code{beta} components. 
If NULL, these are all set to five times the overall standard deviation of y.} \item{lam.hyp}{The prior hyperparameter of length \code{k} for the mixing proportions (i.e., these are hyperparameters for the Dirichlet distribution). If NULL, these are generated from a standard uniform distribution and then scaled to sum to 1.} \item{sampsize}{Size of posterior sample returned.} \item{omega}{Multiplier of step size to control M-H acceptance rate. Values closer to zero result in higher acceptance rates, generally.} \item{thin}{Lag between parameter vectors that will be kept.} } \value{ \code{regmixMH} returns a list of class \code{mixMCMC} with items: \item{x}{A nxp matrix of the predictors.} \item{y}{A vector of the responses.} \item{theta}{A (\code{sampsize}/\code{thin}) x q matrix of MCMC-sampled q-vectors, where q is the total number of parameters in \code{beta}, \code{s}, and \code{lambda}.} \item{k}{The number of components.} } \seealso{ \code{\link{regcr}} } \references{ Hurn, M., Justel, A. and Robert, C. P. (2003) Estimating Mixtures of Regressions, \emph{Journal of Computational and Graphical Statistics} \bold{12(1)}, 55--79. } \examples{ ## M-H algorithm for NOdata with acceptance rate about 40\%. data(NOdata) attach(NOdata) set.seed(100) beta <- matrix(c(1.3, -0.1, 0.6, 0.1), 2, 2) sigma <- c(.02, .05) MH.out <- regmixMH(Equivalence, NO, beta = beta, s = sigma, sampsize = 2500, omega = .0013) MH.out$theta[2400:2499,] } \keyword{file} mixtools/man/plotseq.npEM.Rd0000644000176200001440000000277613055031413015526 0ustar liggesusers\name{plotseq.npEM} \alias{plotseq.npEM} \title{Plotting sequences of estimates from non- or semiparametric EM-like Algorithm} \usage{ \method{plotseq}{npEM}(x, \dots) } \description{ Returns plots of the sequences of scalar parameter estimates along iterations from an object of class \code{npEM}. 
} \arguments{ \item{x}{an object of class \code{npEM}, as output by \code{\link{npEM}} or \code{\link{spEMsymloc}}} \item{\dots}{further parameters that are passed to \code{\link{plot}}} } \details{ \code{plotseq.npEM} returns a figure with one plot for each component proportion, and, in the case of \code{\link{spEMsymloc}}, one plot for each component mean. } \seealso{ \code{\link{plot.npEM}}, \code{\link{rnormmix}}, \code{\link{npEM}}, \code{\link{spEMsymloc}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R. (2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics (to appear). \item Bordes, L., Chauveau, D., and Vandekerkhove, P. (2007), An EM algorithm for a semiparametric mixture model, Computational Statistics and Data Analysis, 51: 5429-5443. } } \examples{ ## Example from a normal location mixture n <- 200 set.seed(100) lambda <- c(1/3,2/3) mu <- c(0, 4); sigma<-rep(1, 2) x <- rnormmix(n, lambda, mu, sigma) b <- spEMsymloc(x, mu0=c(-1, 2), stochastic=FALSE) plotseq(b) bst <- spEMsymloc(x, mu0=c(-1, 2), stochastic=TRUE) plotseq(bst) } \keyword{file} mixtools/man/rweibullmix.Rd0000644000176200001440000000223613055022056015536 0ustar liggesusers\name{rweibullmix} \title{Simulate from Mixtures of Weibull distributions} \alias{rweibullmix} \usage{ rweibullmix(n, lambda = 1, shape = 1, scale = 1) } \description{ Simulate from a mixture of univariate Weibull distributions. } \arguments{ \item{n}{Number of cases to simulate.} \item{lambda}{Vector of mixture probabilities, with length equal to \eqn{m}, the desired number of components (subpopulations). This is assumed to sum to 1.} \item{shape}{Vector of component shapes.} \item{scale}{Vector of component scales.} } \value{ \code{rexpmix} returns an \eqn{n}-vector sampled from an \eqn{m}-component mixture of univariate Weibull distributions. 
} %%\details{This function simply calls \code{\link{sample}} and \code{\link{rexp}}.} \seealso{ \code{\link{rnormmix}} and \code{\link{rmvnormmix}} for Gaussian mixtures, \code{\link{rexpmix}} for mixture of exponentials. } \examples{ n = 500 # sample size m = 2 # nb components lambda=c(0.4, 0.6) shape <- c(0.5,5); scale <- c(1,20) # model parameters set.seed(321) x <- rweibullmix(n, lambda, shape, scale) # iid ~ weibull mixture ## histogram of the simulated data. hist(x, col=8) } \keyword{file} mixtools/man/makemultdata.Rd0000755000176200001440000000521512122212216015640 0ustar liggesusers\name{makemultdata} \title{Produce Cutpoint Multinomial Data} \alias{makemultdata} \usage{ makemultdata(..., cuts) } \description{ Change data into a matrix of multinomial counts using the cutpoint method and generate EM algorithm starting values for a k-component mixture of multinomials. } \arguments{ \item{...}{Either vectors (possibly of different lengths) of raw data or an nxm matrix (or data frame) of data. If \code{...} are vectors of varying length, then \code{makemultdata} will create a matrix of size nxm where n is the sample size and m is the length of the vector with maximum length. Those vectors with length less than m will have \code{NA}s to make the corresponding row in the matrix of length m. If \code{...} is a matrix (or data frame), then the rows must correspond to the sample and the columns the repeated measures.} \item{cuts}{A vector of cutpoints. This vector is sorted by the algorithm.} } \value{ \code{makemultdata} returns an object which is a list with components: \item{x}{An nxm matrix of the raw data.} \item{y}{An nxp matrix of the discretized data where p is one more than the number of cutpoints. Each row is a multinomial vector of counts. 
In particular, each row should sum to the number of repeated measures for that sample.} } \details{ The (i, j)th entry of the matrix \code{y} (for j < p) is equal to the number of entries in the ith column of \code{x} that are less than or equal to \code{cuts}[j]. The (i, p)th entry is equal to the number of entries greater than \code{cuts}[j]. } \seealso{ \code{\link{compCDF}}, \code{\link{multmixmodel.sel}}, \code{\link{multmixEM}} } \references{ Elmore, R. T., Hettmansperger, T. P. and Xuan, F. (2004) The Sign Statistic, One-Way Layouts and Mixture Models, \emph{Statistical Science} \bold{19(4)}, 579--587. } \examples{ ## Randomly generated data. set.seed(100) y <- matrix(rpois(70, 6), 10, 7) cuts <- c(2, 5, 7) out1 <- makemultdata(y, cuts = cuts) out1 ## The sulfur content of the coal seams in Texas. A <- c(1.51, 1.92, 1.08, 2.04, 2.14, 1.76, 1.17) B <- c(1.69, 0.64, .9, 1.41, 1.01, .84, 1.28, 1.59) C <- c(1.56, 1.22, 1.32, 1.39, 1.33, 1.54, 1.04, 2.25, 1.49) D <- c(1.3, .75, 1.26, .69, .62, .9, 1.2, .32) E <- c(.73, .8, .9, 1.24, .82, .72, .57, 1.18, .54, 1.3) out2 <- makemultdata(A, B, C, D, E, cuts = median(c(A, B, C, D, E))) out2 ## The reaction time data. data(RTdata) out3 <- makemultdata(RTdata, cuts = 100*c(5, 10, 12, 14, 16, 20, 25, 30, 40, 50)) dim(out3$y) out3$y[1:10,] } \keyword{file} mixtools/man/boot.comp.Rd0000755000176200001440000001153012122207616015074 0ustar liggesusers\name{boot.comp} \title{Performs Parametric Bootstrap for Sequentially Testing the Number of Components in Various Mixture Models} \alias{boot.comp} \usage{ boot.comp(y, x = NULL, N = NULL, max.comp = 2, B = 100, sig = 0.05, arbmean = TRUE, arbvar = TRUE, mix.type = c("logisregmix", "multmix", "mvnormalmix", "normalmix", "poisregmix", "regmix", "regmix.mixed", "repnormmix"), hist = TRUE, ...) 
} \description{ Performs a parametric bootstrap by producing B bootstrap realizations of the likelihood ratio statistic for testing the null hypothesis of a k-component fit versus the alternative hypothesis of a (k+1)-component fit to various mixture models. This is performed for up to a specified number of maximum components, k. A p-value is calculated for each test and once the p-value is above a specified significance level, the testing terminates. An optional histogram showing the distribution of the likelihood ratio statistic along with the observed statistic can also be produced. } \arguments{ \item{y}{The raw data for \code{multmix}, \code{mvnormalmix}, \code{normalmix}, and \code{repnormmix} and the response values for \code{logisregmix}, \code{poisregmix}, and \code{regmix}. See the documentation concerning their respective EM algorithms for specific structure of the raw data.} \item{x}{The predictor values required only for the regression mixtures \code{logisregmix}, \code{poisregmix}, and \code{regmix}. A column of 1s for the intercept term must not be included! See the documentation concerning their respective EM algorithms for specific structure of the predictor values.} \item{N}{An n-vector of number of trials for the logistic regression type \code{logisregmix}. If NULL, then \code{N} is an n-vector of 1s for binary logistic regression.} \item{max.comp}{The maximum number of components to test for. The default is 2. This function will perform a test of k-components versus (k+1)-components sequentially until we fail to reject the null hypothesis. This decision rule is governed by the calculated p-value and \code{sig}.} \item{B}{The number of bootstrap realizations of the likelihood ratio statistic to produce. 
The default is 100, but ideally, values of 1000 or more would be more acceptable.} \item{sig}{The significance level for which to compare the p-value against when performing the test of k-components versus (k+1)-components.} \item{arbmean}{If FALSE, then a scale mixture analysis can be performed for \code{mvnormalmix}, \code{normalmix}, \code{regmix}, or \code{repnormmix}. The default is TRUE.} \item{arbvar}{If FALSE, then a location mixture analysis can be performed for \code{mvnormalmix}, \code{normalmix}, \code{regmix}, or \code{repnormmix}. The default is TRUE.} \item{mix.type}{The type of mixture analysis you wish to perform. The data inputted for \code{y} and \code{x} depend on which type of mixture is selected. \code{logisregmix} corresponds to a mixture of logistic regressions. \code{multmix} corresponds to a mixture of multinomials with data determined by the cut-point method. \code{mvnormalmix} corresponds to a mixture of multivariate normals. \code{normalmix} corresponds to a mixture of univariate normals. \code{poisregmix} corresponds to a mixture of Poisson regressions. \code{regmix} corresponds to a mixture of regressions with normal components. \code{regmix.mixed} corresponds to a mixture of regressions with random or mixed effects. 
\code{repnormmix} corresponds to a mixture of normals with repeated measurements.} \item{hist}{An argument to provide a matrix plot of histograms for the boostrapped likelihood ratio statistic.} \item{...}{Additional arguments passed to the various EM algorithms for the mixture of interest.} } \value{ \code{boot.comp} returns a list with items: \item{p.values}{The p-values for each test of k-components versus (k+1)-components.} \item{log.lik}{The B bootstrap realizations of the likelihood ratio statistic.} \item{obs.log.lik}{The observed likelihood ratio statistic for each test which is used in determining the p-values.} } \seealso{ \code{\link{logisregmixEM}}, \code{\link{multmixEM}}, \code{\link{mvnormalmixEM}}, \code{\link{normalmixEM}}, \code{\link{poisregmixEM}}, \code{\link{regmixEM}}, \code{\link{regmixEM.mixed}}, \code{\link{repnormmixEM}} } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. } \examples{ ## Bootstrapping to test the number of components on the RTdata. data(RTdata) set.seed(100) x <- as.matrix(RTdata[, 1:3]) y <- makemultdata(x, cuts = quantile(x, (1:9)/10))$y a <- boot.comp(y = y, max.comp = 1, B = 5, mix.type = "multmix", epsilon = 1e-3) a$p.values } \keyword{file} mixtools/man/spEMsymloc.Rd0000644000176200001440000000773412644764321015311 0ustar liggesusers\name{spEMsymloc} \title{Semiparametric EM-like Algorithm for univariate symmetric location mixture} \alias{spEMsymloc} \usage{ spEMsymloc(x, mu0, bw = bw.nrd0(x), h=bw, eps = 1e-8, maxiter = 100, stochastic = FALSE, verbose = FALSE) } \description{ Returns semiparametric EM algorithm output (Bordes et al, 2007, and Benaglia et al, 2009) for location mixtures of univariate data and symmetric component density. 
} \arguments{ \item{x}{A vector of length \eqn{n}{n} consisting of the data.} \item{mu0}{Either a vector specifying the initial centers for the \link{kmeans} function, and from which the number of components is obtained, or an integer \eqn{m} specifying the number of initial centers, which are then chosen randomly in \link{kmeans}.} \item{bw}{Bandwidth for density estimation, equal to the standard deviation of the kernel density.} \item{h}{Alternative way to specify the bandwidth, to provide backward compatibility.} \item{eps}{Tolerance limit for declaring algorithm convergence. Convergence is declared before \code{maxiter} iterations whenever the maximum change in any coordinate of the \code{lambda} (mixing proportion estimates) and \code{mu} (means) vector does not exceed \code{eps}.} \item{maxiter}{The maximum number of iterations allowed, for both stochastic and non-stochastic versions; for non-stochastic algorithms (\code{stochastic = FALSE}), convergence may be declared before \code{maxiter} iterations (see \code{eps} above).} \item{stochastic}{Flag, if FALSE (the default), runs the non-stochastic version of the algorithm, as in Benaglia et al (2009). Set to TRUE to run a stochastic version which simulates the posteriors at each iteration (as in Bordes et al, 2007), and runs for \code{maxiter} iterations.} \item{verbose}{If TRUE, print updates for every iteration of the algorithm as it runs} } \value{ \code{spEMsymloc} returns a list of class \code{npEM} with the following items: \item{data}{The raw data (an \eqn{n\times r}{n x r} matrix).} \item{posteriors}{An \eqn{n\times m}{n x m} matrix of posterior probabilities for observations. 
If \code{stochastic = TRUE}, this matrix is computed from an average over the \code{maxiter} iterations.} \item{bandwidth}{Same as the \code{bw} input argument, returned because this information is needed by any method that produces density estimates from the output.} \item{lambda}{The sequence of mixing proportions over iterations.} \item{lambdahat}{The final estimate for mixing proportions if \code{stochastic = FALSE}, the average over the sequence if \code{stochastic = TRUE}.} \item{mu}{the sequence of component means over iterations.} \item{muhat}{the final estimate of component means if \code{stochastic = FALSE}, the average over the sequence if \code{stochastic = TRUE}.} \item{symmetric}{Flag indicating that the kernel density estimate is using a symmetry assumption.} } \seealso{ \code{\link{plot.npEM}}, \code{\link{rnormmix}}, \code{\link{npEM}}, \code{\link{spEMsymlocN01}}, \code{\link{plotseq.npEM}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R., An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics, 18, 505-526, 2009. \item Benaglia, T., Chauveau, D., Hunter, D. R., and Young, D. mixtools: An R package for analyzing finite mixture models. Journal of Statistical Software, 32(6):1-29, 2009. \item Bordes, L., Chauveau, D., and Vandekerkhove, P. (2007), An EM algorithm for a semiparametric mixture model, Computational Statistics and Data Analysis, 51: 5429-5443. 
} } \examples{ ## Example from a normal location mixture set.seed(100) n <- 200 lambda <- c(1/3,2/3) mu <- c(0, 4); sigma<-rep(1, 2) x <- rnormmix(n, lambda, mu, sigma) out.stoc <- spEMsymloc(x, mu0=c(-1, 2), stochastic=TRUE) out.nonstoc <- spEMsymloc(x, mu0=c(-1, 2)) } \keyword{file} mixtools/man/weibullRMM_SEM.Rd0000644000176200001440000001102513055603106015713 0ustar liggesusers\name{weibullRMM_SEM} \title{St-EM algorithm for Reliability Mixture Models (RMM) of Weibull with right Censoring} \alias{weibullRMM_SEM} \usage{ weibullRMM_SEM(x, d = NULL, lambda = NULL, shape = NULL, scale = NULL, k = 2, maxit = 200, maxit.survreg = 200, epsilon = 1e-03, averaged = TRUE, verb = FALSE) } \description{ Parametric Stochastic EM (St-EM) algorithm for univariate finite mixture of Weibull distributions with randomly right censored data. } \arguments{ \item{x}{A vector of \eqn{n}{n} real positive lifetime (possibly censored) durations. If \code{d} is not \code{NULL} then a vector of random censoring times \code{c} occured, so that \eqn{x= min(x,c)} and \eqn{d = I(x <= c)}.} \item{d}{The vector of censoring indication, where 1 means observed lifetime data, and 0 means censored lifetime data.} \item{lambda}{Initial value of mixing proportions. If \code{NULL}, then \code{lambda} is set to \code{rep(1/k,k)}.} \item{shape}{Initial value of Weibull component shapes, all set to 1 if \code{NULL}.} \item{scale}{Initial value of Weibull component scales, all set to 1 if \code{NULL}.} \item{k}{Number of components of the mixture.} \item{maxit}{The number of iterations allowed, since for St-EM algorithms convergence is not based on stabilization, exactly \code{maxit} iterations are performed (see Bordes L. and Chauveau D. (2016) reference below).} \item{maxit.survreg}{The number of iterations allowed in the computations of the MLE for censored weibull data from the \code{survival} package (see Bordes L. and Chauveau D. 
(2016) reference below).} \item{epsilon}{Tolerance parameter used in the numerical computations of the MLE for censored weibull data by \code{survreg} from the \code{survival} package (see Bordes L. and Chauveau D. (2016) reference below).} \item{averaged}{The way of updating parameters at each iteration: if \code{TRUE}, current values of the parameters are obtained by averaging the sequence (see Bordes L. and Chauveau D. (2016) reference below).} \item{verb}{If TRUE, print updates for every iteration of the algorithm as it runs} } \details{This St-EM algorithm calls functions from the \code{survival} package to compute parametric MLE for censored weibull data.} \value{ \code{weibullRMM_SEM} returns a list of class "mixEM" with the following items: \item{x}{The input data.} \item{d}{The input censoring indicator.} \item{lambda}{The estimates for the mixing proportions.} \item{scale}{The estimates for the Weibull component scales.} \item{shape}{The estimates for the Weibull component shapes.} \item{loglik}{The log-likelihood value at convergence of the algorithm.} \item{posterior}{An \eqn{n\times k}{n x k} matrix of posterior probabilities for observation, after convergence of the algorithm.} \item{all.loglik}{The sequence of log-likelihoods over iterations.} \item{all.lambda}{The sequence of mixing proportions over iterations.} \item{all.scale}{The sequence of component scales over iterations.} \item{all.shape}{The sequence of component shapes over iterations.} \item{ft}{A character vector giving the name of the function called.} } \seealso{ Related functions: \code{\link{plotweibullRMM}}, \code{\link{summary.mixEM}}. Other models and algorithms for censored lifetime data (name convention is model_algorithm): \code{\link{expRMM_EM}}, \code{\link{spRMM_SEM}}. } \references{ \itemize{ \item Bordes, L., and Chauveau, D. 
(2016), Stochastic EM algorithms for parametric and semiparametric mixture models for right-censored lifetime data, Computational Statistics, Volume 31, Issue 4, pages 1513-1538. \url{http://link.springer.com/article/10.1007/s00180-016-0661-7} } } \author{Didier Chauveau} \examples{ n = 500 # sample size m = 2 # nb components lambda=c(0.4, 0.6) shape <- c(0.5,5); scale <- c(1,20) # model parameters set.seed(321) x <- rweibullmix(n, lambda, shape, scale) # iid ~ weibull mixture cs=runif(n,0,max(x)+10) # iid censoring times t <- apply(cbind(x,cs),1,min) # censored observations d <- 1*(x <= cs) # censoring indicator ## set arbitrary or "reasonable" (e.g., data-driven) initial values l0 <- rep(1/m,m); sh0 <- c(1, 2); sc0 <- c(2,10) # Stochastic EM algorithm a <- weibullRMM_SEM(t, d, lambda = l0, shape = sh0, scale = sc0, maxit = 200) summary(a) # Parameters estimates etc plotweibullRMM(a) # plot of St-EM sequences plot(a, which=2) # or equivalently, S3 method for "mixEM" object %%\dontrun{ %%} } \keyword{file} mixtools/man/plot.npEM.Rd0000755000176200001440000000730512514002272015011 0ustar liggesusers\name{plot.npEM} \title{Plot Nonparametric or Semiparametric EM Output} \alias{plot.npEM} \alias{plot.spEM} \usage{ \method{plot}{npEM}(x, blocks = NULL, hist=TRUE, addlegend = TRUE, scale=TRUE, title=NULL, breaks="Sturges", ylim=NULL, dens.col, newplot = TRUE, pos.legend = "topright", cex.legend = 1, \dots) \method{plot}{spEM}(x, blocks = NULL, hist=TRUE, addlegend = TRUE, scale=TRUE, title=NULL, breaks="Sturges", ylim=NULL, dens.col, newplot = TRUE, pos.legend = "topright", cex.legend = 1, \dots) } \description{ Takes an object of class \code{npEM} and returns a set of plots of the density estimates for each block and each component. There is one plot per block, with all the components displayed on each block so it is possible to see the mixture structure for each block. 
} \arguments{ \item{x}{An object of class \code{npEM} such as the output of the \code{\link{npEM}} function} \item{blocks}{Blocks (of repeated measures coordinates) to plot; not relevant for univariate case. Default is to plot all blocks.} \item{hist}{If TRUE, superimpose density estimate plots on a histogram of the data} \item{addlegend}{If TRUE, adds legend to the plot.} \item{scale}{If TRUE, scale each density estimate by its corresponding estimated mixing proportion, so that the total area under all densities equals 1 and the densities plotted may be added to produce an estimate of the mixture density. When FALSE, each density curve has area 1 in the plot.} \item{title}{Alternative vector of main titles for plots (recycled as many times as needed)} \item{breaks}{Passed directly to the \code{\link{hist}} function} \item{ylim}{\code{ylim} parameter to use for all plots, if desired. If not given, each plot uses its own ylim that ensures that no part of the plot will go past the top of the plotting area.} \item{dens.col}{Color values to use for the individual component density functions, repeated as necessary. Default value is \code{2:(m+1)}.} \item{newplot}{If TRUE, creates a new plot.} \item{pos.legend}{Single argument specifying the position of the legend. See `Details' section of \code{\link{legend}}.} \item{cex.legend}{Character expansion factor for \code{\link{legend}}.} \item{\dots}{Any remaining arguments are passed to the \code{\link{hist}} and \code{\link{lines}} functions.} } \value{ \code{plot.npEM} returns a list with two elements: % \item{means}{A \eqn{B\times m}{B x m} matrix of estimated population means, % where \eqn{B} is the number of blocks and \eqn{m} is the number of mixture components % (subpopulations)} % \item{variances}{A \eqn{B\times m}{B x m} matrix of estimated population variances} \item{x}{List of matrices. 
The \eqn{j}th column of the \eqn{i}th matrix is the vector of \eqn{x}-values for the \eqn{j}th density in the \eqn{i}th plot.} \item{y}{\eqn{y}-values, given in the same form as the \eqn{x}-values.} } \seealso{ \code{\link{npEM}}, \code{\link{density.npEM}}, \code{\link{spEMsymloc}}, \code{\link{plotseq.npEM}} } \examples{ ## Examine and plot water-level task data set. ## First, try a 3-component solution where no two coordinates are ## assumed i.d. data(Waterdata) set.seed(100) \dontrun{ a <- npEM(Waterdata[,3:10], 3, bw=4) par(mfrow=c(2,4)) plot(a) # This produces 8 plots, one for each coordinate } \dontrun{ ## Next, same thing but pairing clock angles that are directly opposite one ## another (1:00 with 7:00, 2:00 with 8:00, etc.) b <- npEM(Waterdata[,3:10], 3, blockid=c(4,3,2,1,3,4,1,2), bw=4) par(mfrow=c(2,2)) plot(b) # Now only 4 plots, one for each block } } \keyword{file} mixtools/man/mixtools-internal.Rd0000755000176200001440000001022313210335242016656 0ustar liggesusers\name{mixtools-internal} \alias{inv.logit} \alias{dexpmixt} \alias{HRkde} \alias{kern.B} \alias{kern.C} \alias{kern.G} \alias{kern.O} \alias{kern.T} \alias{kfoldCV} \alias{KMintegrate} \alias{KMod} \alias{ldc} \alias{logit} \alias{npMSL_old} \alias{plotseq} \alias{rlnormscalemix} \alias{splitsample} \alias{triang_wkde} \alias{wbw.kCV} \title{Internal 'mixtools' Functions} \description{ Internal kernel, semiparametric-related, and miscellaneous functions for the package \code{mixtools}. 
} \usage{ dexpmixt(t, lam, rate) HRkde(cpd, u = cpd[,1], kernelft = triang_wkde, bw = rep(bw.nrd0(as.vector(cpd[,1])), length(cpd[,1]))) inv.logit(eta) kern.B(x, xi, h, g = 0) kern.C(x, xi, h) kern.G(x, xi, h) kern.O(x, xi, h) kern.T(x, xi, h) kfoldCV(h, x, nbsets = 2, w = rep(1, length(x)), lower = mean(x) - 5*sd(x), upper = mean(x) + 5*sd(x)) KMintegrate(s) KMod(cpd, already.ordered = TRUE) ldc(data, class, score) logit(mu) npMSL_old(x, mu0, blockid = 1:ncol(x), bw=bw.nrd0(as.vector(as.matrix(x))), samebw = TRUE, h=bw, eps=1e-8, maxiter=500, bwiter = maxiter, ngrid = 200, post = NULL, verb = TRUE) plotseq(x, ...) rlnormscalemix(n, lambda=1, meanlog=1, sdlog=1, scale=0.1) splitsample(n, nbsets = 2) triang_wkde(t, u=t, w=rep(1/length(t),length(t)), bw=rep(bw.nrd0(t), length(t))) wbw.kCV(x, nbfold = 5, w = rep(1, length(x)), hmin = 0.1*hmax, hmax = NULL) } \arguments{ \item{x}{A vector of values to which local modeling techniques are applied.} \item{xi}{An n-vector of data values.} \item{h}{The bandwidth controlling the size of the window used for the local estimation around \code{x}.} \item{g}{A shape parameter required for the symmetric beta kernel. The default is \code{g} = 0 which yields the uniform kernel. 
Some common values are \code{g} = 1 for the Epanechnikov kernel, \code{g} = 2 for the biweight kernel, and \code{g} = 3 for the triweight kernel.} \item{mu0}{See updated arguments in the \code{npMSL} function.} \item{blockid}{See updated arguments in the \code{npMSL} function.} \item{bw}{See updated arguments in the \code{npMSL} function.} \item{samebw}{See updated arguments in the \code{npMSL} function.} \item{h}{See updated arguments in the \code{npMSL} function.} \item{eps}{See updated arguments in the \code{npMSL} function.} \item{maxiter}{See updated arguments in the \code{npMSL} function.} \item{bwiter}{See updated arguments in the \code{npMSL} function.} \item{ngrid}{See updated arguments in the \code{npMSL} function.} \item{post}{See updated arguments in the \code{npMSL} function.} \item{verb}{See updated arguments in the \code{npMSL} function.} \item{n}{See updated arguments in the \code{npMSL} function.} \item{nbsets}{See updated arguments in the \code{npMSL} function.} \item{w}{See updated arguments in the \code{npMSL} function.} \item{lower}{See updated arguments in the \code{npMSL} function.} \item{upper}{See updated arguments in the \code{npMSL} function.} \item{nbfold}{See updated arguments in the \code{npMSL} function.} \item{hmin}{See updated arguments in the \code{npMSL} function.} \item{hmax}{See updated arguments in the \code{npMSL} function.} \item{data}{Data, possibly multivariate, fed to the \code{mixturegram} function.} \item{class}{The number of classes, inputted based on number of components in the \code{mixturegram} function.} \item{score}{The score vector from LDA used in constructing a mixturegram.} \item{lam}{A vector of mixture proportions, should sum to one.} \item{rate}{A vector of mixture component rates.} \item{t}{Argument for \code{dexpmixt}.} \item{mu}{A proportion for which to calculate the logit function; i.e., \code{log(mu / (1 - mu))}.} \item{eta}{Any real value for which to calculate the inverse logit function; i.e., 
\code{exp(eta) / (1 + exp(eta))}.} \item{cpd}{Argument for \code{HRkde}.} \item{kernelft}{Argument for \code{HRkde}.} \item{s}{Argument for \code{KMintegrate}.} \item{meanlog}{Argument for \code{rlnormscalemix}.} \item{sdlog}{Argument for \code{rlnormscalemix}.} } \details{ These are usually not to be called by the user. } \seealso{ \code{\link{npMSL}} } \keyword{internal} mixtools/man/plotweibullRMM.Rd0000644000176200001440000000475513055603106016122 0ustar liggesusers\name{plotweibullRMM} \alias{plotweibullRMM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Plot sequences from the Stochastic EM algorithm for mixture of Weibull } \description{Function for plotting sequences of estimates along iterations, from an object returned by \code{\link{weibullRMM_SEM}}, a Stochastic EM algorithm for mixture of Weibull distributions with randomly right censored data (see reference below). } \usage{ plotweibullRMM(a, title = NULL, rowstyle = TRUE, subtitle = NULL, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{a}{An object returned by \code{\link{weibullRMM_SEM}}.} \item{title}{The title of the plot, set to some default value if \code{NULL}.} \item{rowstyle}{Window organization, for plots in rows (the default) or columns.} \item{subtitle}{A subtitle for the plot, set to some default value if \code{NULL}.} \item{...}{Other parameters (such as \code{lwd}) passed to \code{plot}, \code{lines}, and \code{legend} commands.} } \value{The plot returned} \seealso{ Related functions: \code{\link{weibullRMM_SEM}}, \code{\link{summary.mixEM}}. Other models and algorithms for censored lifetime data (name convention is model_algorithm): \code{\link{expRMM_EM}}, \code{\link{spRMM_SEM}} . } \references{ \itemize{ \item Bordes, L., and Chauveau, D. (2016), Stochastic EM algorithms for parametric and semiparametric mixture models for right-censored lifetime data, Computational Statistics, Volume 31, Issue 4, pages 1513-1538. 
\url{http://link.springer.com/article/10.1007/s00180-016-0661-7} } } \author{Didier Chauveau} %% ~Make other sections like Warning with \section{Warning }{....} ~ \examples{ n = 500 # sample size m = 2 # nb components lambda=c(0.4, 0.6) shape <- c(0.5,5); scale <- c(1,20) # model parameters set.seed(321) x <- rweibullmix(n, lambda, shape, scale) # iid ~ weibull mixture cs=runif(n,0,max(x)+10) # iid censoring times t <- apply(cbind(x,cs),1,min) # censored observations d <- 1*(x <= cs) # censoring indicator ## set arbitrary or "reasonable" (e.g., data-driven) initial values l0 <- rep(1/m,m); sh0 <- c(1, 2); sc0 <- c(2,10) # Stochastic EM algorithm a <- weibullRMM_SEM(t, d, lambda = l0, shape = sh0, scale = sc0, maxit = 200) summary(a) # Parameters estimates etc plotweibullRMM(a) # default plot of St-EM sequences %%\dontrun{ %%} } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{file} mixtools/man/spEMsymlocN01.Rd0000644000176200001440000001257712644764306015574 0ustar liggesusers\name{spEMsymlocN01} \alias{spEMsymlocN01} \title{semiparametric EM-like algorithm for univariate mixture in False Discovery Rate (FDR) estimation} \description{Return semiparametric EM-like algorithm output for a 2-components mixture model with one component set to Normal(0,1), and the other component being a unspecified but symmetric density with a location parameter. This model is tailored to FDR estimation on probit transform (\code{qnorm}) of p-values arising from multiple testing.} \usage{ spEMsymlocN01(x, mu0 = 2, bw = bw.nrd0(x), h=bw, eps = 1e-8, maxiter = 100, verbose = FALSE, plotf = FALSE)} \arguments{ \item{x}{A vector of length n consisting of the data, probit transform of pvalues, preferably sorted.} \item{mu0}{Starting value of vector of component means. If not set then the initial value is randomly generated by a \code{kmeans} of the data in two bins. 
Since component 1 is theoretically normal(0,1), \code{mu[1]} must be 0 and \code{mu[2]} some negative value (see details).} \item{bw}{Bandwidth for weighted kernel density estimation.} \item{h}{Alternative way to specify the bandwidth, to provide backward compatibility.} \item{eps}{Tolerance limit for declaring algorithm convergence. Convergence is declared before \code{maxiter} iterations whenever the maximum change in any coordinate of the \code{lambda} (mixing proportion estimates) and \code{mu} (mean of the semiparametric component) vector does not exceed \code{eps}} \item{maxiter}{The maximum number of iterations allowed; convergence may be declared before \code{maxiter} iterations (see \code{eps} above).} \item{verbose}{If TRUE, print updates for every iteration of the algorithm as it runs.} \item{plotf}{If TRUE, plots successive updates of the nonparametric density estimate over iterations. Mostly for testing purpose.} } \details{This algorithm is a specific version of semiparametric EM-like algorithm similar in spirit to \code{\link{spEMsymloc}}, but specialized for FDR estimation on probit transform (\code{qnorm}) of p-values in multiple testing framework. In this model, component 1 corresponds to the individuals under the null hypothesis, i.e. theoretically normal(0,1) distributed, whereas component 2 corresponds to individuals in the alternative hypothesis, with typically very small p-values and consequently negative values for probit(p) data. This model only assumes that these individuals come from an unspecified but symmetric density with a location parameter, as in Bordes and Vandekerkhove (2010) and Chauveau et al. (2014).} \value{ \code{spEMsymlocN01} returns a list of class \code{spEMN01} with the following items: \item{data}{The raw data (an \eqn{n\times r}{n x r} matrix).} \item{posteriors}{An \eqn{n\times 2}{n x 2} matrix of posterior probabilities for observations. 
This can be used in, e.g., \code{\link{plotFDR}} to plot False Discovery Rate estimates.} \item{bandwidth}{Same as the \code{bw} input argument, returned because this information is needed by any method that produces density estimates from the output.} \item{lambda}{The sequence of mixing proportions over iterations.} \item{lambdahat}{The final estimate for mixing proportions.} \item{mu}{the sequence of second component mean over iterations.} \item{muhat}{the final estimate of second component mean.} \item{symmetric}{Flag indicating that the kernel density estimate is using a symmetry assumption.} } \references{ \itemize{ \item Bordes, L. and Vandekerkhove, P. (2010). Semiparametric two-component mixture model with a known component: an asymptotically normal estimator. Mathematical Methods of Statistics, 19(1):22-41 \item Chauveau, D., Saby, N., Orton, T. G., Lemercier B., Walter, C. and Arrouys, D. (2014) Large-scale simultaneous hypothesis testing in monitoring carbon content from french soil database: A semi-parametric mixture approach. Geoderma 219-220 (2014): 117-124. 
} } \author{Didier Chauveau} \seealso{ \code{\link{spEMsymloc}}, \code{\link{normalmixEM}}, \code{\link{npEM}}, \code{\link{plot.spEMN01}}, \code{\link{plotFDR}} } \examples{ ## Probit transform of p-values ## from a Beta-Uniform mixture model ## comparison of parametric and semiparametric EM fit ## Note: in actual situations n=thousands set.seed(50) n=300 # nb of multiple tests m=2 # 2 mixture components a=c(1,0.1); b=c(1,1); lambda=c(0.6,0.4) # parameters z=sample(1:m, n, rep=TRUE, prob = lambda) p <- rbeta(n, shape1 = a[z], shape2 = b[z]) # p-values o <- order(p) cpd <- cbind(z,p)[o,] # sorted complete data, z=1 if H0, 2 if H1 p <- cpd[,2] # sorted p-values y <- qnorm(p) # probit transform of the pvalues # gaussian EM fit with component 1 constrained to N(0,1) s1 <- normalmixEM(y, mu=c(0,-4), mean.constr = c(0,NA), sd.constr = c(1,NA)) s2 <- spEMsymlocN01(y, mu0 = c(0,-3)) # spEM with N(0,1) fit hist(y, freq = FALSE, col = 8, main = "histogram of probit(pvalues)") plot(s2, add.plot = TRUE, lwd = 2) # Examples of plot capabilities # Note: posteriors must be ordered by p for plot.FDR # plotFDR(s1$post) # when true complete data not observed # plotFDR(s1$post, s2$post) # comparing 2 strategies plotFDR(s1$post, s2$post, lg1 = "normalmixEM", lg2 = "spEMsymlocN01", complete.data = cpd) # with true FDR computed from z } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{file} mixtools/man/regmixmodel.sel.Rd0000755000176200001440000000451012122214526016270 0ustar liggesusers\name{regmixmodel.sel} \title{Model Selection in Mixtures of Regressions} \alias{regmixmodel.sel} \usage{ regmixmodel.sel(x, y, w = NULL, k = 2, type = c("fixed", "random", "mixed"), ...) } \description{ Assess the number of components in a mixture of regressions model using the Akaike's information criterion (AIC), Schwarz's Bayesian information criterion (BIC), Bozdogan's consistent AIC (CAIC), and Integrated Completed Likelihood (ICL). 
} \arguments{ \item{x}{An nxp matrix (or list) of predictors. If an intercept is required, then \code{x} must NOT include a column of 1's! Requiring an intercept may be controlled through arguments specified in \code{...}.} \item{y}{An n-vector (or list) of response values.} \item{w}{An optional list of fixed effects predictors for type "mixed" or "random".} \item{k}{The maximum number of components to assess.} \item{type}{The type of regression mixture to use. If "fixed", then a mixture of regressions with fixed effects will be used. If "random", then a mixture of regressions where the random effects regression coefficients are assumed to come from a mixture will be used. If "mixed", the mixture structure used is the same as "random", except a coefficient of fixed effects is also assumed.} \item{...}{Additional arguments passed to the EM algorithm used for calculating the type of regression mixture specified in \code{type}.} } \value{ \code{regmixmodel.sel} returns a matrix of the AIC, BIC, CAIC, and ICL values along with the winner (i.e., the highest value given by the model selection criterion) for various types of regression mixtures. } \seealso{ \code{\link{regmixEM}}, \code{\link{regmixEM.mixed}} } \references{ Biernacki, C., Celeux, G. and Govaert, G. (2000) Assessing a Mixture Model for Clustering with the Integrated Completed Likelihood, \emph{IEEE Transactions on Pattern Analysis and Machine Intelligence} \bold{22(7)}, 719--725. Bozdogan, H. (1987) Model Selection and Akaike's Information Criterion (AIC): The General Theory and its Analytical Extensions, \emph{Psychometrika} \bold{52}, 345--370. } \examples{ ## Assessing the number of components for NOdata. 
data(NOdata) attach(NOdata) set.seed(100) regmixmodel.sel(x = NO, y = Equivalence, k = 3, type = "fixed") } \keyword{file} mixtools/man/print.npEM.Rd0000755000176200001440000000171112514002272015162 0ustar liggesusers\name{print.npEM} \alias{print.npEM} \title{Printing non- and semi-parametric multivariate mixture model fits} \usage{ \method{print}{npEM}(x, \dots) } \arguments{ \item{x}{an object of class \code{npEM} such as a result of a call to \code{\link{npEM}}} \item{\dots}{Additional arguments to \code{\link{print}}} } \description{ \code{\link[base]{print}} method for class \code{npEM}. } \details{ \code{print.npEM} prints the elements of an \code{npEM} object without printing the data or the posterior probabilities. (These may still be accessed as \code{x$data} and \code{x$posteriors}.) } \value{ \code{print.npEM} returns (invisibly) the full value of \code{x} itself, including the \code{data} and \code{posteriors} elements. } \seealso{ \code{\link{npEM}}, \code{\link{plot.npEM}} \code{\link{summary.npEM}} } \examples{ data(Waterdata) set.seed(100) \dontrun{npEM(Waterdata[,3:10], 3, bw=4, verb=FALSE) # Assume indep but not iid} } \keyword{file} mixtools/man/summary.npEM.Rd0000644000176200001440000000367012644774124015546 0ustar liggesusers\name{summary.npEM} \alias{summary.npEM} \alias{print.summary.npEM} \title{Summarizing non- and semi-parametric multivariate mixture model fits} \usage{ \method{summary}{npEM}(object, \dots) \method{print}{summary.npEM}(x, digits=3, \dots) } \arguments{ \item{object,x}{an object of class \code{npEM} such as a result of a call to \code{\link{npEM}}} \item{digits}{Significant digits for printing values} \item{\dots}{further arguments passed to or from other methods.} } \description{ \code{\link[base]{summary}} method for class \code{npEM}. } \details{ \code{\link{summary.npEM}} prints means and variances of each block for each component. 
These quantities might not be part of the model, but they are estimated nonparametrically based on the posterior probabilities and the data. } \value{ The function \code{\link{summary.npEM}} returns a list of type \code{summary.npEM} with the following components: \item{n}{The number of observations} \item{m}{The number of mixture components} \item{B}{The number of blocks} \item{blockid}{The block ID (from 1 through B) for each of the coordinates of the multivariate observations. The \code{blockid} component is of length \eqn{r}, the dimension of each observation.} \item{means}{A \eqn{B\times m}{B x m} matrix giving the estimated mean of each block in each component.} \item{variances}{Same as \code{means} but giving the estimated variances instead.} } \references{ Benaglia, T., Chauveau, D., and Hunter, D. R. (2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, \emph{Journal of Computational and Graphical Statistics}, \bold{18(2)}, 505--526. } \seealso{ \code{\link{npEM}}, \code{\link{plot.npEM}} } \examples{ data(Waterdata) set.seed(100) \dontrun{ a <- npEM(Waterdata[,3:10], 3, bw=4) # Assume indep but not iid summary(a) b <- npEM(Waterdata[,3:10], 3, bw=4, blockid=rep(1,8)) # Now assume iid summary(b) } } \keyword{file} mixtools/man/RanEffdata.Rd0000755000176200001440000000103013616576526015203 0ustar liggesusers\name{RanEffdata} \docType{data} \title{Simulated Data from 2-Component Mixture of Regressions with Random Effects} \alias{RanEffdata} \usage{ data(RanEffdata) } \description{ This data set was generated from a 2-component mixture of regressions with random effects. } \format{This data set consists of a list with 100 25x3 matrices. The first column is the response variable, the second column is a column of 1's and the last column is the predictor variable. 
} \seealso{ \code{\link{regmixEM.mixed}} } \keyword{datasets} mixtools/man/post.beta.Rd0000755000176200001440000000462612644763353015115 0ustar liggesusers\name{post.beta} \title{Summary of Posterior Regression Coefficients in Mixtures of Random Effects Regressions} \alias{post.beta} \usage{ post.beta(y, x, p.beta, p.z) } \description{ Returns a 2x2 matrix of plots summarizing the posterior intercept and slope terms in a mixture of random effects regression with arbitrarily many components. } \arguments{ \item{y}{A list of N response trajectories with (possibly) varying dimensions of length \eqn{n_i}.} \item{x}{A list of N predictor values of dimension \eqn{n_i}. Each trajectory in y has its own design vector.} \item{p.beta}{A list of N 2xk matrices giving the posterior intercept and slope values from the output of an EM algorithm.} \item{p.z}{An Nxk matrix of posterior membership probabilities from the output of an EM algorithm.} } \value{ \code{post.beta} returns a 2x2 matrix of plots giving: \item{(1, 1)}{The data plotted on the x-y axes with all posterior regression lines.} \item{(1, 2)}{The data plotted on the x-y axes with most probable posterior regression lines.} \item{(2, 1)}{A beta-space plot of all posterior regression coefficients.} \item{(2, 2)}{A beta-space plot of most probable posterior regression coefficients.} } \seealso{ \code{\link{regmixEM.mixed}}, \code{\link{plot.mixEM}} } \references{ Young, D. S. and Hunter, D. R. (2015) Random Effects Regression Mixtures for Analyzing Infant Habituation, \emph{Journal of Applied Statistics}, \bold{42(7)}, 1421--1441. } \examples{ ## EM output for simulated data from 2-component mixture of random effects. 
data(RanEffdata) set.seed(100) x <- lapply(1:length(RanEffdata), function(i) matrix(RanEffdata[[i]][, 2:3], ncol = 2)) x <- x[1:20] y <- lapply(1:length(RanEffdata), function(i) matrix(RanEffdata[[i]][, 1], ncol = 1)) y <- y[1:20] lambda <- c(0.45, 0.55) mu <- matrix(c(0, 4, 100, 12), 2, 2) sigma <- 2 R <- list(diag(1, 2), diag(1, 2)) em.out <- regmixEM.mixed(y, x, sigma = sigma, arb.sigma = FALSE, lambda = lambda, mu = mu, R = R, addintercept.random = FALSE, epsilon = 1e-02, verb = TRUE) ## Obtaining the 2x2 matrix of plots. x.ran <- lapply(1:length(x), function(i) x[[i]][, 2]) p.beta <- em.out$posterior.beta p.z <- em.out$posterior.z post.beta(y, x.ran, p.beta = p.beta, p.z = p.z) } \details{ This is primarily used for within \code{plot.mixEM}. } \keyword{internal} mixtools/man/perm.Rd0000755000176200001440000000143412112156012014131 0ustar liggesusers\name{perm} \title{Permutation Function} \alias{perm} \usage{ perm(n, r, v = 1:n) } \description{ Enumerates the possible permutations of a specified size from the elements of a vector having the same size. } \arguments{ \item{n}{Size of the source vector.} \item{r}{Size of the target vector.} \item{v}{Source vector. Must be a vector of length \code{n}.} } \value{ \code{perm} returns a matrix where each row contains one of the permutations of length \code{r}. } \details{ This function is called by \code{segregmixEM} and the associated internal functions. This is also a simplified version of the function \code{permutations} found in the package \code{gtools}. 
} \seealso{ \code{\link{segregmixEM}} } \examples{ perm(3, 3, 2:4) } \keyword{internal} mixtools/man/plotFDR.Rd0000644000176200001440000000457312644764141014526 0ustar liggesusers\name{plotFDR} \alias{plotFDR} \title{Plot False Discovery Rate (FDR) estimates from output by EM-like strategies} \description{Plot FDR\eqn{(p_i)} estimates against index of sorted p-values from, e.g., normalmixEM or the semiparametric mixture model posterior probabilities output by \code{\link{spEMsymlocN01}}, or any EM-algorithm like \code{\link{normalmixEM}} which returns posterior probabilities. The function can simultaneously plot FDR estimates from two strategies for comparison. Plot of the true FDR can be added if complete data are available (typically in simulation studies).} \usage{ plotFDR(post1, post2 = NULL, lg1 = "FDR 1", lg2 = NULL, title = NULL, compH0 = 1, alpha = 0.1, complete.data = NULL, pctfdr = 0.3) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{post1}{The matrix of posterior probabilities from objects such as the output from \code{\link{spEMsymlocN01}}. 
The rows need to be sorted by increasing pvalues.} \item{post2}{A second object like \code{post1} if comparison is desired, also sorted by increasing pvalues.} \item{lg1}{Text describing the FDR estimate in \code{post1}.} \item{lg2}{Text describing the FDR estimate in \code{post2} if provided.} \item{title}{Plot title, a default is provided if \code{NULL}.} \item{compH0}{The component indicator associated to the null hypothesis H0, normally 1 since it is defined in this way in \code{\link{spEMsymlocN01}}, but in case of label switching in other algorithms it can be set to \code{2}.} \item{alpha}{The target FDR level; the index at which the FDR estimate crosses the horizontal line for level \code{alpha} gives the maximum number of cases to reject.} \item{complete.data}{An array with \eqn{n} lines and 2 columns, with the component indicator in column 1 and the p-values in column 2, sorted by p-values.} \item{pctfdr}{The level up to which the FDR is plotted, i.e. the scale of the vertical axis.} } \value{A plot of one or two FDR estimates, with the true FDR if available} \references{ \itemize{ \item Chauveau, D., Saby, N., Orton, T. G., Lemercier B., Walter, C. and Arrouys, D. Large-scale simultaneous hypothesis testing in monitoring carbon content from French soil database -- A semi-parametric mixture approach, Geoderma 219-220 (2014), 117-124. } } \author{Didier Chauveau} \seealso{ \code{\link{spEMsymlocN01}}} % \keyword{file}mixtools/man/wquantile.Rd0000644000176200001440000000261612644764451015225 0ustar liggesusers\name{wquantile} \alias{wIQR} \alias{wquantile} \title{Weighted quantiles} \description{ Functions to compute weighted quantiles and the weighted interquartile range. } \usage{ wquantile(wt = rep(1,length(x)), x, probs, already.sorted = FALSE, already.normalized = FALSE) wIQR(wt = rep(1,length(x)), x, already.sorted = FALSE, already.normalized = FALSE) } %- maybe also 'usage' for other objects documented here. 
\arguments{ \item{wt}{ Vector of weights } \item{x}{ Vector of data, same length as \code{wt} } \item{probs}{ Numeric vector of probabilities with values in [0,1].} \item{already.sorted}{ If FALSE, sort \code{wt} and \code{x} in increasing order of \code{x}. If TRUE, it is assumed that \code{wt} and \code{x} are already sorted.} \item{already.normalized}{ If FALSE, normalize \code{wt} by diving each entry by the sum of all entries. If TRUE, it is assumed that \code{sum(wt)==1}} } \details{ \code{wquantile} uses the \code{\link{findInterval}} function. \code{wIQR} calls the \code{wquantile} function. } \value{ Returns the sample quantiles or interquartile range of a discrete distribution with support points \code{x} and corresponding probability masses \code{wt} } \seealso{ \code{\link{npEM}}} \examples{ IQR(1:10) wIQR(x=1:10) # Note: Different algorithm than IQR function wIQR(1:10,1:10) # Weighted quartiles are now 4 and 8 } \keyword{robust} mixtools/man/summary.spRMM.Rd0000644000176200001440000000317413055603106015670 0ustar liggesusers\name{summary.spRMM} \alias{summary.spRMM} \title{Summarizing fits from Stochastic EM algorithm for semiparametric scaled mixture of censored data} \usage{ \method{summary}{spRMM}(object, digits = 6, ...) } \arguments{ \item{object}{an object of class \code{spRMM} such as a result of a call to \code{\link{spRMM_SEM}}} \item{digits}{Significant digits for printing values} \item{...}{Additional parameters passed to \code{print}.} } \description{ \code{\link[base]{summary}} method for class \code{spRMM}. } \details{ \code{\link{summary.spRMM}} prints scalar parameter estimates for a fitted mixture model: each component weight and the scaling factor, see reference below. The functional (nonparametric) estimates of survival and hazard rate funcions can be obtained using \code{\link{plotspRMM}}. 
} \value{ The function \code{\link{summary.spRMM}} prints the final loglikelihood value at the solution as well as The estimated mixing weights and the scaling parameter. } \seealso{ Function for plotting functional (nonparametric) estimates: \code{\link{plotspRMM}}. Other models and algorithms for censored lifetime data (name convention is model_algorithm): \code{\link{expRMM_EM}}, \code{\link{weibullRMM_SEM}}. } \references{ \itemize{ \item Bordes, L., and Chauveau, D. (2016), Stochastic EM algorithms for parametric and semiparametric mixture models for right-censored lifetime data, Computational Statistics, Volume 31, Issue 4, pages 1513-1538. \url{http://link.springer.com/article/10.1007/s00180-016-0661-7} } } \author{Didier Chauveau} \examples{ # See example(spRMM_SEM) } \keyword{file} mixtools/man/mvnpEM.Rd0000755000176200001440000001272212644775462014424 0ustar liggesusers\name{mvnpEM} \title{EM-like Algorithm for Nonparametric Mixture Models with Conditionally Independent Multivariate Component Densities} \alias{mvnpEM} \usage{ mvnpEM(x, mu0, blockid = 1:ncol(x), samebw = TRUE, bwdefault = apply(x,2,bw.nrd0), init = NULL, eps = 1e-8, maxiter = 500, verb = TRUE) } \description{ An extension of the original \code{\link{npEM}} algorithm, for mixtures of multivariate data where the coordinates of a row (case) in the data matrix are assumed to be made of independent but multivariate blocks (instead of just coordinates), conditional on the mixture component (subpopulation) from which they are drawn (Chauveau and Hoang 2015). } \arguments{ \item{x}{An \eqn{n\times r}{n x r} matrix of data. Each of the \eqn{n} rows is a case, and each case has \eqn{r} repeated measurements. 
These measurements are assumed to be conditionally independent, conditional on the mixture component (subpopulation) from which the case is drawn.} \item{mu0}{Either an \eqn{m\times r}{m x r} matrix specifying the initial centers for the \link{kmeans} function, or an integer \eqn{m} specifying the number of initial centers, which are then chosen randomly in \link{kmeans}} \item{blockid}{A vector of length \eqn{r} identifying coordinates (columns of \code{x}) that are in the same block. The default has all distinct elements, indicating that the model has \eqn{r} blocks of dimension 1, in which case the model is handled directly by the \code{\link{npEM}} algorithm. See example below for actual multivariate blocks example.} \item{samebw}{Logical: If \code{TRUE}, use the same bandwidth per coordinate for all iteration and all components. If \code{FALSE}, use a separate bandwidth for each component and coordinate, and update this bandwidth at each iteration of the algorithm using a suitably modified \code{\link{bw.nrd0}} method as described in Benaglia et al (2011) and Chauveau and Hoang (2015).} \item{bwdefault}{Bandwidth default for density estimation,a simplistic application of the default \code{\link{bw.nrd0}} for each coordinate (column) of the data.} \item{init}{Initialization method, based on an initial \eqn{n\times m}{n x m} matrix for the posterior probabilities. If \code{NULL}, a \code{\link{kmeans}} clustering with \code{mu0} initial centers is applied to the data and the initial matrix of posteriors is built from the result.} \item{eps}{Tolerance limit for declaring algorithm convergence. 
Convergence is declared whenever the maximum change in any coordinate of the \code{lambda} vector (of mixing proportion estimates) does not exceed \code{eps}.} \item{maxiter}{The maximum number of iterations allowed; convergence may be declared before \code{maxiter} iterations (see \code{eps} above).} \item{verb}{Verbose mode; if TRUE, print updates for every iteration of the algorithm as it runs} } \value{ \code{mvnpEM} returns a list of class \code{mvnpEM} with the following items: \item{data}{The raw data (an \eqn{n\times r}{n x r} matrix).} \item{posteriors}{An \eqn{n\times m}{n x m} matrix of posterior probabilities for each observation (row).} \item{lambda}{The sequence of mixing proportions over iterations.} \item{blockid}{The \code{blockid} input argument. Needed by any method that produces density estimates from the output, like \code{\link{plot.mvnpEM}}.} \item{samebw}{The \code{samebw} input argument. Needed by any method that produces density estimates from the output, like \code{\link{plot.mvnpEM}}.} \item{bandwidth}{The final bandwidth matrix after convergence of the algorithm. Its shape depends on the \code{samebw} input argument. If \code{samebw = TRUE}, a vectors with the bandwidth value for each of the \code{r} coordinates (same for all components and iterations). If \code{samebw = FALSE}, a \eqn{m\times r}{m x r} matrix, where each row is associated to one component and gives the \eqn{r} bandwidth values, one for each coordinate. Needed by any method that produces density estimates from the output, like \code{\link{plot.mvnpEM}}.} \item{lambdahat}{The final mixing proportions.} \item{loglik}{The sequence of pseudo log-likelihood values over iterations.} } \seealso{ \code{\link{plot.mvnpEM}}, \code{\link{npEM}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R. (2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics, 18, 505-526. 
\item Benaglia, T., Chauveau, D. and Hunter, D.R. (2011), Bandwidth Selection in an EM-like algorithm for nonparametric multivariate mixtures. Nonparametric Statistics and Mixture Models: A Festschrift in Honor of Thomas P. Hettmansperger. World Scientific Publishing Co., pages 15-27. \item Chauveau, D., and Hoang, V. T. L. (2015), Nonparametric mixture models with conditionally independent multivariate component densities, Preprint under revision. \url{https://hal.archives-ouvertes.fr/hal-01094837} } } \examples{ # Example as in Chauveau and Hoang (2015) with 6 coordinates \dontrun{ m=2; r=6; blockid <-c(1,1,2,2,3,3) # 3 bivariate blocks # generate some data x ... a <- mvnpEM(x, mu0=2, blockid, samebw=F) # adaptive bandwidth plot(a) # this S3 method produces 6 plots of univariate marginals summary(a)} } \keyword{file} mixtools/man/plot.mixEM.Rd0000755000176200001440000001022512644764047015207 0ustar liggesusers\name{plot.mixEM} \title{Various Plots Pertaining to Mixture Models} \alias{plot.mixEM} \usage{ \method{plot}{mixEM}(x, whichplots = 1, loglik = 1 \%in\% whichplots, density = 2 \%in\% whichplots, xlab1="Iteration", ylab1="Log-Likelihood", main1="Observed Data Log-Likelihood", col1=1, lwd1=2, xlab2=NULL, ylab2=NULL, main2=NULL, col2=NULL, lwd2=2, alpha = 0.05, marginal = FALSE, ...) } \description{ Takes an object of class \code{mixEM} and returns various graphical output for select mixture models. } \arguments{ \item{x}{An object of class \code{mixEM}.} \item{whichplots}{vector telling which plots to produce: 1 = loglikelihood plot, 2 = density plot. Irrelevant if \code{loglik} and \code{density} are specified.} \item{loglik}{If TRUE, a plot of the log-likelihood versus the EM iterations is given.} \item{density}{Graphics pertaining to certain mixture models. The details are given below.} \item{xlab1, ylab1, main1, col1, lwd1}{Graphical parameters \code{xlab}, ..., \code{lwd} to be passed to the loglikelihood plot. 
Trying to change these parameters using \code{xlab}, ..., \code{lwd} will result in an error, but all other graphical parameters are passed directly to the plotting functions via ...} \item{xlab2, ylab2, main2, col2, lwd2}{Same as \code{xlab1} etc. but for the density plot} \item{alpha}{A vector of significance levels when constructing confidence ellipses and confidence bands for the mixture of multivariate normals and mixture of regressions cases, respectively. The default is 0.05.} \item{marginal}{For the mixture of bivariate normals, should optional marginal histograms be included?} \item{...}{Graphical parameters passed to \code{plot} command.} } \value{ \code{plot.mixEM} returns a plot of the log-likelihood versus the EM iterations by default for all objects of class \code{mixEM}. In addition, other plots may be produced for the following k-component mixture model functions: \item{normalmixEM}{A histogram of the raw data is produced along with k density curves determined by \code{normalmixEM}.} \item{repnormmixEM}{A histogram of the raw data produced in a similar manner as for \code{normalmixEM}.} \item{mvnormalmixEM}{A 2-dimensional plot with each point color-coded to denote its most probable component membership. In addition, the estimated component means are plotted along with (1 - \code{alpha})\% bivariate normal density contours. These ellipses are constructed by assigning each value to their component of most probable membership and then using normal theory. Optional marginal histograms may also be produced.} \item{regmixEM}{A plot of the response versus the predictor with each point color-coded to denote its most probable component membership. In addition, the estimated component regression lines are plotted along with (1 - \code{alpha})\% Working-Hotelling confidence bands. 
These bands are constructed by assigning each value to their component of most probable membership and then performing least squares estimation.} \item{logisregmixEM}{A plot of the binary response versus the predictor with each point color-coded to denote its most probable compopnent membership. In addition, the estimate component logistic regression lines are plotted.} \item{regmixEM.mixed}{Provides a 2x2 matrix of plots summarizing the posterior slope and posterior intercept terms from a mixture of random effects regression. See \code{post.beta} for a more detailed description.} } \seealso{ \code{\link{post.beta}} } \examples{ ##Analyzing the Old Faithful geyser data with a 2-component mixture of normals. data(faithful) attach(faithful) set.seed(100) out <- normalmixEM(waiting, arbvar = FALSE, verb = TRUE, epsilon = 1e-04) plot(out, density = TRUE, w = 1.1) ##Fitting randomly generated data with a 2-component location mixture of bivariate normals. x.1 <- rmvnorm(40, c(0, 0)) x.2 <- rmvnorm(60, c(3, 4)) X.1 <- rbind(x.1, x.2) out.1 <- mvnormalmixEM(X.1, arbvar = FALSE, verb = TRUE, epsilon = 1e-03) plot(out.1, density = TRUE, alpha = c(0.01, 0.05, 0.10), marginal = TRUE) } \keyword{file} mixtools/man/test.equality.Rd0000755000176200001440000000443112122215712016005 0ustar liggesusers\name{test.equality} \title{Performs Chi-Square Tests for Scale and Location Mixtures} \alias{test.equality} \usage{ test.equality(y, x = NULL, arbmean = TRUE, arbvar = FALSE, mu = NULL, sigma = NULL, beta = NULL, lambda = NULL, ...) } \description{ Performs a likelihood ratio test of a location (or scale) normal or regression mixture versus the more general model. For a normal mixture, the alternative hypothesis is that each component has its own mean and variance, whereas the null is that all means (in the case of a scale mixture) or all variances (in the case of a location mixture) are equal. 
This test is asymptotically chi-square with degrees of freedom equal to k-1, where k is the number of components. } \arguments{ \item{y}{The responses for \code{regmixEM} or the data for \code{normalmixEM}.} \item{x}{The predictors for \code{regmixEM}.} \item{arbmean}{If FALSE, then a scale mixture analysis is performed for \code{normalmixEM} or \code{regmixEM}.} \item{arbvar}{If FALSE, then a location mixture analysis is performed for \code{normalmixEM} or \code{regmixEM}.} \item{mu}{An optional vector for starting values (under the null hypothesis) for \code{mu} in \code{normalmixEM}.} \item{sigma}{An optional vector for starting values (under the null hypothesis) for \code{sigma} in \code{normalmixEM} or \code{regmixEM}.} \item{beta}{An optional matrix for starting values (under the null hypothesis) for \code{beta} in \code{regmixEM}.} \item{lambda}{An otional vector for starting values (under the null hypothesis) for \code{lambda} in \code{normalmixEM} or \code{regmixEM}.} \item{...}{Additional arguments passed to the various EM algorithms for the mixture of interest.} } \value{ \code{test.equality} returns a list with the following items: \item{chi.sq}{The chi-squared test statistic.} \item{df}{The degrees of freedom for the chi-squared test statistic.} \item{p.value}{The p-value corresponding to this likelihood ratio test.} } \seealso{ \code{\link{test.equality.mixed}} } \examples{ ## Should a location mixture be used for the Old Faithful data? data(faithful) attach(faithful) set.seed(100) test.equality(y = waiting, arbmean = FALSE, arbvar = TRUE) } \keyword{file} mixtools/man/repnormmixmodel.sel.Rd0000755000176200001440000000323113055645134017205 0ustar liggesusers\name{repnormmixmodel.sel} \title{Model Selection in Mixtures of Normals with Repeated Measures} \alias{repnormmixmodel.sel} \usage{ repnormmixmodel.sel(x, k = 2, ...) 
} \description{ Assess the number of components in a mixture model with normal components and repeated measures using the Akaike's information criterion (AIC), Schwartz's Bayesian information criterion (BIC), Bozdogan's consistent AIC (CAIC), and Integrated Completed Likelihood (ICL). } \arguments{ \item{x}{An mxn matrix of observations. The rows correspond to the repeated measures and the columns correspond to the subject.} \item{k}{The maximum number of components to assess.} \item{...}{Additional arguments passed to \code{repnormmixEM}.} } \value{ \code{repnormmixmodel.sel} returns a matrix of the AIC, BIC, CAIC, and ICL values along with the winner (i.e., the highest value given by the model selection criterion) for a mixture of normals with repeated measures. } \seealso{ \code{\link{repnormmixEM}} } \references{ Biernacki, C., Celeux, G., and Govaert, G. (2000). Assessing a Mixture Model for Clustering with the Integrated Completed Likelihood. \emph{IEEE Transactions on Pattern Analysis and Machine Intelligence}, 22(7):719-725. Bozdogan, H. (1987). Model Selection and Akaike's Information Criterion (AIC): The General Theory and its Analytical Extensions. \emph{Psychometrika}, 52:345-370. } \examples{ ## Assessing the number of components for the water-level task data set. data(Waterdata) water<-t(as.matrix(Waterdata[,3:10])) set.seed(100) out <- repnormmixmodel.sel(water, k = 3, epsilon = 5e-01) out } \keyword{file} mixtools/man/density.spEM.Rd0000755000176200001440000000365012122210306015510 0ustar liggesusers\name{density.spEM} \title{Normal kernel density estimate for semiparametric EM output} \alias{density.spEM} \usage{ \method{density}{spEM}(x, u=NULL, component=1, block=1, scale=FALSE, \dots) } \description{ Takes an object of class \code{spEM} and returns an object of class \code{\link{density}} giving the kernel density estimate. 
} \arguments{ \item{x}{An object of class \code{npEM} such as the output of the \code{\link{npEM}} or \code{\link{spEMsymloc}} functions.} \item{u}{Vector of points at which the density is to be evaluated} \item{component}{Mixture component number; should be an integer from 1 to the number of columns of \code{x$posteriors}.} \item{block}{Block of repeated measures. Only applicable in repeated measures case, for which \code{x$blockid} exists; should be an integer from 1 to \code{max(x$blockid)}.} \item{scale}{Logical: If TRUE, multiply the density values by the corresponding mixing proportions found in \code{x$lambdahat}} \item{\dots}{Additional arguments; not used by this method.} } \details{ The bandwidth is taken to be the same as that used to produce the \code{npEM} object, which is given by \code{x$bandwidth}. } \value{ \code{density.spEM} returns a list of type \code{"density"}. See \code{\link{density}} for details. In particular, the output of \code{density.spEM} may be used directly by functions such as \code{\link{plot}} or \code{\link{lines}}. } \seealso{ \code{\link{spEM}}, \code{\link{spEMsymloc}}, \code{\link{plot.spEM}} } \examples{ set.seed(100) mu <- matrix(c(0, 15), 2, 3) sigma <- matrix(c(1, 5), 2, 3) x <- rmvnormmix(300, lambda = c(.4,.6), mu = mu, sigma = sigma) d <- spEM(x, mu0 = 2, blockid = rep(1,3), constbw = TRUE) plot(d, xlim=c(-10, 40), ylim = c(0, .16), xlab = "", breaks = 30, cex.lab=1.5, cex.axis=1.5) # plot.spEM calls density.spEM here } \keyword{file} mixtools/man/ise.npEM.Rd0000755000176200001440000001027713055645105014626 0ustar liggesusers\name{ise.npEM} \title{Integrated Squared Error for a selected density from npEM output} \alias{ise.npEM} \usage{ ise.npEM(npEMout, component=1, block=1, truepdf, lower=-Inf, upper=Inf, plots = TRUE, ...) 
} \description{ Computes the integrated squared error for a selected estimated density from \code{\link{npEM}} output (selected by specifying the component and block number), relative to a true pdf that must be specified by the user. The range for the numerical integration must be specified. This function also returns (by default) a plot of the true and estimated densities. } \arguments{ \item{npEMout}{An object of class \code{npEM} such as the output of the \code{\link{npEM}} function} \item{component, block}{Component and block of particular density to analyze from \code{npEMout}.} \item{truepdf}{an \R function taking a numeric first argument and returning a numeric vector of the same length. Returning a non-finite element will generate an error.} \item{lower, upper}{the limits of integration. Can be infinite.} \item{plots}{logical: Should plots be produced?} \item{...}{additional arguments to be passed to \code{truepdf} (and that may be mandatory like, e.g., the \code{df = } argument of \code{dt}). Remember to use argument names not matching those of \code{ise.npRM}.} } \value{ Just as for the \code{\link{integrate}} function, a list of class \code{"integrate"} with components \item{value}{the final estimate of the integral.} \item{abs.error}{estimate of the modulus of the absolute error.} \item{subdivisions}{the number of subintervals produced in the subdivision process.} \item{message}{\code{"OK"} or a character string giving the error message.} \item{call}{the matched call.} } \details{This function calls the \code{\link{wkde}} (weighted kernel density estimate) function. } \seealso{ \code{\link{npEM}}, \code{\link{wkde}}, \code{\link{integrate}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R. (2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics, 18, 505-526. \item Benaglia, T., Chauveau, D., Hunter, D. R., and Young, D. 
(2009), mixtools: An R package for analyzing finite mixture models. Journal of Statistical Software, 32(6):1-29. } } \examples{ # Mixture with mv gaussian model set.seed(100) m <- 2 # no. of components r <- 3 # no. of repeated measures (coordinates) lambda <- c(0.4, 0.6) # Note: Need first 2 coordinates conditionally iid due to block structure mu <- matrix(c(0, 0, 0, 3, 3, 5), m, r, byrow=TRUE) # means sigma <- matrix(rep(1, 6), m, r, byrow=TRUE) # stdevs blockid = c(1,1,2) # block structure of coordinates n <- 200 x <- rmvnormmix(n, lambda, mu, sigma) # simulated data # fit the model with "arbitrary" initial centers centers <- matrix(c(0, 0, 0, 4, 4, 4), 2, 3, byrow=TRUE) a <- npEM(x, centers, blockid, eps=1e-8, verb=FALSE) # Calculate integrated squared error for j=2, b=1: j <- 2 # component b <- 1 # block coords <- a$blockid == b ise.npEM(a, j, b, dnorm, lower=0, upper=10, plots=TRUE, mean=mu[j,coords][1], sd=sigma[j, coords][1]) # The following (lengthy) example recreates the normal multivariate # mixture model simulation from Benaglia et al (2009). mu <- matrix(c(0, 0, 0, 3, 4, 5), m, r, byrow=TRUE) nbrep <- 5 # Benaglia et al use 300 replications # matrix for storing sums of Integrated Squared Errors ISE <- matrix(0,m,r,dimnames=list(Components=1:m, Blocks=1:r)) nblabsw <- 0 # no. of label switches for (mc in 1:nbrep) { print(paste("REPETITION", mc)) x <- rmvnormmix(n,lambda,mu,sigma) # simulated data a <- npEM(x, centers, verb=FALSE) #default: if (a$lambda[1] > a$lambda[2]) nblabsw <- nblabsw + 1 for (j in 1:m) { # for each component for (k in 1:r) { # for each coordinate; not assuming iid! 
# dnorm with correct mean, sd is the true density: ISE[j,k] <- ISE[j,k] + ise.npEM(a, j, k, dnorm, lower=mu[j,k]-5, upper=mu[j,k]+5, plots=FALSE, mean=mu[j,k], sd=sigma[j,k])$value } } MISE <- ISE/nbrep # Mean ISE sqMISE <- sqrt(MISE) # root-mean-integrated-squared error } sqMISE } \keyword{file} mixtools/man/depth.Rd0000755000176200001440000000201012122210474014265 0ustar liggesusers\name{depth} \alias{depth} \title{Elliptical and Spherical Depth} \description{ Computation of spherical or elliptical depth. } \usage{ depth(pts, x, Cx = var(x)) } \arguments{ \item{pts}{A kxd matrix containing the k points that one wants to compute the depth. Each row is a point. } \item{x}{A nxd matrix containing the reference data. Each row is an observation.} \item{Cx}{A dxd scatter matrix for the data x where the default is var(x). When Cx = I(d), it returns the sphercial depth.} } \value{ \code{depth} returns a k-vector where each entry is the elliptical depth of a point in \code{pts}. } \references{ Elmore, R. T., Hettmansperger, T. P. and Xuan, F. (2000) Spherical Data Depth and a Multivariate Median, \emph{Proceedings of Data Depth: Robust Multivariate Statistical Analysis, Computational Geometry and Applications}. } \seealso{\code{\link{regcr}} } \examples{ set.seed(100) x <- matrix(rnorm(200),nc = 2) depth(x[1:3, ], x) } \note{\code{depth} is used in \code{regcr}.} \keyword{file} mixtools/man/tonedata.Rd0000644000176200001440000000330513056055632014777 0ustar liggesusers\name{tonedata} \alias{tonedata} \docType{data} \title{Tone perception data} \author{Christian Hennig} \description{ The tone perception data stem from an experiment of Cohen (1980) and have been analyzed in de Veaux (1989) and Viele and Tong (2002). The dataset and this documentation file were copied from the fpc package by Christian Hennig. A pure fundamental tone was played to a trained musician. Electronically generated overtones were added, determined by a stretching ratio of \code{stretchratio}. 
\code{stretchratio=2.0} corresponds to the harmonic pattern usually heard in traditional definite pitched instruments. The musician was asked to tune an adjustable tone to the octave above the fundamental tone. \code{tuned} gives the ratio of the adjusted tone to the fundamental, i.e. \code{tuned=2.0} would be the correct tuning for all \code{stretchratio}-values. The data analyzed here belong to 150 trials with the same musician. In the original study, there were four further musicians. } \usage{data(tonedata)} \format{A data frame with 2 variables, \code{stretchratio} and \code{tuned}, and 150 cases.} \source{ Original source: Cohen, E. A. (1980), \emph{Inharmonic tone perception}. Unpublished Ph.D. dissertation, Stanford University R source: Hennig, Christian (2010), fpc: Flexible procedures for clustering, R package version 2.0-2. \url{https://cran.r-project.org/package=fpc} } \references{ de Veaux, R. D. (1989), Mixtures of Linear Regressions, \emph{Computational Statistics and Data Analysis} 8, 227-245. Viele, K. and Tong, B. (2002), Modeling with Mixtures of Linear Regressions, \emph{Statistics and Computing} 12, 315-330. } \keyword{datasets} mixtools/man/ellipse.Rd0000755000176200001440000000274112122210456014631 0ustar liggesusers\name{ellipse} \title{Draw Two-Dimensional Ellipse Based on Mean and Covariance} \alias{ellipse} \usage{ ellipse(mu, sigma, alpha = .05, npoints = 250, newplot = FALSE, draw = TRUE, ...) } \description{ Draw a two-dimensional ellipse that traces a bivariate normal density contour for a given mean vector, covariance matrix, and probability content. } \arguments{ \item{mu}{A 2-vector giving the mean.} \item{sigma}{A 2x2 matrix giving the covariance matrix.} \item{alpha}{Probability to be excluded from the ellipse. The default value is alpha = .05, which results in a 95\% ellipse.} \item{npoints}{Number of points comprising the border of the ellipse.} \item{newplot}{If newplot = TRUE and draw = TRUE, plot the ellipse on a new plot. 
If newplot = FALSE and draw = TRUE, add the ellipse to an existing plot.} \item{draw}{If TRUE, draw the ellipse.} \item{...}{Graphical parameters passed to \code{lines} or \code{plot} command.} } \value{ \code{ellipse} returns an \code{npoints}x2 matrix of the points forming the border of the ellipse. } \references{ Johnson, R. A. and Wichern, D. W. (2002) \emph{Applied Multivariate Statistical Analysis, Fifth Edition}, Prentice Hall. } \seealso{ \code{\link{regcr}} } \examples{ ## Produce a 95\% ellipse with the specified mean and covariance structure. mu <- c(1, 3) sigma <- matrix(c(1, .3, .3, 1.5), 2, 2) ellipse(mu, sigma, npoints = 200, newplot = TRUE) } \keyword{file} mixtools/man/npEM.Rd0000755000176200001440000001361012514002272014030 0ustar liggesusers\name{npEM} \title{Nonparametric EM-like Algorithm for Mixtures of Independent Repeated Measurements} \alias{npEM} \alias{npEMindrep} \alias{npEMindrepbw} \usage{ npEM(x, mu0, blockid = 1:ncol(x), bw = bw.nrd0(as.vector(as.matrix(x))), samebw = TRUE, h = bw, eps = 1e-8, maxiter = 500, stochastic = FALSE, verb = TRUE) } \description{ Returns nonparametric EM algorithm output (Benaglia et al, 2009) for mixtures of multivariate (repeated measures) data where the coordinates of a row (case) in the data matrix are assumed to be independent, conditional on the mixture component (subpopulation) from which they are drawn. } \arguments{ \item{x}{An \eqn{n\times r}{n x r} matrix of data. Each of the \eqn{n} rows is a case, and each case has \eqn{r} repeated measurements. 
These measurements are assumed to be conditionally independent, conditional on the mixture component (subpopulation) from which the case is drawn.} \item{mu0}{Either an \eqn{m\times r}{m x r} matrix specifying the initial centers for the \link{kmeans} function, or an integer \eqn{m} specifying the number of initial centers, which are then choosen randomly in \link{kmeans}} \item{blockid}{A vector of length \eqn{r} identifying coordinates (columns of \code{x}) that are assumed to be identically distributed (i.e., in the same block). For instance, the default has all distinct elements, indicating that no two coordinates are assumed identically distributed and thus a separate set of \eqn{m} density estimates is produced for each column of \eqn{x}. On the other hand, if \code{blockid=rep(1,ncol(x))}, then the coordinates in each row are assumed conditionally i.i.d.} \item{bw}{Bandwidth for density estimation, equal to the standard deviation of the kernel density. By default, a simplistic application of the default \code{\link{bw.nrd0}} bandwidth used by \code{\link{density}} to the entire dataset.} \item{samebw}{Logical: If \code{TRUE}, use the same bandwidth for each iteration and for each component and block. If \code{FALSE}, use a separate bandwidth for each component and block, and update this bandwidth at each iteration of the algorithm using a suitably modified \code{\link{bw.nrd0}} method as described in Benaglia et al (2011).} \item{h}{Alternative way to specify the bandwidth, to provide backward compatibility.} \item{eps}{Tolerance limit for declaring algorithm convergence. 
Convergence is declared whenever the maximum change in any coordinate of the \code{lambda} vector (of mixing proportion estimates) does not exceed \code{eps}.} \item{maxiter}{The maximum number of iterations allowed, for both stochastic and non-stochastic versions; for non-stochastic algorithms (\code{stochastic = FALSE}), convergence may be declared before \code{maxiter} iterations (see \code{eps} above).} \item{stochastic}{Flag, if FALSE (the default), runs the non-stochastic version of the npEM algorithm, as in Benaglia et al (2009). Set to TRUE to run a stochastic version which simulates the posteriors at each iteration, and runs for \code{maxiter} iterations.} \item{verb}{If TRUE, print updates for every iteration of the algorithm as it runs} } \value{ \code{npEM} returns a list of class \code{npEM} with the following items: \item{data}{The raw data (an \eqn{n\times r}{n x r} matrix).} \item{posteriors}{An \eqn{n\times m}{n x m} matrix of posterior probabilities for observation. If \code{stochastic = TRUE}, this matrix is computed from an average over the \code{maxiter} iterations.} \item{bandwidth}{If \code{samebw==TRUE}, same as the \code{bw} input argument; otherwise, value of \code{bw} matrix at final iteration. This information is needed by any method that produces density estimates from the output.} \item{blockid}{Same as the \code{blockid} input argument, but recoded to have positive integer values. Also needed by any method that produces density estimates from the output.} \item{lambda}{The sequence of mixing proportions over iterations.} \item{lambdahat}{The final mixing proportions if \code{stochastic = FALSE}, or the average mixing proportions if \code{stochastic = TRUE}.} \item{loglik}{The sequence of log-likelihoods over iterations.} } \seealso{ \code{\link{plot.npEM}}, \code{\link{normmixrm.sim}}, \code{\link{spEMsymloc}}, \code{\link{spEM}}, \code{\link{plotseq.npEM}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R. 
(2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics, 18, 505-526. \item Benaglia, T., Chauveau, D., Hunter, D. R., and Young, D. (2009), mixtools: An R package for analyzing finite mixture models. Journal of Statistical Software, 32(6):1-29. \item Benaglia, T., Chauveau, D. and Hunter, D.R. (2011), Bandwidth Selection in an EM-like algorithm for nonparametric multivariate mixtures. Nonparametric Statistics and Mixture Models: A Festschrift in Honor of Thomas P. Hettmansperger. World Scientific Publishing Co., pages 15-27. \item Bordes, L., Chauveau, D., and Vandekerkhove, P. (2007), An EM algorithm for a semiparametric mixture model, Computational Statistics and Data Analysis, 51: 5429-5443. } } \examples{ ## Examine and plot water-level task data set. ## First, try a 3-component solution where no two coordinates are ## assumed i.d. data(Waterdata) set.seed(100) \dontrun{ a <- npEM(Waterdata[,3:10], mu0=3, bw=4) # Assume indep but not iid plot(a) # This produces 8 plots, one for each coordinate } ## Next, same thing but pairing clock angles that are directly opposite one ## another (1:00 with 7:00, 2:00 with 8:00, etc.) 
\dontrun{ b <- npEM(Waterdata[,3:10], mu0=3, blockid=c(4,3,2,1,3,4,1,2), bw=4) # iid in pairs plot(b) # Now only 4 plots, one for each block } } \keyword{file} mixtools/man/summary.mvnpEM.Rd0000755000176200001440000000450012644775630016110 0ustar liggesusers\name{summary.mvnpEM} \alias{summary.mvnpEM} \alias{print.summary.mvnpEM} \title{Summarizing Fits for Nonparametric Mixture Models with Conditionally Independent Multivariate Component Densities} \usage{ \method{summary}{mvnpEM}(object, \dots) \method{print}{summary.mvnpEM}(x, digits=3, \dots) } \arguments{ \item{object,x}{an object of class \code{mvnpEM} such as a result of a call to \code{\link{mvnpEM}}} \item{digits}{Significant digits for printing values} \item{\dots}{further arguments passed to or from other methods.} } \description{ \code{\link[base]{summary}} method for class \code{mvnpEM}. } \details{ \code{\link{summary.mvnpEM}} prints means and variances of each block for each component. These quantities might not be part of the model, but they are estimated nonparametrically based on the posterior probabilities and the data. } \value{ The function \code{\link{summary.mvnpEM}} returns a list of type \code{summary.mvnpEM} with the following components: \item{n}{The number of observations} \item{m}{The number of mixture components} \item{B}{The number of blocks} \item{blockid}{The block ID (from 1 through B) for each of the coordinates of the multivariate observations. The \code{blockid} component is of length \eqn{r}, the dimension of each observation.} \item{means}{A \eqn{B\times m}{B x m} matrix giving the estimated mean of each block in each component.} \item{variances}{Same as \code{means} but giving the estimated variances instead.} } \references{ Benaglia, T., Chauveau, D., and Hunter, D. R. (2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, \emph{Journal of Computational and Graphical Statistics}, \bold{18(2)}, 505--526. Chauveau, D., and Hoang, V. T. 
L. (2015), Nonparametric mixture models with conditionally independent multivariate component densities, Preprint under revision. \url{https://hal.archives-ouvertes.fr/hal-01094837} } \seealso{ \code{\link{mvnpEM}}, \code{\link{plot.mvnpEM}} } \examples{ # Example as in Chauveau and Hoang (2015) with 6 coordinates \dontrun{ m=2; r=6; blockid <-c(1,1,2,2,3,3) # 3 bivariate blocks # generate some data x ... a <- mvnpEM(x, mu0=2, blockid, samebw=F) # adaptive bandwidth plot(a) # this S3 method produces 6 plots of univariate marginals summary(a)} } \keyword{file} mixtools/man/regcr.Rd0000755000176200001440000000652012122214144014273 0ustar liggesusers\name{regcr} \title{Add a Confidence Region or Bayesian Credible Region for Regression Lines to a Scatterplot} \alias{regcr} \usage{ regcr(beta, x, em.beta = NULL, em.sigma = NULL, alpha = .05, nonparametric = FALSE, plot = FALSE, xyaxes = TRUE, ...) } \description{ Produce a confidence or credible region for regression lines based on a sample of bootstrap beta values or posterior beta values. The beta parameters are the intercept and slope from a simple linear regression. } \arguments{ \item{beta}{An nx2 matrix of regression parameters. The first column gives the intercepts and the second column gives the slopes.} \item{x}{An n-vector of the predictor variable which is necessary when nonparametric = TRUE.} \item{em.beta}{The estimates for beta required when obtaining confidence regions. This is required for performing the standardization necessary when obtaining nonparametric confidence regions.} \item{em.sigma}{The estimates for the regression standard deviation required when obtaining confidence regions. This is required for performing the standardization necessary when obtaining nonparametric confidence regions.} \item{alpha}{The proportion of the beta sample to remove. 
In other words, 1-alpha is the level of the credible region.} \item{nonparametric}{ If nonparametric = TRUE, then the region is based on the convex hull of the remaining beta after trimming, which is accomplished using a data depth technique. If nonparametric = FALSE, then the region is based on the asymptotic normal approximation.} \item{plot}{If plot = TRUE, lines are added to the existing plot. The type of plot created depends on the value of xyaxes.} \item{xyaxes}{If xyaxes = TRUE and plot = TRUE, then a confidence or credible region for the regression lines is plotted on the x-y axes, presumably overlaid on a scatterplot of the data. If xyaxes = FALSE and plot = TRUE, the (convex) credible region for the regression line is plotted on the beta, or intercept-slope, axes, presumably overlaid on a scatterplot of beta.} \item{...}{Graphical parameters passed to \code{lines} or \code{plot} command.} } \value{ \code{regcr} returns a list containing the following items: \item{boundary}{A matrix of points in beta, or intercept-slope, space arrayed along the boundary of the confidence or credible region.} \item{upper}{A matrix of points in x-y space arrayed along the upper confidence or credible limit for the regression line.} \item{lower}{A matrix of points in x-y space arrayed along the lower confidence or credible limit for the regression line.} } \seealso{ \code{\link{regmixEM}}, \code{\link{regmixMH}} } \examples{ ## Nonparametric credible regions fit to NOdata. 
data(NOdata) attach(NOdata) set.seed(100) beta <- matrix(c(1.3, -0.1, 0.6, 0.1), 2, 2) sigma <- c(.02, .05) MH.out <- regmixMH(Equivalence, NO, beta = beta, s = sigma, sampsize = 2500, omega = .0013) attach(data.frame(MH.out$theta)) beta.c1 <- cbind(beta0.1[2400:2499], beta1.1[2400:2499]) beta.c2 <- cbind(beta0.2[2400:2499], beta1.2[2400:2499]) plot(NO, Equivalence) regcr(beta.c1, x = NO, nonparametric = TRUE, plot = TRUE, col = 2) regcr(beta.c2, x = NO, nonparametric = TRUE, plot = TRUE, col = 3) } \keyword{file} mixtools/man/plot.MCMC.Rd0000755000176200001440000000315212122213370014663 0ustar liggesusers\name{plot.mixMCMC} \title{Various Plots Pertaining to Mixture Model Output Using MCMC Methods} \alias{plot.mixMCMC} \usage{ \method{plot}{mixMCMC}(x, trace.plots = TRUE, summary.plots = FALSE, burnin = 2000, \dots) } \description{ Takes an object of class \code{mixMCMC} and returns various graphical output for select mixture models. } \arguments{ \item{x}{An object of class \code{mixMCMC}.} \item{trace.plots}{If TRUE, trace plots of the various parameters estimated by the MCMC methods is given.} \item{summary.plots}{Graphics pertaining to certain mixture models. The details are given below.} \item{burnin}{The values 1 to \code{burnin} are dropped when producing the plots in \code{summary.plots}.} \item{...}{Graphical parameters passed to \code{regcr} function.} } \value{ \code{plot.mixMCMC} returns trace plots of the various parameters estimated by the MCMC methods for all objects of class \code{mixMCMC}. In addition, other plots may be produced for the following k-component mixture model functions: \item{regmixMH}{Credible bands for the regression lines in a mixture of linear regressions. See \code{regcr} for more details.} } \seealso{ \code{\link{regcr}} } \examples{ ## M-H algorithm for NOdata with acceptance rate about 40\%. 
data(NOdata) attach(NOdata) set.seed(100) beta <- matrix(c(1.3, -0.1, 0.6, 0.1), 2, 2) sigma <- c(.02, .05) MH.out <- regmixMH(Equivalence, NO, beta = beta, s = sigma, sampsize = 2500, omega = .0013) plot(MH.out, summary.plots = TRUE, burnin = 2450, alpha = 0.01) } \keyword{file} mixtools/man/lambda.pert.Rd0000755000176200001440000000173612122211616015367 0ustar liggesusers\name{lambda.pert} \title{Perturbation of Mixing Proportions} \alias{lambda.pert} \usage{ lambda.pert(lambda, pert) } \description{ Perturbs a set of mixing proportions by first scaling the mixing proportions, then taking the logit of the scaled values, perturbing them, and inverting back to produce a set of new mixing proportions. } \arguments{ \item{lambda}{A vector of length k giving the mixing proportions which are to be perturbed.} \item{pert}{A vector (possibly of length k-1) for which to perturb \code{lambda}. If the length is less than k, then values of the vector are recycled. If length is greater than k, then only the first k values are used.} } \value{ \code{lambda.pert} returns new \code{lambda} values perturbed by \code{pert}. } \details{ This function is called by \code{regmixMH}. } \seealso{ \code{\link{regmixMH}} } \examples{ set.seed(100) x <- c(0.5, 0.2, 0.3) lambda.pert(x, rcauchy(3)) } \keyword{internal} mixtools/man/matsqrt.Rd0000755000176200001440000000076512122212260014666 0ustar liggesusers\name{matsqrt} \title{Calculates the Square Root of a Diagonalizable Matrix} \alias{matsqrt} \usage{ matsqrt(x) } \description{ Returns the square root of a diagonalizable matrix. } \arguments{ \item{x}{An nxn diagonalizable matrix.} } \value{ \code{matsqrt} returns the square root of \code{x}. } \details{ This function is called by \code{regcr}. 
} \seealso{ \code{\link{regcr}} } \examples{ a <- matrix(c(1, -0.2, -0.2, 1), 2, 2) matsqrt(a) } \keyword{internal} mixtools/man/multmixEM.Rd0000755000176200001440000000602412122212350015106 0ustar liggesusers\name{multmixEM} \title{EM Algorithm for Mixtures of Multinomials} \alias{multmixEM} \usage{ multmixEM(y, lambda = NULL, theta = NULL, k = 2, maxit = 10000, epsilon = 1e-08, verb = FALSE) } \description{ Return EM algorithm output for mixtures of multinomial distributions. } \arguments{ \item{y}{Either An nxp matrix of data (multinomial counts), where n is the sample size and p is the number of multinomial bins, or the output of the \code{\link{makemultdata}} function. It is not necessary that all of the rows contain the same number of multinomial trials (i.e., the row sums of \code{y} need not be identical).} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is random from uniform Dirichlet and number of components is determined by \code{theta}.} \item{theta}{Initial value of \code{theta} parameters. Should be a kxp matrix, where p is the number of columns of y and k is number of components. Each row of \code{theta} should sum to 1. If NULL, then each row is random from uniform Dirichlet. If both \code{lambda} and \code{theta} are NULL, then number of components is determined by k.} \item{k}{Number of components. 
Ignored unless \code{lambda} and \code{theta} are NULL.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{multmixEM} returns a list of class \code{mixEM} with items: \item{y}{The raw data.} \item{lambda}{The final mixing proportions.} \item{theta}{The final multinomial parameters.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{compCDF}}, \code{\link{makemultdata}}, \code{\link{multmixmodel.sel}} } \references{ \itemize{ \item McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. \item Elmore, R. T., Hettmansperger, T. P. and Xuan, F. (2004) The Sign Statistic, One-Way Layouts and Mixture Models, \emph{Statistical Science} \bold{19(4)}, 579--587. 
} } \examples{ ## The sulfur content of the coal seams in Texas set.seed(100) A <- c(1.51, 1.92, 1.08, 2.04, 2.14, 1.76, 1.17) B <- c(1.69, 0.64, .9, 1.41, 1.01, .84, 1.28, 1.59) C <- c(1.56, 1.22, 1.32, 1.39, 1.33, 1.54, 1.04, 2.25, 1.49) D <- c(1.3, .75, 1.26, .69, .62, .9, 1.2, .32) E <- c(.73, .8, .9, 1.24, .82, .72, .57, 1.18, .54, 1.3) dis.coal <- makemultdata(A, B, C, D, E, cuts = median(c(A, B, C, D, E))) em.out <- multmixEM(dis.coal) em.out[1:4] } \keyword{file} mixtools/man/mvnormalmixEM.Rd0000755000176200001440000000704412122212544015770 0ustar liggesusers\name{mvnormalmixEM} \title{EM Algorithm for Mixtures of Multivariate Normals} \alias{mvnormalmixEM} \usage{ mvnormalmixEM(x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) } \description{ Return EM algorithm output for mixtures of multivariate normal distributions. } \arguments{ \item{x}{A matrix of size nxp consisting of the data.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is random from uniform Dirichlet and number of components is determined by \code{mu}.} \item{mu}{A list of size k consisting of initial values for the p-vector mean parameters. If NULL, then the vectors are generated from a normal distribution with mean and standard deviation according to a binning method done on the data. If both \code{lambda} and \code{mu} are NULL, then number of components is determined by \code{sigma}.} \item{sigma}{A list of size k consisting of initial values for the pxp variance-covariance matrices. If NULL, then \code{sigma} is generated using the data. If \code{lambda}, \code{mu}, and \code{sigma} are NULL, then number of components is determined by \code{k}.} \item{k}{Number of components. 
Ignored unless \code{lambda}, \code{mu}, and \code{sigma} are all NULL.} \item{arbmean}{If TRUE, then the component densities are allowed to have different \code{mu}s. If FALSE, then a scale mixture will be fit.} \item{arbvar}{If TRUE, then the component densities are allowed to have different \code{sigma}s. If FALSE, then a location mixture will be fit.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{normalmixEM} returns a list of class \code{mixEM} with items: \item{x}{The raw data.} \item{lambda}{The final mixing proportions.} \item{mu}{A list of with the final mean vectors.} \item{sigma}{A list with the final variance-covariance matrices.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{normalmixEM}} } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. } \examples{ ##Fitting randomly generated data with a 2-component location mixture of bivariate normals. set.seed(100) x.1 <- rmvnorm(40, c(0, 0)) x.2 <- rmvnorm(60, c(3, 4)) X.1 <- rbind(x.1, x.2) mu <- list(c(0, 0), c(3, 4)) out.1 <- mvnormalmixEM(X.1, arbvar = FALSE, mu = mu, epsilon = 1e-02) out.1[2:5] ##Fitting randomly generated data with a 2-component scale mixture of bivariate normals. 
x.3 <- rmvnorm(40, c(0, 0), sigma = matrix(c(200, 1, 1, 150), 2, 2)) x.4 <- rmvnorm(60, c(0, 0)) X.2 <- rbind(x.3, x.4) lambda <- c(0.40, 0.60) sigma <- list(diag(1, 2), matrix(c(200, 1, 1, 150), 2, 2)) out.2 <- mvnormalmixEM(X.2, arbmean = FALSE, sigma = sigma, lambda = lambda, epsilon = 1e-02) out.2[2:5] } \keyword{file} mixtools/man/NOdata.Rd0000755000176200001440000000147113616576437014370 0ustar liggesusers\name{NOdata} \docType{data} \alias{NOdata} \title{Ethanol Fuel Data Set} \description{ This data set gives the equivalence ratios and peak nitrogen oxide emissions in a study using pure ethanol as a spark-ignition engine fuel. } \usage{ data(NOdata) } \format{This data frame consists of: \itemize{ \item{\code{NO}}{The peak nitrogen oxide emission levels.} \item{\code{Equivalence}}{The equivalence ratios for the engine at compression ratios from 7.5 to 18.} } } \source{Brinkman, N. D. (1981) Ethanol Fuel -- A Single-Cylinder Engine Study of Efficiency and Exhaust Emissions, \emph{S.A.E. Transactions}, 68. } \references{ Hurn, M., Justel, A. and Robert, C. P. (2003) Estimating Mixtures of Regressions, \emph{Journal of Computational and Graphical Statistics} \bold{12(1)}, 55--79. } \keyword{datasets} mixtools/man/rnormmix.Rd0000755000176200001440000000214012122215020015030 0ustar liggesusers\name{rnormmix} \title{Simulate from Mixtures of Normals} \alias{normmix.sim} \alias{rnormmix} \usage{ rnormmix(n, lambda=1, mu=0, sigma=1) } \description{ Simulate from a mixture of univariate normal distributions. } \arguments{ \item{n}{Number of cases to simulate.} \item{lambda}{Vector of mixture probabilities, with length equal to \eqn{m}, the desired number of components (subpopulations). This is assumed to sum to 1; if not, it is normalized.} \item{mu}{Vector of means.} \item{sigma}{Vector of standard deviations.} } \value{ \code{rnormmix} returns an \eqn{n}-vector sampled from an \eqn{m}-component mixture of univariate normal distributions. 
} \details{This function simply calls \code{\link{rmvnormmix}}.} \seealso{ \code{\link{makemultdata}}, \code{\link{rmvnormmix}} } \examples{ ##Generate data from a 2-component mixture of normals. set.seed(100) n <- 500 lambda <- rep(1, 2)/2 mu <- c(0, 5) sigma <- rep(1, 2) mixnorm.data <- rnormmix(n, lambda, mu, sigma) ##A histogram of the simulated data. hist(mixnorm.data) } \keyword{file} mixtools/man/rmvnorm.Rd0000755000176200001440000000141311665556372014714 0ustar liggesusers\name{rmvnorm} \alias{rmvnorm} \title{Simulate from a Multivariate Normal Distribution} \description{ Simulate from a multiviate normal distribution } \usage{ rmvnorm(n, mu=NULL, sigma=NULL) } \arguments{ \item{n}{Number of vectors to simulate} \item{mu}{mean vector} \item{sigma}{covariance matrix, assumed symmetric and nonnegative definite} } \value{ An \eqn{n \times d}{n x d} matrix in which each row is an independently generated realization from the desired multivariate normal distribution } \details{ This function uses an \code{\link{eigen}} decomposition assuming \code{sigma} is symmetric. In particular, the upper triangle of \code{sigma} is ignored. } \seealso{ \code{\link{eigen}}, \code{\link{dnorm}}, \code{\link{dmvnorm}} } \keyword{distribution} mixtools/man/Waterdata.Rd0000755000176200001440000000657613616576575015154 0ustar liggesusers\name{Waterdata} \docType{data} \title{Water-Level Task Data Set} \alias{Waterdata} \alias{WaterdataFull} \usage{ data(Waterdata) } \description{ This data set arises from the water-level task proposed by the Swiss psychologist Jean Piaget to assess children's understanding of the physical world. This involves presenting a child with a rectangular vessel with a cap, affixed to a wall, that can be tilted (like the minute hand of a clock) to point in any direction. 
A separate disk with a water line indicated on it, which can similarly be spun so that the water line may assume any desired angle with the horizontal, is positioned so that by spinning this disk, the child subject may make the hypothetical surface of water inside the vessel assume any desired orientation. For each of eight different orientations of the vessel, corresponding to the clock angles at 1:00, 2:00, 4:00, 5:00, 7:00, 8:00, 10:00, and 11:00, the child subject is asked to position the water level as it would appear in reality if water were in the vessel. The measurement is the acute angle with the horizontal, in degrees, assumed by the water line after it is positioned by the child. A sign is attached to the measurement to indicate whether the line slopes up (positive) or down (negative) from left to right. Thus, each child has 8 repeated measurements, one for each vessel angle, and the range of possible values are from -90 to 90. The setup of the experiment, along with a photograph of the testing apparatus, is given by Thomas and Jamison (1975). A more detailed analysis using a subset of 405 of the original 579 subjects is given by Thomas and Lohaus (1993); further analyses using the functions in \code{mixtools} are given by Benaglia et al (2008) and Levine et al (2011), among others. There are two versions of the dataset included in \code{mixtools}. The full dataset, called \code{WaterdataFull}, has 579 individuals. The dataset called \code{Waterdata} is a subset of 405 individuals, comprising all children aged 11 years or more and omitting any individuals with any observations equal to 100, which in this context indicates a missing value (since all of the degree measurements should be in the range from -90 to +90, 100 is not a possible value). } \format{These data frames consist of 405 or 579 rows, one row for each child. 
There are ten columns: The age (in years) and sex (where 1=male and 0=female) are given for each individual along with the degree of deviation from the horizontal for 8 specified clock-hour orientations (11, 4, 2, 7, 10, 5, 1, and 8 o'clock, in order). } \source{ Benaglia, T., Chauveau, D., and Hunter, D.R. (2009), An EM-Like Algorithm for Semi- and Non-Parametric Estimation in Multivariate Mixtures, \emph{Journal of Computational and Graphical Statistics}, 18: 505-526. Levine, M., Hunter, D.R., and Chauveau, D. (2011), Maximum Smoothed Likelihood for Multivariate Mixtures, \emph{Biometrika}, 98(2): 403-416. Thomas, H. and Jamison, W. (1975), On the Acquisition of Understanding that Still Water is Horizontal, \emph{Merrill-Palmer Quarterly of Behavior and Development}, 21(1): 31-44. Thomas, H. and Lohaus, A. (1993), \emph{Modeling Growth and Individual Differences in Spatial Tasks}, University of Chicago Press, Chicago, available on JSTOR. } \keyword{datasets} mixtools/man/lambda.Rd0000755000176200001440000000245611665556372014444 0ustar liggesusers\name{lambda} \title{Local Estimation for Lambda in Mixtures of Regressions} \alias{lambda} \usage{ lambda(z, x, xi, h = NULL, kernel = c("Gaussian", "Beta", "Triangle", "Cosinus", "Optcosinus"), g = 0) } \description{ Return local estimates of the mixing proportions from each component of a mixture of regressions model using output from an EM algorithm. } \arguments{ \item{z}{An nxk matrix of posterior probabilities obtained from the EM algorithm.} \item{x}{A vector of values for which the local estimation is calculated.} \item{xi}{An nx(p-1) matrix of the predictor values.} \item{h}{The bandwidth controlling the size of the window used for the local estimation.} \item{kernel}{The type of kernel to be used for the local estimation.} \item{g}{A shape parameter required for the symmetric beta kernel. The default is \code{g} = 0 which yields the uniform kernel. 
Some common values are \code{g} = 1 for the Epanechnikov kernel, \code{g} = 2 for the biweight kernel, and \code{g} = 3 for the triweight kernel.} } \value{ \code{lambda} returns local estimates of the mixing proportions for the inputted \code{x} vector. } \seealso{ \code{\link{regmixEM.loc}} } \note{\code{lambda} is for use within \code{regmixEM.loc}.} \keyword{internal} mixtools/man/RTdata.Rd0000755000176200001440000000233213616576462014374 0ustar liggesusers\name{RTdata} \docType{data} \title{Reaction Time (RT) Data Set} \alias{RTdata} \usage{ data(RTdata) } \description{ This data set involves normally developing children 9 years of age presented with two visual simuli on a computer monitor. The left image is the target stimuli and on the right is either an exact copy or a mirror image of the target stimuli. The child must press one key if it is a copy or another key if it is a mirror image. The data consists of the reaction times (RT) of the 197 children who provided correct responses for all 6 task trials. } \format{This data frame consists of 197 children (the rows) and their 6 responses (the columns) to the stimulus presented. The response (RT) is recorded in milliseconds. } \references{ Cruz-Medina, I. R., Hettmansperger, T. P. and Thomas, H. (2004) Semiparametric Mixture Models and Repeated Measures: The Multinomial Cut Point Model, \emph{Applied Statistics} \bold{53(3)}, 463--474. Miller, C. A., Kail, R., Leonard, L. B. and Tomblin, J. B. (2001) Speed of Processing in Children with Specific Language Impairment, \emph{Journal of Speech, Language, and Hearing Research} \bold{44(2)}, 416--433. 
} \seealso{ \code{\link{RTdata2}} } \keyword{datasets} mixtools/man/density.npEM.Rd0000755000176200001440000000456013055645065015530 0ustar liggesusers\name{density.npEM} \title{Normal kernel density estimate for nonparametric EM output} \alias{density.npEM} \usage{ \method{density}{npEM}(x, u=NULL, component=1, block=1, scale=FALSE, \dots) } \description{ Takes an object of class \code{npEM} and returns an object of class \code{\link{density}} giving the kernel density estimate for the selected component and, if applicable, the selected block. } \arguments{ \item{x}{An object of class \code{npEM} such as the output of the \code{\link{npEM}} or \code{\link{spEMsymloc}} functions.} \item{u}{Vector of points at which the density is to be evaluated} \item{component}{Mixture component number; should be an integer from 1 to the number of columns of \code{x$posteriors}.} \item{block}{Block of repeated measures. Only applicable in repeated measures case, for which \code{x$blockid} exists; should be an integer from 1 to \code{max(x$blockid)}.} \item{scale}{Logical: If TRUE, multiply the density values by the corresponding mixing proportions found in \code{x$lambdahat}} \item{\dots}{Additional arguments; not used by this method.} } \details{ The bandwidth is taken to be the same as that used to produce the \code{npEM} object, which is given by \code{x$bandwidth}. } \value{ \code{density.npEM} returns a list of type \code{"density"}. See \code{\link{density}} for details. In particular, the output of \code{density.npEM} may be used directly by functions such as \code{\link{plot}} or \code{\link{lines}}. 
} \seealso{ \code{\link{npEM}}, \code{\link{spEMsymloc}}, \code{\link{plot.npEM}} } \examples{ ## Look at histogram of Old Faithful waiting times data(faithful) Minutes <- faithful$waiting hist(Minutes, freq=FALSE) ## Superimpose equal-variance normal mixture fit: set.seed(100) nm <- normalmixEM(Minutes, mu=c(50,80), sigma=5, arbvar=FALSE, fast=TRUE) x <- seq(min(Minutes), max(Minutes), len=200) for (j in 1:2) lines(x, nm$lambda[j]*dnorm(x, mean=nm$mu[j], sd=nm$sigma), lwd=3, lty=2) ## Superimpose several semiparametric fits with different bandwidths: bw <- c(1, 3, 5) for (i in 1:3) { sp <- spEMsymloc(Minutes, c(50,80), bw=bw[i], eps=1e-3) for (j in 1:2) lines(density(sp, component=j, scale=TRUE), col=1+i, lwd=2) } legend("topleft", legend=paste("Bandwidth =",bw), fill=2:4) } \keyword{file} mixtools/man/poisregmixEM.Rd0000755000176200001440000000554212122213664015612 0ustar liggesusers\name{poisregmixEM} \title{EM Algorithm for Mixtures of Poisson Regressions} \alias{poisregmixEM} \usage{ poisregmixEM(y, x, lambda = NULL, beta = NULL, k = 2, addintercept = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) } \description{ Returns EM algorithm output for mixtures of Poisson regressions with arbitrarily many components. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An nxp matrix of predictors. See \code{addintercept} below.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is random from uniform Dirichlet and number of components is determined by \code{beta}.} \item{beta}{Initial value of \code{beta} parameters. Should be a pxk matrix, where p is the number of columns of x and k is number of components. If NULL, then \code{beta} is generated by binning the data into k bins and using \code{glm} on the values in each of the bins. 
If both \code{lambda} and \code{beta} are NULL, then number of components is determined by \code{k}.} \item{k}{Number of components. Ignored unless \code{lambda} and \code{beta} are both NULL.} \item{addintercept}{If TRUE, a column of ones is appended to the x matrix before the value of p is calculated.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{poisregmixEM} returns a list of class \code{mixEM} with items: \item{x}{The predictor values.} \item{y}{The response values.} \item{lambda}{The final mixing proportions.} \item{beta}{The final Poisson regression coefficients.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{logisregmixEM}} } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. Wang, P., Puterman, M. L., Cockburn, I. and Le, N. (1996) Mixed Poisson Regression Models with Covariate Dependent Rates, \emph{Biometrics}, \bold{52(2)}, 381--400. } \examples{ ## EM output for data generated from a 2-component model. 
set.seed(100) beta <- matrix(c(1, .5, .7, -.8), 2, 2) x <- runif(50, 0, 10) xbeta <- cbind(1, x)\%*\%beta w <- rbinom(50, 1, .5) y <- w*rpois(50, exp(xbeta[, 1]))+(1-w)*rpois(50, exp(xbeta[, 2])) out <- poisregmixEM(y, x, verb = TRUE, epsilon = 1e-03) out } \keyword{file} mixtools/man/plot.mvnpEM.Rd0000755000176200001440000000344613046171330015361 0ustar liggesusers\name{plot.mvnpEM} \title{Plots of Marginal Density Estimates from the mvnpEM Algorithm Output} \alias{plot.mvnpEM} \usage{ \method{plot}{mvnpEM}(x, truenorm = FALSE, lambda = NULL, mu = NULL, v = NULL, lgdcex = 1, \dots) } \description{ Takes an object of class \code{mvnpEM}, as the one returned by the \code{\link{mvnpEM}} algorithm, and returns a set of plots of the density estimates for each coordinate within each multivariate block. All the components are displayed on each plot so it is possible to see the mixture structure for each coordinate and block. The final bandwidth values are also displayed, in a format depending on the bandwidth strategy . } \arguments{ \item{x}{An object of class \code{mvnpEM} such as the output of the \code{\link{mvnpEM}} function} \item{truenorm}{Mostly for checking purpose, if the nonparametric model is to be compared with a multivariate Gaussian mixture as the true model.} \item{lambda}{true weight parameters, for Gaussian models only (see above)} \item{mu}{true mean parameters, for Gaussian models only (see above)} \item{v}{true covariance matrices, for Gaussian models only (see above)} \item{lgdcex}{Character expansion factor for \code{\link{legend}}.} \item{\dots}{Any remaining arguments are passed to \code{\link{hist}}.} } \value{ \code{plot.mvnpEM} currently just plots the figure. } \seealso{ \code{\link{mvnpEM}}, \code{\link{npEM}}, \code{\link{density.npEM}} } \examples{ # example as in Chauveau and Hoang (2015) with 6 coordinates \dontrun{ m=2; r=6; blockid <-c(1,1,2,2,3,3) # 3 bivariate blocks # generate some data x ... 
a <- mvnpEM(x, mu0=2, blockid, samebw=F) # adaptive bandwidth plot(a) # this S3 method produces 6 plots of univariate marginals summary(a)} } \keyword{file} mixtools/man/CO2data.Rd0000755000176200001440000000154013616576412014425 0ustar liggesusers\name{CO2data} \docType{data} \title{GNP and CO2 Data Set} \alias{CO2data} \usage{ data(CO2data) } \description{ This data set gives the gross national product (GNP) per capita in 1996 for various countries as well as their estimated carbon dioxide (CO2) emission per capita for the same year. } \format{This data frame consists of 28 countries and the following columns: \itemize{ \item{\code{GNP}}{The gross national product per capita in 1996.} \item{\code{CO2}}{The estimated carbon dioxide emission per capita in 1996.} \item{\code{country}}{An abbreviation pertaining to the country measured (e.g., "GRC" = Greece and "CH" = Switzerland).} } } \references{ Hurn, M., Justel, A. and Robert, C. P. (2003) Estimating Mixtures of Regressions, \emph{Journal of Computational and Graphical Statistics} \bold{12(1)}, 55--79. } \keyword{datasets} mixtools/man/boot.se.Rd0000755000176200001440000000345312264135704014557 0ustar liggesusers\name{boot.se} \title{Performs Parametric Bootstrap for Standard Error Approximation} \alias{boot.se} \usage{ boot.se(em.fit, B = 100, arbmean = TRUE, arbvar = TRUE, N = NULL, ...) } \description{ Performs a parametric bootstrap by producing B bootstrap samples for the parameters in the specified mixture model. } \arguments{ \item{em.fit}{An object of class \code{mixEM}. The estimates produced in \code{em.fit} will be used as the parameters for the distribution from which we generate the bootstrap data.} \item{B}{The number of bootstrap samples to produce. The default is 100, but ideally, values of 1000 or more would be more acceptable.} \item{arbmean}{If FALSE, then a scale mixture analysis can be performed for \code{mvnormalmix}, \code{normalmix}, \code{regmix}, or \code{repnormmix}. 
The default is TRUE.} \item{arbvar}{If FALSE, then a location mixture analysis can be performed for \code{mvnormalmix}, \code{normalmix}, \code{regmix}, or \code{repnormmix}. The default is TRUE.} \item{N}{An n-vector of number of trials for the logistic regression type \code{logisregmix}. If NULL, then \code{N} is an n-vector of 1s for binary logistic regression.} \item{...}{Additional arguments passed to the various EM algorithms for the mixture of interest.} } \value{ \code{boot.se} returns a list with the bootstrap samples and standard errors for the mixture of interest. } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. } \examples{ ## Bootstrapping standard errors for a regression mixture case. data(NOdata) attach(NOdata) set.seed(100) em.out <- regmixEM(Equivalence, NO, arbvar = FALSE) out.bs <- boot.se(em.out, B = 10, arbvar = FALSE) out.bs } \keyword{file} mixtools/man/hmeEM.Rd0000755000176200001440000000627012122211062014162 0ustar liggesusers\name{hmeEM} \title{EM Algorithm for Mixtures-of-Experts} \alias{hmeEM} \usage{ hmeEM(y, x, lambda = NULL, beta = NULL, sigma = NULL, w = NULL, k = 2, addintercept = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) } \description{ Returns EM algorithm output for a mixture-of-experts model. Currently, this code only handles a 2-component mixture-of-experts, but will be extended to the general k-component hierarchical mixture-of-experts. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An nxp matrix of predictors. See \code{addintercept} below.} \item{lambda}{Initial value of mixing proportions, which are modeled as an inverse logit function of the predictors. Entries should sum to 1. If NULL, then \code{lambda} is taken as 1/\code{k} for each \code{x}.} \item{beta}{Initial value of \code{beta} parameters. Should be a pxk matrix, where p is the number of columns of x and k is number of components. 
If NULL, then \code{beta} has standard normal entries according to a binning method done on the data.} \item{sigma}{A vector of standard deviations. If NULL, then \eqn{1/\code{sigma}^2} has random standard exponential entries according to a binning method done on the data.} \item{w}{A p-vector of coefficients for the way the mixing proportions are modeled. See \code{lambda}.} \item{k}{Number of components. Currently, only \code{k}=2 is accepted.} \item{addintercept}{If TRUE, a column of ones is appended to the x matrix before the value of p is calculated.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{hmeEM} returns a list of class \code{mixEM} with items: \item{x}{The set of predictors (which includes a column of 1's if \code{addintercept} = TRUE).} \item{y}{The response values.} \item{w}{The final coefficients for the functional form of the mixing proportions.} \item{lambda}{An nxk matrix of the final mixing proportions.} \item{beta}{The final regression coefficients.} \item{sigma}{The final standard deviations. If \code{arbmean} = FALSE, then only the smallest standard deviation is returned. See \code{scale} below.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{regmixEM}} } \references{ Jacobs, R. A., Jordan, M. I., Nowlan, S. J. and Hinton, G. E. (1991) Adaptive Mixtures of Local Experts, \emph{Neural Computation} \bold{3(1)}, 79--87. McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. } \examples{ ## EM output for NOdata. 
data(NOdata) attach(NOdata) set.seed(100) em.out <- regmixEM(Equivalence, NO) hme.out <- hmeEM(Equivalence, NO, beta = em.out$beta) hme.out[3:7] } \keyword{file} mixtools/man/flaremixEM.Rd0000755000176200001440000000550213616712247015241 0ustar liggesusers\name{flaremixEM} \title{EM Algorithm for Mixtures of Regressions with Flare} \alias{flaremixEM} \usage{ flaremixEM(y, x, lambda = NULL, beta = NULL, sigma = NULL, alpha = NULL, nu = NULL, epsilon = 1e-04, maxit = 10000, verb = FALSE, restart = 50) } \description{ Returns output for 2-component mixture of regressions with flaring using an EM algorithm with one step of Newton-Raphson requiring an adaptive barrier for maximization of the objective function. A mixture of regressions with flare occurs when there appears to be a common regression relationship for the data, but the error terms have a mixture structure of one normal component and one exponential component. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An n-vector of predictor values. An intercept term will be added by default.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1.} \item{beta}{Initial value of \code{beta} parameters. Should be a 2x2 matrix where the columns correspond to the component.} \item{sigma}{A vector of standard deviations.} \item{alpha}{A scalar for the exponential component's rate.} \item{nu}{A vector specifying the barrier constants to use. The first barrier constant where the algorithm converges is used.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} \item{restart}{The number of times to restart the algorithm in case convergence is not attained. 
The default is 50.} } \value{ \code{flaremixEM} returns a list of class \code{mixEM} with items: \item{x}{The set of predictors (which includes a column of 1's).} \item{y}{The response values.} \item{posterior}{An nx2 matrix of posterior probabilities for observations.} \item{lambda}{The final mixing proportions.} \item{beta}{The final regression coefficients.} \item{sigma}{The final standard deviations.} \item{alpha}{The final exponential rate.} \item{loglik}{The final log-likelihood.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{regmixEM}} } \examples{ ## Simulation output. set.seed(100) j=1 while(j == 1){ x1 <- runif(30, 0, 10) x2 <- runif(20, 10, 20) x3 <- runif(30, 20, 30) y1 <- 3+4*x1+rnorm(30, sd = 1) y2 <- 3+4*x2+rexp(20, rate = .05) y3 <- 3+4*x3+rnorm(30, sd = 1) x <- c(x1, x2, x3) y <- c(y1, y2, y3) nu <- (1:30)/2 out <- try(flaremixEM(y, x, beta = c(3, 4), nu = nu, lambda = c(.75, .25), sigma = 1), silent = TRUE) if(any(class(out) == "try-error")){ j <- 1 } else j <- 2 } out[4:7] plot(x, y, pch = 19) abline(out$beta) } \keyword{file} mixtools/man/aug.x.Rd0000755000176200001440000000236312112155706014223 0ustar liggesusers\name{aug.x} \title{Augmented Predictor Function} \alias{aug.x} \usage{ aug.x(X, cp.locs, cp, delta = NULL) } \description{ Creates the augmented predictor matrix based on an appropriately defined changepoint structure. } \arguments{ \item{X}{The raw matrix of predictor values. Note that the raw data matrix should not include a columns of 1's.} \item{cp.locs}{The locations of the changepoints. The length of this vector must be equal to the sum of the entries of \code{cp}.} \item{cp}{A vector having length equal to the number of predictors.} \item{delta}{A vector to accommodate discontinuities. If NULL, then no discontinuities are included. 
Otherwise, this must be a vector of the same length as \code{cp.locs}.} } \value{ \code{aug.x} returns a matrix of the original matrix \code{X} with the predictor adjusted for changepoints and (optional) discontinuities. } \details{ This function is called by \code{segregmixEM} and the associated internal functions. } \seealso{ \code{\link{segregmixEM}} } \examples{ x <- matrix(1:30, nrow = 10) cp <- c(1, 3, 0) cp.locs <- c(3, 12, 14, 16) d <- rep(0, 4) x1 <- aug.x(x, cp.locs, cp, delta = NULL) x1 x2 <- aug.x(x, cp.locs, cp, delta = d) x2 } \keyword{internal} mixtools/man/plot.spEMN01.Rd0000755000176200001440000000250012225537500015273 0ustar liggesusers\name{plot.spEMN01} \alias{plot.spEMN01} \title{Plot mixture pdf for the semiparametric mixture model output by spEMsymlocN01} \description{Plot mixture density for the semiparametric mixture model output by spEMsymlocN01, with one component known and set to normal(0,1), and a symmetric nonparametric density with location parameter.} \usage{ \method{plot}{spEMN01}(x, bw = x$bandwidth, knownpdf = dnorm, add.plot = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{An object of class "spEMN01" as returned by spEMsymlocN01} \item{bw}{Bandwidth for weighted kernel density estimation.} \item{knownpdf}{The known density of component 1, default to \code{dnorm}.} \item{add.plot}{Set to TRUE to add to an existing plot.} \item{...}{further arguments passed to \code{plot} if \code{add.plot = FALSE}, and to \code{lines} if \code{add.plot = TRUE}.} } \value{A plot of the density of the mixture} \references{ \itemize{ \item Chauveau, D., Saby, N., Orton, T. G., Lemercier B., Walter, C. and Arrouys, D. Large-scale simultaneous hypothesis testing in soil monitoring: A semi-parametric mixture approach, preprint (2013). 
} } \author{Didier Chauveau} %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{spEMsymlocN01}}} % \keyword{file}mixtools/man/regmixEM.mixed.Rd0000755000176200001440000001272412644763360016040 0ustar liggesusers\name{regmixEM.mixed} \title{EM Algorithm for Mixtures of Regressions with Random Effects} \alias{regmixEM.mixed} \usage{ regmixEM.mixed(y, x, w = NULL, sigma = NULL, arb.sigma = TRUE, alpha = NULL, lambda = NULL, mu = NULL, rho = NULL, R = NULL, arb.R = TRUE, k = 2, ar.1 = FALSE, addintercept.fixed = FALSE, addintercept.random = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) } \description{ Returns EM algorithm output for mixtures of multiple regressions with random effects and an option to incorporate fixed effects and/or AR(1) errors. } \arguments{ \item{y}{A list of N response trajectories with (possibly) varying dimensions of length \eqn{n_i}.} \item{x}{A list of N design matrices of dimensions \eqn{(n_i)\times p}{(n_i) x p}. Each trajectory in y has its own design matrix.} \item{w}{A list of N known explanatory variables having dimensions \eqn{(n_i)\times q}{(n-1) x q}. If \code{mixed} = FALSE, then \code{w} is replaced by a list of N zeros.} \item{sigma}{A vector of standard deviations. If NULL, then \eqn{1/s^2} has random standard exponential entries according to a binning method done on the data.} \item{arb.sigma}{If TRUE, then \code{sigma} is k-dimensional. Else a common standard deviation is assumed.} \item{alpha}{A q-vector of unknown regression parameters for the fixed effects. If NULL and \code{mixed} = TRUE, then \code{alpha} is random from a normal distribution with mean and variance according to a binning method done on the data. If \code{mixed} = FALSE, then \code{alpha} = 0.} \item{lambda}{Initial value of mixing proportions for the assumed mixture structure on the regression coefficients. Entries should sum to 1. This determines number of components. 
If NULL, then \code{lambda} is random from uniform Dirichlet and the number of components is determined by \code{mu}.} \item{mu}{A pxk matrix of the mean for the mixture components of the random regression coefficients. If NULL, then the columns of \code{mu} are random from a multivariate normal distribution with mean and variance determined by a binning method done on the data.} \item{rho}{An Nxk matrix giving initial values for the correlation term in an AR(1) process. If NULL, then these values are simulated from a uniform distribution on the interval (-1, 1).} \item{R}{A list of N pxp covariance matrices for the mixture components of the random regression coefficients. If NULL, then each matrix is random from a standard Wishart distribution according to a binning method done on the data.} \item{arb.R}{If TRUE, then \code{R} is a list of N pxp covariance matrices. Else, one common covariance matrix is assumed.} \item{k}{Number of components. Ignored unless \code{lambda} is NULL.} \item{ar.1}{If TRUE, then an AR(1) process on the error terms is included. 
The default is FALSE.} \item{addintercept.fixed}{If TRUE, a column of ones is appended to the matrices in w.} \item{addintercept.random}{If TRUE, a column of ones is appended to the matrices in x before p is calculated.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{regmixEM} returns a list of class \code{mixEM} with items: \item{x}{The predictor values corresponding to the random effects.} \item{y}{The response values.} \item{w}{The predictor values corresponding to the (optional) fixed effects.} \item{lambda}{The final mixing proportions.} \item{mu}{The final mean vectors.} \item{R}{The final covariance matrices.} \item{sigma}{The final component error standard deviations.} \item{alpha}{The final regression coefficients for the fixed effects.} \item{rho}{The final error correlation values if an AR(1) process is included.} \item{loglik}{The final log-likelihood.} \item{posterior.z}{An Nxk matrix of posterior membership probabilities.} \item{posterior.beta}{A list of N pxk matrices giving the posterior regression coefficient values.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{regmixEM}}, \code{\link{post.beta}} } \references{ Xu, W. and Hedeker, D. (2001) A Random-Effects Mixture Model for Classifying Treatment Response in Longitudinal Clinical Trials, \emph{Journal of Biopharmaceutical Statistics}, \bold{11(4)}, 253--273. Young, D. S. and Hunter, D. R. (2015) Random Effects Regression Mixtures for Analyzing Infant Habituation, \emph{Journal of Applied Statistics}, \bold{42(7)}, 1421--1441. } \examples{ ## EM output for simulated data from 2-component mixture of random effects. 
data(RanEffdata) set.seed(100) x <- lapply(1:length(RanEffdata), function(i) matrix(RanEffdata[[i]][, 2:3], ncol = 2)) x <- x[1:20] y <- lapply(1:length(RanEffdata), function(i) matrix(RanEffdata[[i]][, 1], ncol = 1)) y <- y[1:20] lambda <- c(0.45, 0.55) mu <- matrix(c(0, 4, 100, 12), 2, 2) sigma <- 2 R <- list(diag(1, 2), diag(1, 2)) em.out <- regmixEM.mixed(y, x, sigma = sigma, arb.sigma = FALSE, lambda = lambda, mu = mu, R = R, addintercept.random = FALSE, epsilon = 1e-02, verb = TRUE) em.out[4:10] } \keyword{file} mixtools/man/test.equality.mixed.Rd0000755000176200001440000000523312122215664017121 0ustar liggesusers\name{test.equality.mixed} \title{Performs Chi-Square Test for Mixed Effects Mixtures} \alias{test.equality.mixed} \usage{ test.equality.mixed(y, x, w=NULL, arb.R = TRUE, arb.sigma = FALSE, lambda = NULL, mu = NULL, sigma = NULL, R = NULL, alpha = NULL, ...) } \description{ Performs a likelihood ratio test of either common variance terms between the response trajectories in a mixture of random (or mixed) effects regressions or for common variance-covariance matrices for the random effects mixture distribution.} \arguments{ \item{y}{The responses for \code{regmixEM.mixed}.} \item{x}{The predictors for the random effects in \code{regmixEM.mixed}.} \item{w}{The predictors for the (optional) fixed effects in \code{regmixEM.mixed}.} \item{arb.R}{If FALSE, then a test for different variance-covariance matrices for the random effects mixture is performed.} \item{arb.sigma}{If FALSE, then a test for different variance terms between the response trajectories is performed.} \item{lambda}{A vector of mixing proportions (under the null hypothesis) with same purpose as outlined in \code{regmixEM.mixed}.} \item{mu}{A matrix of the means (under the null hypothesis) with same purpose as outlined in \code{regmixEM.mixed}.} \item{sigma}{A vector of standard deviations (under the null hypothesis) with same purpose as outlined in \code{regmixEM.mixed}.} \item{R}{A list 
of covariance matrices (under the null hypothesis) with same purpose as outlined in \code{regmixEM.mixed}.} \item{alpha}{An optional vector of fixed effects regression coefficients (under the null hypothesis) with same purpose as outlined in \code{regmixEM.mixed}.} \item{...}{Additional arguments passed to \code{regmixEM.mixed}.} } \value{ \code{test.equality.mixed} returns a list with the following items: \item{chi.sq}{The chi-squared test statistic.} \item{df}{The degrees of freedom for the chi-squared test statistic.} \item{p.value}{The p-value corresponding to this likelihood ratio test.} } \seealso{ \code{\link{test.equality}} } \examples{ ##Test of equal variances in the simulated data set. data(RanEffdata) set.seed(100) x<-lapply(1:length(RanEffdata), function(i) matrix(RanEffdata[[i]][, 2:3], ncol = 2)) x<-x[1:15] y<-lapply(1:length(RanEffdata), function(i) matrix(RanEffdata[[i]][, 1], ncol = 1)) y<-y[1:15] out<-test.equality.mixed(y, x, arb.R = TRUE, arb.sigma = FALSE, epsilon = 1e-1, verb = TRUE, maxit = 50, addintercept.random = FALSE) out } \keyword{file} mixtools/man/dmvnorm.Rd0000755000176200001440000000235411736707362014677 0ustar liggesusers\name{dmvnorm} \alias{dmvnorm} \alias{logdmvnorm} \title{The Multivariate Normal Density} \description{ Density and log-density for the multivariate normal distribution with mean equal to \code{mu} and variance matrix equal to \code{sigma}. 
} \usage{ dmvnorm(y, mu=NULL, sigma=NULL) logdmvnorm(y, mu=NULL, sigma=NULL) } \arguments{ \item{y}{Either a \eqn{d} - vector or an \eqn{n\times d}{n x d} matrix, where \eqn{d} is the dimension of the normal distribution and \eqn{n} is the number of points at which the density is to be evaluated.} \item{mu}{\eqn{d} - vector: Mean of the normal distribution (or NULL uses the origin as default)} \item{sigma}{This \eqn{d\times d}{d x d} matrix is the variance matrix of the normal distribution (or NULL uses the identity matrix as default)} } \value{ \code{dmvnorm} gives the densities, while \code{logdmvnorm} gives the logarithm of the densities. } \details{ This code is written to be efficient, using the qr-decomposition of the covariance matrix (and using it only once, rather than recalculating it for both the determinant and the inverse of \code{sigma}). } \seealso{ \code{\link{qr}}, \code{\link{qr.solve}}, \code{\link{dnorm}}, \code{\link{rmvnorm}} } \keyword{distribution} mixtools/man/tauequivnormalmixEM.Rd0000644000176200001440000002055313046171330017211 0ustar liggesusers\name{tauequivnormalmixEM} \title{Special EM Algorithm for three-component tau equivalence model} \alias{tauequivnormalmixEM} \usage{ tauequivnormalmixEM (x, lambda = NULL, mu = NULL, sigma = NULL, k = 3, mean.constr = NULL, sd.constr = NULL, gparam = NULL, epsilon = 1e-08, maxit = 10000, maxrestarts=20, verb = FALSE, fast=FALSE, ECM = TRUE, arbmean = TRUE, arbvar = TRUE) } \description{ Return ECM algorithm output for a specific case of a three-component tau equivalence model } \details{ The \code{tauequivnormalmixEM} function is merely a wrapper for the \code{\link{normalmixMMlc}} function. # This is the standard EM algorithm for normal mixtures that maximizes # the conditional expected complete-data # log-likelihood at each M-step of the algorithm. 
# If desired, the # EM algorithm may be replaced by an ECM algorithm (see \code{ECM} argument) # that alternates between maximizing with respect to the \code{mu} # and \code{lambda} while holding \code{sigma} fixed, and maximizing with # respect to \code{sigma} and \code{lambda} while holding \code{mu} # fixed. In the case where \code{arbmean} is \code{FALSE} # and \code{arbvar} is \code{TRUE}, there is no closed-form EM algorithm, # so the ECM option is forced in this case. } \arguments{ \item{x}{A vector of length n consisting of the data, passed directly to \code{\link{normalmixMMlc}}.} \item{lambda}{Initial value of mixing proportions, passed directly to \code{\link{normalmixMMlc}}. Automatically repeated as necessary to produce a vector of length \code{k}, then normalized to sum to 1. If \code{NULL}, then \code{lambda} is random from a uniform Dirichlet distribution (i.e., its entries are uniform random and then it is normalized to sum to 1).} \item{mu}{Starting value of vector of component means for algorithm, passed directly to \code{\link{normalmixMMlc}}. If non-NULL and a vector, \code{k} is set to \code{length(mu)}. If NULL, then the initial value is randomly generated from a normal distribution with center(s) determined by binning the data.} \item{sigma}{Starting value of vector of component standard deviations for algorithm, passed directly to \code{\link{normalmixMMlc}}. Obsolete for linear constraint on the inverse variances, use \code{gparam} instead to specify a starting value. Note: This needs more precision} \item{k}{Number of components, passed directly to \code{\link{normalmixMMlc}}. Initial value ignored unless \code{mu} and \code{sigma} are both NULL. 
Also, initial value is ignored if \code{mean.constr} is NULL, since in that case we presume \code{k=3}.} \item{mean.constr}{If non-NULL, this parameter is passed directly to \code{\link{normalmixMMlc}} and both \code{mean.lincstr} and \code{var.lincstr} are passed as NULL to \code{\link{normalmixMMlc}}. If NULL, then it is assumed that \code{k=3} and the means must take the form \eqn{\alpha}, \eqn{\alpha-\delta}, and \eqn{\alpha+\delta} for unknown parameters \eqn{\alpha} and \eqn{\delta}. Furthermore, the reciprocal variances are assumed to be \eqn{\gamma_1+\gamma_2}, \eqn{\gamma_1}, and \eqn{\gamma_1} for unknown positive parameters \eqn{\gamma_1} and \eqn{\gamma_2}. These constraints are passed to the \code{\link{normalmixMMlc}} function using the \code{mean.lincstr} and \code{var.lincstr} arguments as shown in the examples for the \code{\link{normalmixMMlc}} help file.} \item{sd.constr}{Deprecated.} \item{gparam}{This argument is passed directly to \code{\link{normalmixMMlc}.} } \item{epsilon}{The convergence criterion. Convergence is declared when the change in the observed data log-likelihood increases by less than epsilon.} \item{maxit}{The maximum number of iterations.} \item{maxrestarts}{The maximum number of restarts allowed in case of a problem with the particular starting values chosen due to one of the variance estimates getting too small (each restart uses randomly chosen starting values). 
It is well-known that when each component of a normal mixture may have its own mean and variance, the likelihood has no maximizer; in such cases, we hope to find a "nice" local maximum with this algorithm instead, but occasionally the algorithm finds a "not nice" solution and one of the variances goes to zero, driving the likelihood to infinity.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} \item{fast}{If TRUE and k==2 and arbmean==TRUE, then use \code{\link{normalmixEM2comp}}, which is a much faster version of the EM algorithm for this case. This version is less protected against certain kinds of underflow that can cause numerical problems and it does not permit any restarts. If k>2, \code{fast} is ignored.} \item{ECM}{logical: Should this algorithm be an ECM algorithm in the sense of Meng and Rubin (1993)? If FALSE, the algorithm is a true EM algorithm; if TRUE, then every half-iteration alternately updates the means conditional on the variances or the variances conditional on the means, with an extra E-step in between these updates. For \code{tauequivnormalmixEM}, it must be TRUE.} \item{arbmean}{Deprecated.} \item{arbvar}{Deprecated.} } \value{ \code{normalmixEM} returns a list of class \code{mixEM} with items: \item{x}{The raw data.} \item{lambda}{The final mixing proportions.} \item{mu}{The final mean parameters.} \item{sigma}{The final standard deviation(s)} \item{scale}{Scale factor for the component standard deviations, if applicable.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood. 
This vector includes both the initial and the final values; thus, the number of iterations is one less than its length.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{normalmixMMlc}}, \code{\link{normalmixEM}}, \code{\link{mvnormalmixEM}}, \code{\link{normalmixEM2comp}} } \references{ \itemize{ \item Thomas, H., Lohaus, A., and Domsch, H. (2011) Stable Unstable Reliability Theory, \emph{British Journal of Mathematical and Statistical Psychology} 65(2): 201-221. \item Meng, X.-L. and Rubin, D. B. (1993) Maximum Likelihood Estimation Via the ECM Algorithm: A General Framework, \emph{Biometrika} 80(2): 267-278. } } \examples{ ## Analyzing synthetic data as in the tau equivalent model ## From Thomas et al (2011), see also Chauveau and Hunter (2013) ## a 3-component mixture of normals with linear constraints. lbd <- c(0.6,0.3,0.1); m <- length(lbd) sigma <- sig0 <- sqrt(c(1,9,9)) # means constaints mu = M beta M <- matrix(c(1,1,1,0,1,-1), 3, 2) beta <- c(1,5) # unknown constained mean mu0 <- mu <- as.vector(M \%*\% beta) # linear constraint on the inverse variances pi = A.g A <- matrix(c(1,1,1,0,1,0), m, 2, byrow=TRUE) iv0 <- 1/(sig0^2) g0 <- c(iv0[2],iv0[1] - iv0[2]) # gamma^0 init # simulation and EM fits set.seed(40); n=100; x <- rnormmix(n,lbd,mu,sigma) s <- normalmixEM(x,mu=mu0,sigma=sig0,maxit=2000) # plain EM # EM with var and mean linear constraints sc <- normalmixMMlc(x, lambda=lbd, mu=mu0, sigma=sig0, mean.lincstr=M, var.lincstr=A, gparam=g0) # Using tauequivnormalmixEM function to call normalmixMMlc tau <- tauequivnormalmixEM (x, lambda=lbd, mu=mu0, gparam=g0) # plot and compare both estimates dnormmixt <- function(t, lam, mu, sig){ m <- length(lam); f <- 0 for (j in 1:m) f <- f + lam[j]*dnorm(t,mean=mu[j],sd=sig[j]) f} t <- seq(min(x)-2, max(x)+2, len=200) hist(x, freq=FALSE, col="lightgrey", ylim=c(0,0.3), 
ylab="density",main="") lines(t, dnormmixt(t, lbd, mu, sigma), col="darkgrey", lwd=2) # true lines(t, dnormmixt(t, s$lambda, s$mu, s$sigma), lty=2) lines(t, dnormmixt(t, sc$lambda, sc$mu, sc$sigma), col=1, lty=3) lines(t, dnormmixt(t, tau$lambda, tau$mu, tau$sigma), col=2, lty=4) legend("topleft", c("true","plain EM","constr EM", "Tau Equiv"), col=c("darkgrey",1,1,2), lty=c(1,2,3,4), lwd=c(2,1,1,1)) } \keyword{file} mixtools/man/rexpmix.Rd0000644000176200001440000000216313055022056014666 0ustar liggesusers\name{rexpmix} \title{Simulate from Mixtures of Exponentials} \alias{rexpmix} \usage{ rexpmix(n, lambda = 1, rate = 1) } \description{ Simulate from a mixture of univariate exponential distributions. } \arguments{ \item{n}{Number of cases to simulate.} \item{lambda}{Vector of mixture probabilities, with length equal to \eqn{m}, the desired number of components (subpopulations). This is assumed to sum to 1.} \item{rate}{Vector of component rates.} } \value{ \code{rexpmix} returns an \eqn{n}-vector sampled from an \eqn{m}-component mixture of univariate exponential distributions. } %%\details{This function simply calls \code{\link{sample}} and \code{\link{rexp}}.} \seealso{ \code{\link{rnormmix}}, \code{\link{rmvnormmix}} for Gaussian mixtures, \code{\link{rweibullmix}} for mixture of Weibull distributions. } \examples{ ## Generate data from a 2-component mixture of exponentials. n=300 # sample size m=2 # nb components lambda=c(1/3, 2/3); rate = c(1,1/10) # parameters set.seed(1234) x <- rexpmix(n, lambda, rate) # iid ~ exp mixture ## histogram of the simulated data. 
hist(x, col=8) } \keyword{file} mixtools/man/normalmixEM.Rd0000755000176200001440000001623712644763557015461 0ustar liggesusers\name{normalmixEM} \title{EM Algorithm for Mixtures of Univariate Normals} \alias{normalmixEM} \usage{ normalmixEM(x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, mean.constr = NULL, sd.constr = NULL, epsilon = 1e-08, maxit = 1000, maxrestarts=20, verb = FALSE, fast=FALSE, ECM = FALSE, arbmean = TRUE, arbvar = TRUE) } \description{ Return EM algorithm output for mixtures of normal distributions. } \details{ This is the standard EM algorithm for normal mixtures that maximizes the conditional expected complete-data log-likelihood at each M-step of the algorithm. If desired, the EM algorithm may be replaced by an ECM algorithm (see \code{ECM} argument) that alternates between maximizing with respect to the \code{mu} and \code{lambda} while holding \code{sigma} fixed, and maximizing with respect to \code{sigma} and \code{lambda} while holding \code{mu} fixed. In the case where \code{arbmean} is \code{FALSE} and \code{arbvar} is \code{TRUE}, there is no closed-form EM algorithm, so the ECM option is forced in this case. } \arguments{ \item{x}{A vector of length n consisting of the data.} \item{lambda}{Initial value of mixing proportions. Automatically repeated as necessary to produce a vector of length \code{k}, then normalized to sum to 1. If \code{NULL}, then \code{lambda} is random from a uniform Dirichlet distribution (i.e., its entries are uniform random and then it is normalized to sum to 1).} \item{mu}{Starting value of vector of component means. If non-NULL and a scalar, \code{arbmean} is set to \code{FALSE}. If non-NULL and a vector, \code{k} is set to \code{length(mu)}. If NULL, then the initial value is randomly generated from a normal distribution with center(s) determined by binning the data.} \item{sigma}{Starting value of vector of component standard deviations for algorithm. 
If non-NULL and a scalar, \code{arbvar} is set to \code{FALSE}. If non-NULL and a vector, \code{arbvar} is set to \code{TRUE} and \code{k} is set to \code{length(sigma)}. If NULL, then the initial value is the reciprocal of the square root of a vector of random exponential-distribution values whose means are determined according to a binning method done on the data.} \item{k}{Number of components. Initial value ignored unless \code{mu} and \code{sigma} are both NULL.} \item{mean.constr}{Equality constraints on the mean parameters, given as a vector of length \code{k}. Each vector entry helps specify the constraints, if any, on the corresponding mean parameter: If \code{NA}, the corresponding parameter is unconstrained. If numeric, the corresponding parameter is fixed at that value. If a character string consisting of a single character preceded by a coefficient, such as \code{"0.5a"} or \code{"-b"}, all parameters using the same single character in their constraints will fix these parameters equal to the coefficient times some the same free parameter. For instance, if \code{mean.constr = c(NA, 0, "a", "-a")}, then the first mean parameter is unconstrained, the second is fixed at zero, and the third and forth are constrained to be equal and opposite in sign.} \item{sd.constr}{Equality constraints on the standard deviation parameters. See \code{mean.constr}.} \item{epsilon}{The convergence criterion. Convergence is declared when the change in the observed data log-likelihood increases by less than epsilon.} \item{maxit}{The maximum number of iterations.} \item{maxrestarts}{The maximum number of restarts allowed in case of a problem with the particular starting values chosen due to one of the variance estimates getting too small (each restart uses randomly chosen starting values). 
It is well-known that when each component of a normal mixture may have its own mean and variance, the likelihood has no maximizer; in such cases, we hope to find a "nice" local maximum with this algorithm instead, but occasionally the algorithm finds a "not nice" solution and one of the variances goes to zero, driving the likelihood to infinity.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} \item{fast}{If TRUE and k==2 and arbmean==TRUE, then use \code{\link{normalmixEM2comp}}, which is a much faster version of the EM algorithm for this case. This version is less protected against certain kinds of underflow that can cause numerical problems and it does not permit any restarts. If k>2, \code{fast} is ignored.} \item{ECM}{logical: Should this algorithm be an ECM algorithm in the sense of Meng and Rubin (1993)? If FALSE, the algorithm is a true EM algorithm; if TRUE, then every half-iteration alternately updates the means conditional on the variances or the variances conditional on the means, with an extra E-step in between these updates.} \item{arbmean}{If TRUE, then the component densities are allowed to have different \code{mu}s. If FALSE, then a scale mixture will be fit. Initial value ignored unless \code{mu} is NULL.} \item{arbvar}{If TRUE, then the component densities are allowed to have different \code{sigma}s. If FALSE, then a location mixture will be fit. Initial value ignored unless \code{sigma} is NULL.} } \value{ \code{normalmixEM} returns a list of class \code{mixEM} with items: \item{x}{The raw data.} \item{lambda}{The final mixing proportions.} \item{mu}{The final mean parameters.} \item{sigma}{The final standard deviations. If \code{arbmean} = FALSE, then only the smallest standard deviation is returned. See \code{scale} below.} \item{scale}{If \code{arbmean} = FALSE, then the scale factor for the component standard deviations is returned. 
Otherwise, this is omitted from the output.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood. This vector includes both the initial and the final values; thus, the number of iterations is one less than its length.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{mvnormalmixEM}}, \code{\link{normalmixEM2comp}}, \code{\link{normalmixMMlc}}, \code{\link{spEMsymloc}} } \references{ \itemize{ \item McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. \item Meng, X.-L. and Rubin, D. B. (1993) Maximum Likelihood Estimation Via the ECM Algorithm: A General Framework, \emph{Biometrika} 80(2): 267-278. \item Benaglia, T., Chauveau, D., Hunter, D. R., and Young, D. mixtools: An R package for analyzing finite mixture models. Journal of Statistical Software, 32(6):1-29, 2009. } } \examples{ ##Analyzing the Old Faithful geyser data with a 2-component mixture of normals. data(faithful) attach(faithful) set.seed(100) system.time(out<-normalmixEM(waiting, arbvar = FALSE, epsilon = 1e-03)) out system.time(out2<-normalmixEM(waiting, arbvar = FALSE, epsilon = 1e-03, fast=TRUE)) out2 # same thing but much faster } \keyword{file} mixtools/man/plotspRMM.Rd0000644000176200001440000000307313055603106015071 0ustar liggesusers\name{plotspRMM} \alias{plotspRMM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Plot output from Stochastic EM algorithm for semiparametric scaled mixture of censored data } \description{Function for plotting various results from an object returned by \code{\link{spRMM_SEM}}, a Stochastic EM algorithm for semiparametric scaled mixture of randomly right censored lifetime data. 
Four plots of sequences of estimates along iterations, survival and density estimates (see reference below). } \usage{ plotspRMM(sem, tmax = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{sem}{An object returned by \code{\link{spRMM_SEM}}.} \item{tmax}{The max time for \eqn{x} axis, set to some default value if \code{NULL}.} } \value{The four plots returned} \seealso{ Related functions: \code{\link{spRMM_SEM}}. Other models and algorithms for censored lifetime data (name convention is model_algorithm): \code{\link{expRMM_EM}}, \code{\link{weibullRMM_SEM}}. } \references{ \itemize{ \item Bordes, L., and Chauveau, D. (2016), Stochastic EM algorithms for parametric and semiparametric mixture models for right-censored lifetime data, Computational Statistics, Volume 31, Issue 4, pages 1513-1538. \url{http://link.springer.com/article/10.1007/s00180-016-0661-7} } } \author{Didier Chauveau} %% ~Make other sections like Warning with \section{Warning }{....} ~ \examples{ # See example(spRMM_SEM) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{file} mixtools/man/rmvnormmix.Rd0000755000176200001440000000450113055645031015415 0ustar liggesusers\name{rmvnormmix} \title{Simulate from Multivariate (repeated measures) Mixtures of Normals} \alias{normmixrm.sim} \alias{rmvnormmix} \usage{ rmvnormmix(n, lambda=1, mu=0, sigma=1) } \description{ Simulate from a mixture of multivariate zero-correlation normal distributions } \arguments{ \item{n}{Number of cases to simulate.} \item{lambda}{Vector of mixture probabilities with length equal to \eqn{m}, the desired number of components. This is assumed to sum to 1; if not, it is normalized.} \item{mu}{Matrix of means of dimensions \eqn{m\times r}{m x r}, where \eqn{m} is the number of components (subpopulations) and \eqn{r} is the number of coordinates (repeated measurements) per case. 
Note: \code{mu} is automatically coerced to a matrix with \eqn{m} rows even if it is not given in this form, which can lead to unexpected behavior in some cases.} \item{sigma}{Matrix of standard deviations, same dimensions as \code{mu}. The coordinates within a case are independent, conditional on the mixture component. (There is marginal correlation among the coordinates, but this is due to the mixture structure only.) Note: \code{sigma} is automatically coerced to a matrix with \eqn{m} rows even if it is not given in this form, which can lead to unexpected behavior in some cases.} } \details{ It is possible to generate univariate standard normal random variables using the default values (but why bother?). The case of conditionally iid coordinates is covered by the situation in which all columns in mu and sigma are identical. } \value{ \code{rmvnormmix} returns an \eqn{n\times r}{n x r} matrix in which each row is a sample from one of the components of a mixture of zero-correlation multivariate normals. The mixture structure induces nonzero correlations among the coordinates. } \seealso{ \code{\link{rnormmix}} } \examples{ ##Generate data from a 2-component mixture of trivariate normals. 
set.seed(100) n <- 200 lambda <- rep(1, 2)/2 mu <- matrix(2*(1:6), 2, 3) sigma <- matrix(1,2,3) mydata<-rmvnormmix(n, lambda, mu, sigma) ## Now check to see if we can estimate mixture densities well: title <- paste("Does this resemble N(", mu[1,], ",1) and N(", mu[2,],",1)?", sep="") plot(npEM(mydata, 2), title=title) } \keyword{file} mixtools/man/regmixEM.Rd0000755000176200001440000000732012514002272014707 0ustar liggesusers\name{regmixEM} \title{EM Algorithm for Mixtures of Regressions} \alias{regmixEM} \usage{ regmixEM(y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) } \description{ Returns EM algorithm output for mixtures of multiple regressions with arbitrarily many components. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An nxp matrix of predictors. See \code{addintercept} below.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is random from uniform Dirichlet and number of components is determined by \code{beta}.} \item{beta}{Initial value of \code{beta} parameters. Should be a pxk matrix, where p is the number of columns of x and k is number of components. If NULL, then \code{beta} has standard normal entries according to a binning method done on the data. If both \code{lambda} and \code{beta} are NULL, then number of components is determined by \code{sigma}.} \item{sigma}{A vector of standard deviations. If NULL, then 1/\code{sigma}^2 has random standard exponential entries according to a binning method done on the data. If \code{lambda}, \code{beta}, and \code{sigma} are NULL, then number of components is determined by \code{k}.} \item{k}{Number of components. 
Ignored unless all of \code{lambda}, \code{beta}, and \code{sigma} are NULL.} \item{addintercept}{If TRUE, a column of ones is appended to the x matrix before the value of p is calculated.} \item{arbmean}{If TRUE, each mixture component is assumed to have a different set of regression coefficients (i.e., the \code{beta}s).} \item{arbvar}{If TRUE, each mixture component is assumed to have a different \code{sigma}.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{regmixEM} returns a list of class \code{mixEM} with items: \item{x}{The set of predictors (which includes a column of 1's if \code{addintercept} = TRUE).} \item{y}{The response values.} \item{lambda}{The final mixing proportions.} \item{beta}{The final regression coefficients.} \item{sigma}{The final standard deviations. If \code{arbmean} = FALSE, then only the smallest standard deviation is returned. See \code{scale} below.} \item{scale}{If \code{arbmean} = FALSE, then the scale factor for the component standard deviations is returned. Otherwise, this is omitted from the output.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{regcr}}, \code{\link{regmixMH}} } \references{ de Veaux, R. D. (1989), Mixtures of Linear Regressions, \emph{Computational Statistics and Data Analysis} 8, 227-245. Hurn, M., Justel, A. and Robert, C. P. (2003) Estimating Mixtures of Regressions, \emph{Journal of Computational and Graphical Statistics} \bold{12(1)}, 55--79. McLachlan, G. J. and Peel, D. 
(2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. } \examples{ ## EM output for NOdata. data(NOdata) attach(NOdata) set.seed(100) em.out <- regmixEM(Equivalence, NO, verb = TRUE, epsilon = 1e-04) em.out[3:6] } \keyword{file} mixtools/man/print.mvnpEM.Rd0000755000176200001440000000211012644777537015552 0ustar liggesusers\name{print.mvnpEM} \alias{print.mvnpEM} \title{Printing of Results from the mvnpEM Algorithm Output} \usage{ \method{print}{mvnpEM}(x, \dots) } \arguments{ \item{x}{an object of class \code{mvnpEM} such as a result of a call to \code{\link{mvnpEM}}} \item{\dots}{Additional arguments to \code{\link{print}}} } \description{ \code{\link[base]{print}} method for class \code{mvnpEM}. } \details{ \code{print.mvnpEM} prints the elements of an \code{mvnpEM} object without printing the data or the posterior probabilities. (These may still be accessed as \code{x$data} and \code{x$posteriors}.) } \value{ \code{print.mvnpEM} returns (invisibly) the full value of \code{x} itself, including the \code{data} and \code{posteriors} elements. } \seealso{ \code{\link{mvnpEM}}, \code{\link{plot.mvnpEM}} \code{\link{summary.mvnpEM}} } \examples{ # Example as in Chauveau and Hoang (2015) with 6 coordinates \dontrun{ m=2; r=6; blockid <-c(1,1,2,2,3,3) # 3 bivariate blocks # generate some data x ... a <- mvnpEM(x, mu0=2, blockid, samebw=F) # adaptive bandwidth print(a)} } \keyword{file} mixtools/man/mixturegram.Rd0000755000176200001440000000742413616452214015554 0ustar liggesusers\name{mixturegram} \title{Mixturegrams} \alias{mixturegram} \usage{ mixturegram(data, pmbs, method = c("pca", "kpca", "lda"), all.n = FALSE, id.con = NULL, score = 1, iter.max = 50, nstart = 25, ...) } \description{ Construct a mixturegram for determining an apporpriate number of components. } \arguments{ \item{data}{The data, which must either be a vector or a matrix. 
If a matrix, then the rows correspond to the observations.} \item{pmbs}{A list of length (K-1) such that each element is an nxk matrix of the posterior membership probabilities. These are obtained from each of the "best" estimated k-component mixture models, k = 2,...,K. } \item{method}{The dimension reduction method used. \code{method = "pca"} implements principal components analysis. \code{method = "kpca"} implements kernel principal components analysis. \code{method = "lda"} implements reduced rank linear discriminant analysis. } \item{all.n}{A logical specifying whether the mixturegram should plot the profiles of all observations (\code{TRUE}) or just the K-profile summaries (\code{FALSE}). The default is \code{FALSE}.} \item{id.con}{An argument that allows one to impose some sort of (meaningful) identifiability constraint so that the mixture components are in some sort of comparable order between mixture models with different numbers of components. If \code{NULL}, then the components are ordered by the component means for univariate data or ordered by the first dimension of the component means for multivariate data.} \item{score}{The value for the specified dimension reduction technique's score, which is used for constructing the mixturegram. By default, this value is \code{1}, which is the value that will typically be used. Larger values will result in more variability displayed on the mixturegram. Note that the largest value that can be calculated at each value of k>1 on the mixturegram is p+k-1, where p is the number of columns of \code{data}. } \item{iter.max}{The maximum number of iterations allowed for the k-means clustering algorithm, which is passed to the \code{\link{kmeans}} function. The default is \code{50}. } \item{nstart}{The number of random sets chosen based on k centers, which is passed to the \code{\link{kmeans}} function. 
The default is \code{25}.} \item{...}{Additional arguments that can be passed to the underlying \code{\link{plot}} function.} } \value{ \code{mixturegram} returns a mixturegram where the profiles are plotted over component values of k = 1,...,K. } \seealso{ \code{\link{boot.comp}} } \references{ Young, D. S., Ke, C., and Zeng, X. (2018) The Mixturegram: A Visualization Tool for Assessing the Number of Components in Finite Mixture Models, \emph{Journal of Computational and Graphical Statistics}, \bold{27(3)}, 564--575. } \examples{ ##Data generated from a 2-component mixture of normals. set.seed(100) n <- 100 w <- rmultinom(n,1,c(.3,.7)) y <- sapply(1:n,function(i) w[1,i]*rnorm(1,-6,1) + w[2,i]*rnorm(1,0,1)) selection <- function(i,data,rep=30){ out <- replicate(rep,normalmixEM(data,epsilon=1e-06, k=i,maxit=5000),simplify=FALSE) counts <- lapply(1:rep,function(j) table(apply(out[[j]]$posterior,1, which.max))) counts.length <- sapply(counts, length) counts.min <- sapply(counts, min) counts.test <- (counts.length != i)|(counts.min < 5) if(sum(counts.test) > 0 & sum(counts.test) < rep) out <- out[!counts.test] l <- unlist(lapply(out, function(x) x$loglik)) tmp <- out[[which.max(l)]] } all.out <- lapply(2:5, selection, data = y, rep = 2) pmbs <- lapply(1:length(all.out), function(i) all.out[[i]]$post) mixturegram(y, pmbs, method = "pca", all.n = FALSE, id.con = NULL, score = 1, main = "Mixturegram (Well-Separated Data)") } \keyword{file} mixtools/man/npMSL.Rd0000755000176200001440000001661512644763736014221 0ustar liggesusers\name{npMSL} \title{Nonparametric EM-like Algorithm for Mixtures of Independent Repeated Measurements - Maximum Smoothed Likelihood version} \alias{npMSL} \usage{ npMSL(x, mu0, blockid = 1:ncol(x), bw = bw.nrd0(as.vector(as.matrix(x))), samebw = TRUE, bwmethod = "S", h = bw, eps = 1e-8, maxiter=500, bwiter = maxiter, nbfold = NULL, ngrid=200, post=NULL, verb = TRUE) } \description{ Returns nonparametric Smoothed Likelihood algorithm output (Levine 
et al, 2011) for mixtures of multivariate (repeated measures) data where the coordinates of a row (case) in the data matrix are assumed to be independent, conditional on the mixture component (subpopulation) from which they are drawn. } \arguments{ \item{x}{An \eqn{n\times r}{n x r} matrix of data. Each of the \eqn{n} rows is a case, and each case has \eqn{r} repeated measurements. These measurements are assumed to be conditionally independent, conditional on the mixture component (subpopulation) from which the case is drawn.} \item{mu0}{Either an \eqn{m\times r}{m x r} matrix specifying the initial centers for the \link{kmeans} function, or an integer \eqn{m} specifying the number of initial centers, which are then choosen randomly in \link{kmeans}} \item{blockid}{A vector of length \eqn{r} identifying coordinates (columns of \code{x}) that are assumed to be identically distributed (i.e., in the same block). For instance, the default has all distinct elements, indicating that no two coordinates are assumed identically distributed and thus a separate set of \eqn{m} density estimates is produced for each column of \eqn{x}. On the other hand, if \code{blockid=rep(1,ncol(x))}, then the coordinates in each row are assumed conditionally i.i.d.} \item{bw}{Bandwidth for density estimation, equal to the standard deviation of the kernel density. By default, a simplistic application of the default \code{\link{bw.nrd0}} bandwidth used by \code{\link{density}} to the entire dataset.} \item{samebw}{Logical: If \code{TRUE}, use the same bandwidth for each iteration and for each component and block. If \code{FALSE}, use a separate bandwidth for each component and block, and update this bandwidth at each iteration of the algorithm until \code{bwiter} is reached (see below). 
Two adaptation methods are provided, see \code{bwmethod} below.} \item{bwmethod}{Define the adaptive bandwidth strategy when \code{samebw = FALSE}, in which case the bandwidth depends on each component, block, and iteration of the algorithm. If set to "S" (the default), adaptation is done using a suitably modified \code{\link{bw.nrd0}} method as described in Benaglia et al (2011). If set to "CV", an adaptive \eqn{k}-fold Cross Validation method is applied, as described in Chauveau et al (2014), where \code{nbfold} is the number of subsamples. This corresponds to a Leave-\eqn{[n/nbfold]}-Out CV. } \item{h}{Alternative way to specify the bandwidth, to provide backward compatibility.} \item{eps}{Tolerance limit for declaring algorithm convergence. Convergence is declared whenever the maximum change in any coordinate of the \code{lambda} vector (of mixing proportion estimates) does not exceed \code{eps}.} \item{maxiter}{The maximum number of iterations allowed, convergence may be declared before \code{maxiter} iterations (see \code{eps} above).} \item{bwiter}{The maximum number of iterations allowed for adaptive bandwidth stage, when \code{samebw = FALSE}. 
If set to \code{0}, then the initial bandwidth matrix is used without adaptation.} \item{nbfold}{A parameter passed to the internal function \code{wbs.kCV}, which controls the weighted bandwidth selection by k-fold cross-validation.} \item{ngrid}{Number of points in the discretization of the intervals over which are approximated the (univariate) integrals for non linear smoothing of the log-densities, as required in the E step of the npMSL algorithm, see Levine et al (2011).} \item{post}{If non-NULL, an \eqn{n\times m}{n x m} matrix specifying the initial posterior probability vectors for each of the observations, i.e., the initial values to start the EM-like algorithm.} \item{verb}{If TRUE, print updates for every iteration of the algorithm as it runs} } \value{ \code{npMSL} returns a list of class \code{npEM} with the following items: \item{data}{The raw data (an \eqn{n\times r}{n x r} matrix).} \item{posteriors}{An \eqn{n\times m}{n x m} matrix of posterior probabilities for observation.} \item{bandwidth}{If \code{samebw==TRUE}, same as the \code{bw} input argument; otherwise, value of \code{bw} matrix at final iteration. This information is needed by any method that produces density estimates from the output.} \item{blockid}{Same as the \code{blockid} input argument, but recoded to have positive integer values. 
Also needed by any method that produces density estimates from the output.} \item{lambda}{The sequence of mixing proportions over iterations.} \item{lambdahat}{The final mixing proportions.} \item{loglik}{The sequence of log-likelihoods over iterations.} \item{f}{An array of size \eqn{ngrid \times m \times l}{ngrid x m x l}, returning last values of density for component \eqn{j} and block \eqn{k} over \code{grid} points.} \item{meanNaN}{Average number of \code{NaN} that occured over iterations (for internal testing and control purpose).} \item{meanUdfl}{Average number of "underflow" that occured over iterations (for internal testing and control purpose).} } \seealso{ \code{\link{npEM}}, \code{\link{plot.npEM}}, \code{\link{normmixrm.sim}}, \code{\link{spEMsymloc}}, \code{\link{spEM}}, \code{\link{plotseq.npEM}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R. (2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics, 18, 505-526. \item Benaglia, T., Chauveau, D. and Hunter, D.R. (2011), Bandwidth Selection in an EM-like algorithm for nonparametric multivariate mixtures. Nonparametric Statistics and Mixture Models: A Festschrift in Honor of Thomas P. Hettmansperger. World Scientific Publishing Co., pages 15-27. \item Chauveau D., Hunter D. R. and Levine M. (2014), Semi-Parametric Estimation for Conditional Independence Multivariate Finite Mixture Models. Preprint (under revision). \item Levine, M., Hunter, D. and Chauveau, D. (2011), Maximum Smoothed Likelihood for Multivariate Mixtures, Biometrika 98(2): 403-416. } } \examples{ ## Examine and plot water-level task data set. ## Block structure pairing clock angles that are directly opposite one ## another (1:00 with 7:00, 2:00 with 8:00, etc.) 
set.seed(111) # Ensure that results are exactly reproducible data(Waterdata) blockid <- c(4,3,2,1,3,4,1,2) # see Benaglia et al (2009a) \dontrun{ a <- npEM(Waterdata[,3:10], mu0=3, blockid=blockid, bw=4) # npEM solution b <- npMSL(Waterdata[,3:10], mu0=3, blockid=blockid, bw=4) # smoothed version # Comparisons on the 4 default plots, one for each block par(mfrow=c(2,2)) for (l in 1:4){ plot(a, blocks=l, breaks=5*(0:37)-92.5, xlim=c(-90,90), xaxt="n",ylim=c(0,.035), xlab="") plot(b, blocks=l, hist=FALSE, newplot=FALSE, addlegend=FALSE, lty=2, dens.col=1) axis(1, at=30*(1:7)-120, cex.axis=1) legend("topleft",c("npMSL"),lty=2, lwd=2)} } } \keyword{file} mixtools/man/spregmix.Rd0000755000176200001440000001321212122215550015025 0ustar liggesusers\name{spregmix} \title{EM-like Algorithm for Semiparametric Mixtures of Regressions} \alias{spregmix} \usage{ spregmix(lmformula, bw = NULL, constbw = FALSE, bwmult = 0.9, z.hat = NULL, symm = TRUE, betamethod = "LS", m = ifelse(is.null(z.hat), 2, ncol(z.hat)), epsilon = 1e-04, maxit = 1000, verbose = FALSE, \dots) } \description{ Returns parameter estimates for finite mixtures of linear regressions with unspecified error structure. Based on Hunter and Young (2012). } \arguments{ \item{lmformula}{Formula for a linear model, in the same format used by \code{\link{lm}}. Additional parameters may be passed to \code{\link{lm}} via the \code{...} argument.} \item{bw}{Initial bandwidth value. 
If NULL, this will be chosen automatically by the algorithm.} \item{constbw}{Logical: If TRUE, the bandwidth is held constant throughout the algorithm; if FALSE, it adapts at each iteration according to the rules given in Hunter and Young (2012).} \item{bwmult}{ Whenever it is updated automatically, the bandwidth is equal to \code{bwmult} divided by the fifth root of \eqn{n} times the smaller of s and IQR/1.34, where s and IQR are estimates of the standard deviation and interquartile range of the residuals, as explained in Hunter and Young (2012). The value of 0.9 gives the rule of Silverman (1986) and the value of 1.06 gives the rule of Scott (1992). Larger values lead to greater smoothing, whereas smaller values lead to less smoothing. } \item{z.hat}{Initial nxm matrix of posterior probabilities. If NULL, this is initialized randomly. As long as a parametric estimation method like least squares is used to estimate \code{beta} in each M-step, the \code{z.hat} values are the only values necessary to begin the EM iterations.} \item{symm}{Logical: If TRUE, the error density is assumed symmetric about zero. If FALSE, it is not. WARNING: If FALSE, the intercept parameter is not uniquely identifiable if it is included in the linear model.} \item{betamethod}{Method of calculating beta coefficients in the M-step. Current possible values are "LS" for least-squares; "L1" for least absolute deviation; "NP" for fully nonparametric; and "transition" for a transition from least squares to fully nonparametric. If something other than these four possibilities is used, then "NP" is assumed. 
For details of these methods, see Hunter and Young (2012).} \item{m}{Number of components in the mixture.} \item{epsilon}{Convergence is declared if the largest change in any lambda or beta coordinate is smaller than \code{epsilon}.} \item{maxit}{The maximum number of iterations; if convergence is never declared based on comparison with \code{epsilon}, then the algorithm stops after \code{maxit} iterations.} \item{verbose}{Logical: If TRUE, then various updates are printed during each iteration of the algorithm.} \item{\dots}{Additional parameters passed to the \code{\link{model.frame}} and \code{\link{model.matrix}} functions, which are used to obtain the response and predictor of the regression.} } \value{ \code{regmixEM} returns a list of class \code{npEM} with items: \item{x}{The set of predictors (which includes a column of 1's if \code{addintercept} = TRUE).} \item{y}{The response values.} \item{lambda}{The mixing proportions for every iteration in the form of a matrix with m columns and (#iterations) rows} \item{beta}{The final regression coefficients.} \item{posterior}{An nxm matrix of posterior probabilities for observations.} \item{np.stdev}{Nonparametric estimate of the standard deviation, as given in Hunter and Young (2012)} \item{bandwidth}{Final value of the bandwidth} \item{density.x}{Points at which the error density is estimated} \item{density.y}{Values of the error density at the points \code{density.x}} \item{symmetric}{Logical: Was the error density assumed symmetric?} \item{loglik}{A quantity similar to a log-likelihood, computed just like a standard loglikelihood would be, conditional on the component density functions being equal to the final density estimates.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{regmixEM}}, \code{\link{spEMsymloc}}, \code{\link{lm}} } \references{ Hunter, D. R. and Young, D. S. (2012) Semi-parametric Mixtures of Regressions, Journal of Nonparametric Statistics 24(1): 19-38. 
Scott, D. W. (1992) \emph{Multivariate Density Estimation}, John Wiley & Sons Inc., New York. Silverman, B. W. (1986). \emph{Density Estimation for Statistics and Data Analysis}, Chapman & Hall, London. } \examples{ data(tonedata) ## By default, the bandwidth will adapt and the error density is assumed symmetric set.seed(100) a=spregmix(tuned~stretchratio, bw=.2, data=tonedata, verb=TRUE) ## Look at the sp mixreg solution: plot(tonedata) abline(a=a$beta[1,1],b=a$beta[2,1], col=2) abline(a=a$beta[1,2],b=a$beta[2,2], col=3) ## Look at the nonparametric KD-based estimate of the error density, ## constrained to be zero-symmetric: plot(xx<-a$density.x, yy<-a$density.y, type="l") ## Compare to a normal density with mean 0 and NP-estimated stdev: z <- seq(min(xx), max(xx), len=200) lines(z, dnorm(z, sd=sqrt((a$np.stdev)^2+a$bandwidth^2)), col=2, lty=2) # Add bandwidth^2 to variance estimate to get estimated var of KDE ## Now add the sp mixreg estimate without assuming symmetric errors: b=spregmix(tuned~stretchratio, bw=.2, , symm=FALSE, data=tonedata, verb=TRUE) lines(b$density.x, b$density.y, col=3) } \keyword{file} mixtools/man/try.flare.Rd0000755000176200001440000000413311665556372015124 0ustar liggesusers\name{try.flare} \title{Mixtures of Regressions with Flare MM Algorithm} \alias{try.flare} \usage{ try.flare(y, x, lambda = NULL, beta = NULL, sigma = NULL, alpha = NULL, nu = 1, epsilon = 1e-04, maxit = 10000, verb = FALSE, restart = 50) } \description{ The function which \code{flaremixEM} actually calls. This only allows one barrier constant to be inputted at a time. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An n-vector of predictor values. An intercept term will be added by default.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1.} \item{beta}{Initial value of \code{beta} parameters. 
Should be a 2x2 matrix where the columns corresond to the component.} \item{sigma}{A vector of standard deviations.} \item{alpha}{A scalar for the exponential component's rate.} \item{nu}{A scalar specifying the barrier constant to use.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} \item{restart}{The number of times to restart the algorithm in case convergence is not attained. The default is 50.} } \value{ \code{try.flare} returns a list of class \code{mixEM} with items: \item{x}{The set of predictors (which includes a column of 1's).} \item{y}{The response values.} \item{posterior}{An nx2 matrix of posterior probabilities for observations.} \item{lambda}{The final mixing proportions.} \item{beta}{The final regression coefficients.} \item{sigma}{The final standard deviations.} \item{alpha}{The final exponential rate.} \item{loglik}{The final log-likelihood.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{flaremixEM}} } \details{ This usually is not called by the user. The user will likely want \code{flaremixEM}, which also has an example to demonstrate this algorithm. } \keyword{internal} mixtools/man/spRMM_SEM.Rd0000644000176200001440000001041113055603106014670 0ustar liggesusers\name{spRMM_SEM} \title{Stochastic EM algorithm for semiparametric scaled mixture of censored data} \alias{spRMM_SEM} \usage{ spRMM_SEM(t, d = NULL, lambda = NULL, scaling = NULL, centers = 2, kernelft = triang_wkde, bw = rep(bw.nrd0(t),length(t)), averaged = TRUE, epsilon = 1e-08, maxit = 100, batchsize = 1, verb = FALSE) } \description{ Stochastic EM algorithm for semiparametric scaled mixture for randomly right censored data. } \arguments{ \item{t}{A vector of \eqn{n}{n} real positive lifetime (possibly censored) durations. 
If \code{d} is not \code{NULL} then a vector of random censoring times \code{c} occurred, so that \eqn{x= min(x,c)} and \eqn{d = I(x <= c)}.} \item{d}{The vector of censoring indication, where 1 means observed lifetime data, and 0 means censored lifetime data.} \item{lambda}{Initial value of mixing proportions. If \code{NULL}, then \code{lambda} is set to \code{rep(1/k,k)}.} \item{scaling}{Initial value of scaling between components, set to 1 if \code{NULL}.} \item{centers}{initial centers for initial call to kmeans for initialization.} \item{kernelft}{.} \item{bw}{Bandwidth in the kernel hazard estimates.} \item{averaged}{averaged.} \item{epsilon}{Tolerance limit.} \item{maxit}{The number of iterations allowed.} \item{batchsize}{The batchsize (see reference below).} \item{verb}{If TRUE, print updates for every iteration of the algorithm as it runs} } \value{ \code{spRMM_SEM} returns a list of class \code{"spRMM"} with the following items: \item{t}{The input data.} \item{d}{The input censoring indicator.} \item{lambda}{The estimates for the mixing proportions.} \item{scaling}{The estimates for the components scaling.} \item{posterior}{An \eqn{n\times k}{n x 2} matrix of posterior probabilities for observation, after convergence of the algorithm.} \item{loglik}{The (pseudo) log-likelihood value at convergence of the algorithm.} \item{all.loglik}{The sequence of log-likelihood values over iterations.} \item{all.lambda}{The sequence of mixing proportions over iterations.} \item{all.scaling}{The sequence of scaling parameter over iterations.} \item{meanpost}{Posterior probabilities averaged over iterations.} \item{survival}{Kaplan-Meier last iteration estimate (a \code{stepfun} object).} \item{hazard}{Hazard rate last iteration estimate evaluated at \code{final.t}.} \item{final.t}{Last iteration unscaled sample (see reference).} \item{s.hat}{Kaplan-Meier average estimate.} \item{t.hat}{Ordered unscaled sample, for testing purpose.} \item{avg.od}{For testing purpose 
only.} \item{hazard.hat}{Hazard rate average estimate on \code{t.hat}.} \item{batch.t}{Batch sample (not ordered), see reference.} \item{batch.d}{Associated event indicators just \code{rep(d,batchsize)}, for testing purpose.} \item{sumNaNs}{Internal control of numerical stability.} \item{ft}{A character vector giving the name of the function.} } \seealso{ Related functions: \code{\link{plotspRMM}}, \code{\link{summary.spRMM}}. Other models and algorithms for censored lifetime data (name convention is model_algorithm): \code{\link{expRMM_EM}}, \code{\link{weibullRMM_SEM}}. } \references{ \itemize{ \item Bordes, L., and Chauveau, D. (2016), Stochastic EM algorithms for parametric and semiparametric mixture models for right-censored lifetime data, Computational Statistics, Volume 31, Issue 4, pages 1513-1538. \url{http://link.springer.com/article/10.1007/s00180-016-0661-7} } } \author{Didier Chauveau} \examples{ \dontrun{ n=500 # sample size m=2 # nb components lambda=c(0.4, 0.6) # parameters meanlog=3; sdlog=0.5; scale=0.1 set.seed(12) # simulate a scaled mixture of lognormals x <- rlnormscalemix(n, lambda, meanlog, sdlog, scale) cs=runif(n,20,max(x)+400) # Censoring (uniform) and incomplete data t <- apply(cbind(x,cs),1,min) d <- 1*(x <= cs) tauxc <- 100*round( 1-mean(d),3) cat(tauxc, "percents of data censored.\n") c0 <- c(25, 180) # data-driven initial centers (visible modes) sc0 <- 25/180 # and scaling s <- spRMM_SEM(t, d, scaling = sc0, centers = c0, bw = 15, maxit = 100) plotspRMM(s) # default summary(s) # S3 method for class "spRMM" } } \keyword{file} mixtools/man/wkde.Rd0000755000176200001440000000355212514002272014127 0ustar liggesusers\name{wkde} \title{Weighted Univariate (Normal) Kernel Density Estimate} \alias{wkde} \alias{wkde.symm} \usage{ wkde(x, u=x, w=rep(1, length(x)), bw=bw.nrd0(as.vector(x)), sym=FALSE) } \description{ Evaluates a weighted kernel density estimate, using a Gaussian kernel, at a specified vector of points. 
} \arguments{ \item{x}{Data} \item{u}{Points at which density is to be estimated} \item{w}{Weights (same length as \code{x})} \item{bw}{Bandwidth} \item{sym}{Logical: Symmetrize about zero?} } \value{A vector of the same length as \code{u} } \seealso{ \code{\link{npEM}}, \code{\link{ise.npEM}} } \references{ \itemize{ \item Benaglia, T., Chauveau, D., and Hunter, D. R. (2009), An EM-like algorithm for semi- and non-parametric estimation in multivariate mixtures, Journal of Computational and Graphical Statistics, 18, 505-526. \item Benaglia, T., Chauveau, D., Hunter, D. R., and Young, D. (2009), mixtools: An R package for analyzing finite mixture models. Journal of Statistical Software, 32(6):1-29. } } \examples{ # Mixture with mv gaussian model set.seed(100) m <- 2 # no. of components r <- 3 # no. of repeated measures (coordinates) lambda <- c(0.4, 0.6) mu <- matrix(c(0, 0, 0, 4, 4, 6), m, r, byrow=TRUE) # means sigma <- matrix(rep(1, 6), m, r, byrow=TRUE) # stdevs centers <- matrix(c(0, 0, 0, 4, 4, 4), 2, 3, byrow=TRUE) # initial centers for est blockid = c(1,1,2) # block structure of coordinates n = 100 x <- rmvnormmix(n, lambda, mu, sigma) # simulated data a <- npEM(x, centers, blockid, eps=1e-8, verb=FALSE) par(mfrow=c(2,2)) u <- seq(min(x), max(x), len=200) for(j in 1:2) { for(b in 1:2) { xx <- as.vector(x[,a$blockid==b]) wts <- rep(a$post[,j], length.out=length(xx)) bw <- a$bandwidth title <- paste("j =", j, "and b =", b) plot(u, wkde(xx, u, wts, bw), type="l", main=title) } } } \keyword{file} mixtools/man/expRMM_EM.Rd0000644000176200001440000000716313055603106014731 0ustar liggesusers\name{expRMM_EM} \title{EM algorithm for Reliability Mixture Models (RMM) with right Censoring} \alias{expRMM_EM} \usage{ expRMM_EM(x, d=NULL, lambda = NULL, rate = NULL, k = 2, complete = "tdz", epsilon = 1e-08, maxit = 1000, verb = FALSE) } \description{ Parametric EM algorithm for univariate finite mixture of exponentials distributions with randomly right censored data. 
} \arguments{ \item{x}{A vector of \eqn{n}{n} real positive lifetime (possibly censored) durations. If \code{d} is not \code{NULL} then a vector of random censoring times \code{c} occurred, so that \eqn{x= min(x,c)} and \eqn{d = I(x <= c)}.} \item{d}{The vector of censoring indication, where 1 means observed lifetime data, and 0 means censored lifetime data.} \item{lambda}{Initial value of mixing proportions. If \code{NULL}, then \code{lambda} is set to \code{rep(1/k,k)}.} \item{rate}{Initial value of component exponential rates, all set to 1 if \code{NULL}.} \item{k}{Number of components of the mixture.} \item{complete}{Nature of complete data involved within the EM machinery, can be "tdz" for \code{(t,d,z)} (the default), or "xz" for \code{(x,z)} (see Bordes L. and Chauveau D. (2016) reference below).} \item{epsilon}{Tolerance limit for declaring algorithm convergence based on the change between two consecutive iterations.} \item{maxit}{The maximum number of iterations allowed, convergence may be declared before \code{maxit} iterations (see \code{epsilon} above).} \item{verb}{If TRUE, print updates for every iteration of the algorithm as it runs} } \value{ \code{expRMM_EM} returns a list of class "mixEM" with the following items: \item{x}{The input data.} \item{d}{The input censoring indicator.} \item{lambda}{The estimates for the mixing proportions.} \item{rate}{The estimates for the component rates.} \item{loglik}{The log-likelihood value at convergence of the algorithm.} \item{posterior}{An \eqn{n\times k}{n x k} matrix of posterior probabilities for observation, after convergence of the algorithm.} \item{all.loglik}{The sequence of log-likelihoods over iterations.} \item{all.lambda}{The sequence of mixing proportions over iterations.} \item{all.rate}{The sequence of component rates over iterations.} \item{ft}{A character vector giving the name of the function.} } \seealso{ Related functions: \code{\link{plotexpRMM}}, \code{\link{summary.mixEM}}. 
Other models and algorithms for censored lifetime data: \code{\link{weibullRMM_SEM}}, \code{\link{spRMM_SEM}}. } \references{ \itemize{ \item Bordes, L., and Chauveau, D. (2016), Stochastic EM algorithms for parametric and semiparametric mixture models for right-censored lifetime data, Computational Statistics, Volume 31, Issue 4, pages 1513-1538. \url{http://link.springer.com/article/10.1007/s00180-016-0661-7} } } \author{Didier Chauveau} \examples{ n <- 300 # sample size m <- 2 # number of mixture components lambda <- c(1/3,1-1/3); rate <- c(1,1/10) # mixture parameters set.seed(1234) x <- rexpmix(n, lambda, rate) # iid ~ exponential mixture cs <- runif(n,0,max(x)) # Censoring (uniform) and incomplete data t <- apply(cbind(x,cs),1,min) # observed or censored data d <- 1*(x <= cs) # censoring indicator ###### EM for RMM, exponential lifetimes l0 <- rep(1/m,m); r0 <- c(1, 0.5) # "arbitrary" initial values a <- expRMM_EM(t, d, lambda = l0, rate = r0, k = m) summary(a) # EM estimates etc plotexpRMM(a, lwd=2) # default plot of EM sequences plot(a, which=2) # or equivalently, S3 method for "mixEM" object %%\dontrun{ %%} } \keyword{file} mixtools/man/gammamixEM.Rd0000755000176200001440000000715013616451065015231 0ustar liggesusers\name{gammamixEM} \title{EM Algorithm for Mixtures of Gamma Distributions} \alias{gammamixEM} \usage{ gammamixEM(x, lambda = NULL, alpha = NULL, beta = NULL, k = 2, mom.start = TRUE, fix.alpha = FALSE, epsilon = 1e-08, maxit = 1000, maxrestarts = 20, verb = FALSE) } \description{ Return EM algorithm output for mixtures of gamma distributions. } \arguments{ \item{x}{A vector of length n consisting of the data.} \item{lambda}{Initial value of mixing proportions. If \code{NULL}, then \code{lambda} is random from a uniform Dirichlet distribution (i.e., its entries are uniform random and then it is normalized to sum to 1).} \item{alpha}{Starting value of vector of component shape parameters. 
If non-NULL, \code{alpha} must be of length \code{k} if allowing different component shape parameters, or a single value if \code{fix.alpha = TRUE}. If NULL, then the initial value is estimated by partitioning the data into \code{k} regions (with \code{lambda} determining the proportion of values in each region) and then calculating the method of moments estimates.} \item{beta}{Starting value of vector of component scale parameters. If non-NULL and a vector, \code{k} is set to \code{length(beta)}. If NULL, then the initial value is estimated the same method described for \code{alpha}.} \item{k}{Number of components. Initial value ignored unless \code{alpha} and \code{beta} are both NULL.} \item{mom.start}{Logical to indicate if a method of moments starting value strategy should be implemented. If \code{TRUE}, then only unspecified starting values will be generated according to this strategy.} \item{epsilon}{The convergence criterion. Convergence is declared when the change in the observed data log-likelihood increases by less than epsilon.} \item{fix.alpha}{Logical to indicate if the components should have a common shape parameter \code{alpha} estimated. The default is \code{FALSE}.} \item{maxit}{The maximum number of iterations.} \item{maxrestarts}{The maximum number of restarts allowed in case of a problem with the particular starting values chosen (each restart uses randomly chosen starting values).} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{gammamixEM} returns a list of class \code{mixEM} with items: \item{x}{The raw data.} \item{lambda}{The final mixing proportions.} \item{gamma.pars}{A 2xk matrix where each column provides the component estimates of \code{alpha} and \code{beta}.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood. 
This vector includes both the initial and the final values; thus, the number of iterations is one less than its length.} \item{ft}{A character vector giving the name of the function.} } \references{ Dempster, A. P., Laird, N. M., and Rubin, D. B. (1977) Maximum Likelihood From Incomplete Data Via the EM Algorithm, \emph{Journal of the Royal Statistical Society, Series B}, \bold{39(1)}, 1--38. Young, D. S., Chen, X., Hewage, D., and Nilo-Poyanco, R. (2019) Finite Mixture-of-Gamma Distributions: Estimation, Inference, and Model-Based Clustering, \emph{Advances in Data Analysis and Classification}, \bold{13(4)}, 1053--1082. } \examples{ ##Analyzing a 3-component mixture of gammas. set.seed(100) x <- c(rgamma(200, shape = 0.2, scale = 14), rgamma(200, shape = 32, scale = 10), rgamma(200, shape = 5, scale = 6)) out <- gammamixEM(x, lambda = c(1, 1, 1)/3, verb = TRUE) out[2:4] } \keyword{file} mixtools/man/logisregmixEM.Rd0000755000176200001440000000677612122212050015753 0ustar liggesusers\name{logisregmixEM} \title{EM Algorithm for Mixtures of Logistic Regressions} \alias{logisregmixEM} \usage{ logisregmixEM(y, x, N = NULL, lambda = NULL, beta = NULL, k = 2, addintercept = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) } \description{ Returns EM algorithm output for mixtures of logistic regressions with arbitrarily many components. } \arguments{ \item{y}{An n-vector of successes out of N trials.} \item{x}{An nxp matrix of predictors. See \code{addintercept} below.} \item{N}{An n-vector of number of trials for the logistic regression. If NULL, then \code{N} is an n-vector of 1s for binary logistic regression.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is random from uniform Dirichlet and number of components is determined by \code{beta}.} \item{beta}{Initial value of \code{beta} parameters. 
Should be a pxk matrix, where p is the number of columns of x and k is number of components. If NULL, then \code{beta} is generated by binning the data into k bins and using \code{glm} on the values in each of the bins. If both \code{lambda} and \code{beta} are NULL, then number of components is determined by \code{k}.} \item{k}{Number of components. Ignored unless \code{lambda} and \code{beta} are both NULL.} \item{addintercept}{If TRUE, a column of ones is appended to the x matrix before the value of p is calculated.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{logisregmixEM} returns a list of class \code{mixEM} with items: \item{x}{The predictor values.} \item{y}{The response values.} \item{lambda}{The final mixing proportions.} \item{beta}{The final logistic regression coefficients.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{poisregmixEM}} } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. } \examples{ ## EM output for data generated from a 2-component logistic regression model. set.seed(100) beta <- matrix(c(1, .5, 2, -.8), 2, 2) x <- runif(50, 0, 10) x1 <- cbind(1, x) xbeta <- x1\%*\%beta N <- ceiling(runif(50, 50, 75)) w <- rbinom(50, 1, .3) y <- w*rbinom(50, size = N, prob = (1/(1+exp(-xbeta[, 1]))))+ (1-w)*rbinom(50, size = N, prob = (1/(1+exp(-xbeta[, 2])))) out.1 <- logisregmixEM(y, x, N, verb = TRUE, epsilon = 1e-01) out.1 ## EM output for data generated from a 2-component binary logistic regression model. 
beta <- matrix(c(-10, .1, 20, -.1), 2, 2) x <- runif(500, 50, 250) x1 <- cbind(1, x) xbeta <- x1\%*\%beta w <- rbinom(500, 1, .3) y <- w*rbinom(500, size = 1, prob = (1/(1+exp(-xbeta[, 1]))))+ (1-w)*rbinom(500, size = 1, prob = (1/(1+exp(-xbeta[, 2])))) out.2 <- logisregmixEM(y, x, beta = beta, lambda = c(.3, .7), verb = TRUE, epsilon = 1e-01) out.2 } \keyword{file} mixtools/man/segregmixEM.Rd0000755000176200001440000001747012334534340015423 0ustar liggesusers\name{segregmixEM} \title{ECM Algorithm for Mixtures of Regressions with Changepoints} \alias{segregmixEM} \alias{regmixEM.chgpt} \usage{ segregmixEM(y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2, seg.Z, psi, psi.locs = NULL, delta = NULL, epsilon = 1e-08, maxit = 10000, verb = FALSE, max.restarts = 15) } \description{ Returns ECM algorithm output for mixtures of multiple regressions with changepoints and arbitrarily many components. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An nxp matrix of predictors. Note that this model assumes the presence of an intercept.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is random from uniform Dirichlet and the number of components is determined by \code{beta}.} \item{beta}{Initial value of \code{beta} parameters. This is a list of length \code{k} such that each element must contain a vector having length consistent with the defined changepoint structure. See \code{seg.Z}, \code{psi}, and \code{psi.loc} below. If NULL, then \code{beta} has standard normal entries according to a binning method done on the data. If both \code{lambda} and \code{beta} are NULL, then number of components is determined by \code{sigma}.} \item{sigma}{A vector of standard deviations. If NULL, then 1/\code{sigma}^2 has random standard exponential entries according to a binning method done on the data. 
If \code{lambda}, \code{beta}, and \code{sigma} are NULL, then number of components is determined by \code{k}.} \item{k}{Number of components. Ignored unless all of \code{lambda}, \code{beta}, and \code{sigma} are NULL.} \item{seg.Z}{A list of length \code{k} whose elements are right-hand side formulas, which are additive linear models of the predictors that have changepoints in their respective components. See below for more details.} \item{psi}{A kxp matrix specifying the number of changepoints for each predictor in each component. See below for more details.} \item{psi.locs}{A list of length \code{k} that has initial estimates for the changepoint locations. Each element of the list must have length equal to the number of chanegpoints specified in the corresponding row of the \code{psi} matrix. For components with no changepoints, simply set that element equal to NULL. See below for more details.} \item{delta}{An optional list of values quantifying the amount of separation at each changepoint if assuming discontinuities at the changepoints. 
This has the same dimensions as \code{psi.locs}.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} \item{max.restarts}{The number of times to try restarting the ECM algorithm if estimation problems occur - such as choice of poor initial values or a poorly chosen changepoint structure.} } \value{ \code{segregmixEM} returns a list of class \code{segregmixEM} with items: \item{x}{The set of predictors.} \item{y}{The response values.} \item{lambda}{The final mixing proportions.} \item{beta}{The final regression coefficients.} \item{sigma}{The final standard deviations.} \item{seg.Z}{The list of right-hand side formulas as defined by the user.} \item{psi.locs}{A list of length k with the final estimates for the changepoint locations.} \item{delta}{A list of the delta values that were optionally specified by the user.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \details{ \code{seg.Z} is defined as a list of right-hand side linear model formulas that are used to identify which predictors have changepoints in each component. For example, suppose you have a dataframe with three predictors: \code{V1}, \code{V2}, \code{V3}. Suppose now that you wish to model a 3-component mixture of regressions with changepoints structure such that the first component has changepoints in V1 and V2, the second component has changepoints in \code{V3}, and the third component has no changepoints. Then you would define \code{seg.Z = list(~V1+V2, ~V3, NULL)}. 
Note that you MUST place the variables in order with respect to how they appear in the predictor matrix \code{x}. \code{psi} is a kxp matrix specifying the number of changepoints for each predictor in each component. For the example given above, suppose there are three changepoints for \code{V1}, two changepoints for \code{V2}, and four changepoints for \code{V3}. Then you would define \code{psi = rbind(c(3, 2, 0), c(0, 0, 4), c(0, 0, 0))}. \code{psi.locs} is a list of length k whose elements give the initial locations of the changepoints for each component. Each element of the list must have length equal to the total number of changepoints for that component's regression equation. For the example given above, in component 1, assume that the three changepoints for \code{V1} are at 3, 7, and 10 and the two changepoints for \code{V1} are at 5, 20, and 30. In component 2, assume that the four changepoints for \code{V3} are at 2, 4, 6, and 8. Then you would define \code{psi.locs = list(c(3, 7, 10, 5, 20, 30), c(2, 4, 6, 8), NULL)}. Note that the order of the changepoints is determined by first sorting the predictors by how they appear in the formulas in \code{seg.Z} and then sorting in increasing order within each predictor. } \seealso{ \code{\link{regmixEM}} } \references{ Young, D. S. (2014) Mixtures of Regressions with Changepoints, \emph{Statistics and Computing}, \bold{24(2)}, 265--281. } \note{ As of version 0.4.6, this more general function has replaced the now defunct \code{regmixEM.chgpt} and associated internal functions. } \examples{ \dontrun{ ## Simulated example. 
set.seed(100) x <- 1:20 y1 <- 3 + x + rnorm(20) y2 <- 3 - x - 5*(x - 15)*(x > 15) + rnorm(20) y <- c(y1, y2) x <- c(x, x) set.seed(100) be <- list(c(3, -1, -5), c(3, 1)) s <- c(1, 1) psi.locs <- list(comp.1 = list(x = 15), comp.2 = NULL) out <- segregmixEM(y, cbind(1,x), verb = TRUE, k = 2, beta = be, sigma = s, lambda = c(1, 1)/2, seg.Z = list(~x, NULL), psi = rbind(1, 0), psi.locs = psi.locs, epsilon = 0.9) z <- seq(0, 21, len = 40) plot(x, y, col = apply(out$post, 1, which.max) + 1, pch = 19, cex.lab = 1.4, cex = 1.4) b <- out$beta d <- out$psi.locs lines(z, b[[1]][1] + b[[1]][2] * z + b[[1]][3] * (z - d[[1]][[1]]) * (z > d[[1]][[1]]) , col = 2, lwd = 2) lines(z, b[[2]][1] + b[[2]][2] * z, col = 3, lwd = 2) abline(v = out$psi.locs[[1]][1], col = 2, lty = 2) } \dontrun{ ## Example using the NOdata. data(NOdata) attach(NOdata) set.seed(100) be <- list(c(1.30, -0.13, 0.08), c(0.56, 0.09)) s <- c(0.02, 0.04) psi.locs <- list(comp.1 = list(NO = 1.57), comp.2 = NULL) out <- segregmixEM(Equivalence, cbind(NO), verb = TRUE, k = 2, beta = be, sigma = s, lambda = c(1, 1)/2, seg.Z = list(~NO, NULL), psi = rbind(1, 0), psi.locs = psi.locs, epsilon = 0.1) z <- seq(0, 5, len = 1000) plot(NOdata, col = apply(out$post, 1, which.max) + 1, pch = 19, cex.lab = 1.4, cex = 1.4, ylab = "Equivalence Ratio") b <- out$beta d <- out$psi.locs lines(z, b[[1]][1] + b[[1]][2] * z + b[[1]][3] * (z - d[[1]][[1]]) * (z > d[[1]][[1]]) , col = 2, lwd = 2) lines(z, b[[2]][1] + b[[2]][2] * z, col = 3, lwd = 2) abline(v = out$psi.locs[[1]][1], col = 2, lty = 2) detach(NOdata) } } \keyword{file} mixtools/man/RodFramedata.Rd0000755000176200001440000000226513616576545015555 0ustar liggesusers\name{RodFramedata} \docType{data} \title{Rod and Frame Task Data Set} \alias{RodFramedata} \usage{ data(RodFramedata) } \description{ This data set involves assessing children longitudinally at 6 age points from ages 4 through 18 years for the rod and frame task. 
This task sits the child in a darkened room in front of a luminous square frame tilted at 28 degrees on its axis to the left or right. Centered inside the frame was a luminous rod also tilted 28 degrees to the left or right. The child's task was to adjust the rod to the vertical position and the absolute deviation from the vertical (in degrees) was the measured response. } \format{This data frame consists of 140 children (the rows). Column 1 is the subject number and column 2 is the sex (0=MALE and 1=FEMALE). Columns 3 through 26 give the 8 responses at each of the ages 4, 5, and 7. Columns 27 through 56 give the 10 responses at each of the ages 11, 14, and 18. A value of 99 denotes missing data. } \source{ Thomas, H. and Dahlin, M. P. (2005) Individual Development and Latent Groups: Analytical Tools for Interpreting Heterogeneity, \emph{Developmental Review} \bold{25(2)}, 133--154. } \keyword{datasets} mixtools/man/regmixEM.lambda.Rd0000755000176200001440000000675212122214214016132 0ustar liggesusers\name{regmixEM.lambda} \title{EM Algorithm for Mixtures of Regressions with Local Lambda Estimates} \alias{regmixEM.lambda} \usage{ regmixEM.lambda(y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-8, maxit = 10000, verb = FALSE) } \description{ Returns output for one step of an EM algorithm output for mixtures of multiple regressions where the mixing proportions are estimated locally. } \arguments{ \item{y}{An n-vector of response values.} \item{x}{An nxp matrix of predictors. See \code{addintercept} below.} \item{lambda}{An nxk matrix of initial local values of mixing proportions. Entries should sum to 1. This determines number of components. If NULL, then \code{lambda} is simply one over the number of components.} \item{beta}{Initial value of \code{beta} parameters. Should be a pxk matrix, where p is the number of columns of x and k is number of components. 
If NULL, then \code{beta} has uniform standard normal entries. If both \code{lambda} and \code{beta} are NULL, then number of components is determined by \code{sigma}.} \item{sigma}{k-vector of initial global values of standard deviations. If NULL, then \eqn{1/\code{sigma}^2} has random standard exponential entries. If \code{lambda}, \code{beta}, and \code{sigma} are NULL, then number of components is determined by \code{k}.} \item{k}{The number of components. Ignored unless all of \code{lambda}, \code{beta}, and \code{sigma} are NULL.} \item{addintercept}{If TRUE, a column of ones is appended to the x matrix before the value of p is calculated.} \item{arbmean}{If TRUE, each mixture component is assumed to have a different set of regression coefficients (i.e., the \code{beta}s).} \item{arbvar}{If TRUE, each mixture component is assumed to have a different \code{sigma}.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{regmixEM.lambda} returns a list of class \code{mixEM} with items: \item{x}{The set of predictors (which includes a column of 1's if \code{addintercept} = TRUE).} \item{y}{The response values.} \item{lambda}{The inputted mixing proportions.} \item{beta}{The final regression coefficients.} \item{sigma}{The final standard deviations. If \code{arbmean} = FALSE, then only the smallest standard deviation is returned. See \code{scale} below.} \item{scale}{If \code{arbmean} = FALSE, then the scale factor for the component standard deviations is returned. 
Otherwise, this is omitted from the output.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \details{ Primarily used within \code{regmixEM.loc}. } \seealso{ \code{\link{regmixEM.loc}} } \examples{ ## Compare a 2-component and 3-component fit to NOdata. data(NOdata) attach(NOdata) set.seed(100) out1 <- regmixEM.lambda(Equivalence, NO) out2 <- regmixEM.lambda(Equivalence, NO, k = 3) c(out1$loglik, out2$loglik) } \keyword{file} mixtools/man/compCDF.Rd0000755000176200001440000000447212122210100014434 0ustar liggesusers\name{compCDF} \title{Plot the Component CDF} \alias{compCDF} \usage{ compCDF(data, weights, x=seq(min(data, na.rm=TRUE), max(data, na.rm=TRUE), len=250), comp=1:NCOL(weights), makeplot=TRUE, ...) } \description{ Plot the components' CDF via the posterior probabilities. } \arguments{ \item{data}{A matrix containing the raw data. Rows are subjects and columns are repeated measurements.} \item{weights}{The weights to compute the empirical CDF; however, most of time they are the posterior probabilities.} \item{x}{The points at which the CDFs are to be evaluated.} \item{comp}{The mixture components for which CDFs are desired.} \item{makeplot}{Logical: Should a plot be produced as a side effect?} \item{...}{Additional arguments (other than \code{lty} and \code{type}, which are already used) to be passed directly to \code{plot} and \code{lines} functions.} } \value{ A matrix with \code{length(comp)} rows and \code{length(x)} columns in which each row gives the CDF evaluated at each point of \code{x}. } \details{ When \code{makeplot} is \code{TRUE}, a line plot is produced of the CDFs evaluated at \code{x}. 
The plot is not a step function plot; the points \eqn{(x, CDF(x))} are simply joined by line segments. } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley \& Sons, Inc. Elmore, R. T., Hettmansperger, T. P. and Xuan, F. (2004) The Sign Statistic, One-Way Layouts and Mixture Models, \emph{Statistical Science} \bold{19(4)}, 579--587. } \seealso{ \code{\link{makemultdata}}, \code{\link{multmixmodel.sel}}, \code{\link{multmixEM}}. } \examples{ ## The sulfur content of the coal seams in Texas set.seed(100) A <- c(1.51, 1.92, 1.08, 2.04, 2.14, 1.76, 1.17) B <- c(1.69, 0.64, .9, 1.41, 1.01, .84, 1.28, 1.59) C <- c(1.56, 1.22, 1.32, 1.39, 1.33, 1.54, 1.04, 2.25, 1.49) D <- c(1.3, .75, 1.26, .69, .62, .9, 1.2, .32) E <- c(.73, .8, .9, 1.24, .82, .72, .57, 1.18, .54, 1.3) dis.coal <- makemultdata(A, B, C, D, E, cuts = median(c(A, B, C, D, E))) temp <- multmixEM(dis.coal) ## Now plot the components' CDF via the posterior probabilities compCDF(dis.coal$x, temp$posterior, xlab="Sulfur", ylab="", main="empirical CDFs") } \keyword{file} mixtools/man/multmixmodel.sel.Rd0000755000176200001440000000313012546524074016505 0ustar liggesusers\name{multmixmodel.sel} \title{Model Selection Mixtures of Multinomials} \alias{multmixmodel.sel} \usage{ multmixmodel.sel(y, comps = NULL, ...) } \description{ Assess the number of components in a mixture of multinomials model using the Akaike's information criterion (AIC), Schwartz's Bayesian information criterion (BIC), Bozdogan's consistent AIC (CAIC), and Integrated Completed Likelihood (ICL). } \arguments{ \item{y}{Either An nxp matrix of data (multinomial counts), where n is the sample size and p is the number of multinomial bins, or the output of the \code{\link{makemultdata}} function. It is not necessary that all of the rows contain the same number of multinomial trials (i.e., the row sums of \code{y} need not be identical).} \item{comps}{Vector containing the numbers of components to consider. 
If NULL, this is set to be 1:(max possible), where (max possible) is floor((m+1)/2) and m is the minimum row sum of y.} \item{...}{Arguments passed to \code{multmixEM} that control convergence of the underlying EM algorithm.} } \value{ \code{multmixmodel.sel} returns a table summarizing the AIC, BIC, CAIC, ICL, and log-likelihood values along with the winner (the number with the lowest aforementioned values). } \seealso{ \code{\link{compCDF}}, \code{\link{makemultdata}}, \code{\link{multmixEM}} } \examples{ ##Data generated using the multinomial cutpoint method. set.seed(100) x <- matrix(rpois(70, 6), 10, 7) x.new <- makemultdata(x, cuts = 5) multmixmodel.sel(x.new$y, comps = c(1,2), epsilon = 1e-03) } \keyword{file} mixtools/man/initializations.Rd0000755000176200001440000000431612112160306016403 0ustar liggesusers%gammamix things temporarily commented out by DRH on 8-29-2008 %gammamix things uncommented by DSY on 10-2-2009 \name{mixtools initializations} \alias{flaremix.init} \alias{gammamix.init} \alias{logisregmix.init} \alias{multmix.init} \alias{mvnormalmix.init} \alias{normalmix.init} \alias{poisregmix.init} %\alias{regmix.chgpt.init} \alias{regmix.init} \alias{regmix.lambda.init} \alias{regmix.mixed.init} \alias{repnormmix.init} \alias{segregmix.init} \title{Initializations for Various EM Algorithms in 'mixtools'} \description{ Internal intialization functions for EM algorithms in the package \code{mixtools}. 
} \usage{ flaremix.init(y, x, lambda = NULL, beta = NULL, sigma = NULL, alpha = NULL) gammamix.init(x, lambda = NULL, alpha = NULL, beta = NULL, k = 2) logisregmix.init(y, x, N, lambda = NULL, beta = NULL, k = 2) multmix.init(y, lambda = NULL, theta = NULL, k = 2) mvnormalmix.init(x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, arbmean = TRUE, arbvar = TRUE) normalmix.init(x, lambda = NULL, mu = NULL, s = NULL, k = 2, arbmean = TRUE, arbvar = TRUE) poisregmix.init(y, x, lambda = NULL, beta = NULL, k = 2) %regmix.chgpt.init(y, x, lambda = NULL, beta = NULL, % gamma = NULL, sigma = NULL, t = NULL, k = 2) regmix.init(y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar=TRUE) regmix.lambda.init(y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar = TRUE) regmix.mixed.init(y, x, w = NULL, sigma = NULL, arb.sigma = TRUE, alpha = NULL, lambda = NULL, mu = NULL, R = NULL, arb.R = TRUE, k = 2, mixed = FALSE, addintercept.fixed = FALSE, addintercept.random = TRUE) repnormmix.init(x, lambda = NULL, mu = NULL, s = NULL, k = 2, arbmean = TRUE, arbvar = TRUE) segregmix.init(y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, seg.Z, psi, psi.locs = NULL) } \details{ These are usually not to be called by the user. Definitions of the arguments appear in the respective EM algorithms. } \keyword{internal} mixtools/man/summary.mixEM.Rd0000644000176200001440000000306413055603106015707 0ustar liggesusers\name{summary.mixEM} \alias{summary.mixEM} \title{Summarizing EM mixture model fits} \usage{ \method{summary}{mixEM}(object, digits=6, \dots) } \arguments{ \item{object}{an object of class \code{mixEM} such as a result of a call to \code{\link{normalmixEM}}} \item{digits}{Significant digits for printing values} \item{\dots}{further arguments passed to \code{print} method.} } \description{ \code{\link[base]{summary}} method for class \code{mixEM}. 
} \details{ \code{\link{summary.mixEM}} prints parameter estimates for each component of a fitted mixture model. The estimates printed vary with the type of model. } \value{ The function \code{\link{summary.mixEM}} prints the final loglikelihood value at the solution as well as a matrix of values for each component that could include: \item{lambda}{The estimated mixing weights} \item{mu}{The estimated mean parameters} \item{sigma}{The estimated standard deviations} \item{theta}{The estimated multinomial parameters} \item{beta}{The estimated regression parameters} } \seealso{ \code{\link{normalmixEM}}, \code{\link{logisregmixEM}}, \code{\link{multmixEM}}, \code{\link{mvnormalmixEM}}, \code{\link{poisregmixEM}}, \code{\link{regmixEM}}, \code{\link{regmixEM.lambda}}, \code{\link{regmixEM.loc}}, \code{\link{regmixEM.mixed}}, \code{\link{regmixEM.chgpt}}, \code{\link{repnormmixEM}}, \code{\link{expRMM_EM}}, \code{\link{weibullRMM_SEM}} } \examples{ data(faithful) attach(faithful) set.seed(100) out <- normalmixEM(waiting, mu=c(50,80), sigma=c(5,5), lambda=c(.5,.5)) summary(out) } \keyword{file} mixtools/man/repnormmixEM.Rd0000755000176200001440000000651712514002272015623 0ustar liggesusers\name{repnormmixEM} \title{EM Algorithm for Mixtures of Normals with Repeated Measurements} \alias{repnormmixEM} \usage{ repnormmixEM(x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) } \description{ Returns EM algorithm output for mixtures of normals with repeated measurements and arbitrarily many components. } \arguments{ \item{x}{An mxn matrix of data. The columns correspond to the subjects and the rows correspond to the repeated measurements.} \item{lambda}{Initial value of mixing proportions. Entries should sum to 1. This determines number of components. 
If NULL, then \code{lambda} is random from uniform Dirichlet and number of components is determined by \code{mu}.} \item{mu}{A k-vector of component means. If NULL, then \code{mu} is determined by a normal distribution according to a binning method done on the data. If both \code{lambda} and \code{mu} are NULL, then number of components is determined by \code{sigma}.} \item{sigma}{A vector of standard deviations. If NULL, then \eqn{1/\code{sigma}^2} has random standard exponential entries according to a binning method done on the data. If \code{lambda}, \code{mu}, and \code{sigma} are NULL, then number of components is determined by \code{k}.} \item{k}{Number of components. Ignored unless all of \code{lambda}, \code{mu}, and \code{sigma} are NULL.} \item{arbmean}{If TRUE, then the component densities are allowed to have different \code{mu}s. If FALSE, then a scale mixture will be fit.} \item{arbvar}{If TRUE, then the component densities are allowed to have different \code{sigma}s. If FALSE, then a location mixture will be fit.} \item{epsilon}{The convergence criterion.} \item{maxit}{The maximum number of iterations.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{repnormmixEM} returns a list of class \code{mixEM} with items: \item{x}{The raw data.} \item{lambda}{The final mixing proportions.} \item{mu}{The final mean parameters.} \item{sigma}{The final standard deviations. If \code{arbmean} = FALSE, then only the smallest standard deviation is returned. See \code{scale} below.} \item{scale}{If \code{arbmean} = FALSE, then the scale factor for the component standard deviations is returned. 
Otherwise, this is omitted from the output.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{normalmixEM}} } \references{ Hettmansperger, T. P. and Thomas, H. (2000) Almost Nonparametric Inference for Repeated Measures in Mixture Models, \emph{Journal of the Royals Statistical Society, Series B} \bold{62(4)} 811--825. } \examples{ ## EM output for the water-level task data set. data(Waterdata) set.seed(100) water <- t(as.matrix(Waterdata[,3:10])) em.out <- repnormmixEM(water, k = 2, verb = TRUE, epsilon = 1e-03) em.out } \keyword{file} mixtools/man/normalmixMMlc.Rd0000755000176200001440000002062313046171330015755 0ustar liggesusers\name{normalmixMMlc} \title{EC-MM Algorithm for Mixtures of Univariate Normals with linear constraints} \alias{normalmixMMlc} \usage{ normalmixMMlc(x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, mean.constr = NULL, mean.lincstr = NULL, mean.constant = NULL, var.lincstr = NULL, gparam = NULL, epsilon = 1e-08, maxit = 1000, maxrestarts=20, verb = FALSE) } \description{ Return EC-MM (see below) algorithm output for mixtures of normal distributions with linear constraints on the means and variances parameters, as in Chauveau and Hunter (2013). The linear constraint for the means is of the form \eqn{\mu = M \beta + C}, where \eqn{M} and \eqn{C} are matrix and vector specified as parameters. The linear constraints for the variances are actually specified on the inverse variances, by \eqn{\pi = A \gamma}, where \eqn{\pi} is the vector of inverse variances, and \eqn{A} is a matrix specified as a parameter (see below). 
} \details{ This is a specific "EC-MM" algorithm for normal mixtures with linear constraints on the means and variances parameters. EC-MM here means that this algorithm is similar to an ECM algorithm as in Meng and Rubin (1993), except that it uses conditional MM (Minorization-Maximization)-steps instead of simple M-steps. Conditional means that it alternates between maximizing with respect to the \code{mu} and \code{lambda} while holding \code{sigma} fixed, and maximizing with respect to \code{sigma} and \code{lambda} while holding \code{mu} fixed. This ECM generalization of EM is forced in the case of linear constraints because there is no closed-form EM algorithm. } \arguments{ \item{x}{A vector of length n consisting of the data.} \item{lambda}{Initial value of mixing proportions. Automatically repeated as necessary to produce a vector of length \code{k}, then normalized to sum to 1. If \code{NULL}, then \code{lambda} is random from a uniform Dirichlet distribution (i.e., its entries are uniform random and then it is normalized to sum to 1).} \item{mu}{Starting value of vector of component means. If non-NULL and a vector, \code{k} is set to \code{length(mu)}. If NULL, then the initial value is randomly generated from a normal distribution with center(s) determined by binning the data.} \item{sigma}{Starting value of vector of component standard deviations for algorithm. Obsolete for linear constraints on the inverse variances; use \code{gparam} instead to specify a starting value.} \item{k}{Number of components. Initial value ignored unless \code{mu} and \code{sigma} are both NULL.} \item{mean.constr}{First, simplest way to define equality constraints on the mean parameters, given as a vector of length \code{k}, as in \code{\link{normalmixEM}}. Each vector entry specifies the constraints, if any, on the corresponding mean parameter: If \code{NA}, the corresponding parameter is unconstrained. If numeric, the corresponding parameter is fixed at that value. 
If a character string consisting of a single character preceded by a coefficient, such as \code{"0.5a"} or \code{"-b"}, all parameters using the same single character in their constraints will fix these parameters equal to the coefficient times some the same free parameter. For instance, if \code{mean.constr = c(NA, 0, "a", "-a")}, then the first mean parameter is unconstrained, the second is fixed at zero, and the third and forth are constrained to be equal and opposite in sign. Note: if there are no linear constraints for the means, it is more efficient to use directly \code{\link{normalmixEM}}.} \item{mean.lincstr}{Matrix \eqn{M} \eqn{(k,p)} in the linear constraint for the means equation \eqn{\mu = M \beta + C}, with \eqn{p \le k}.} \item{mean.constant}{Vector of \eqn{k} constants \eqn{C} in the linear constraint for the means equation \eqn{\mu = M \beta + C}.} \item{var.lincstr}{Matrix \eqn{A} \eqn{(k,q)} in the linear constraint for the inverse variances equation \eqn{\pi = A \gamma}, with \eqn{q \le k}.} \item{gparam}{Vector of \eqn{q} starting values for the \eqn{\gamma} parameter in the linear constraint for the inverse variances; see \code{var.lincstr}. If NULL, a vector of randomly generated standard exponential variables is used.} \item{epsilon}{The convergence criterion. Convergence is declared when the change in the observed data log-likelihood increases by less than epsilon.} \item{maxit}{The maximum allowed number of iterations.} \item{maxrestarts}{The maximum number of restarts allowed in case of a problem with the particular starting values chosen due to one of the variance estimates getting too small (each restart uses randomly chosen starting values). 
It is well-known that when each component of a normal mixture may have its own mean and variance, the likelihood has no maximizer; in such cases, we hope to find a "nice" local maximum with this algorithm instead, but occasionally the algorithm finds a "not nice" solution and one of the variances goes to zero, driving the likelihood to infinity.} \item{verb}{If TRUE, then various updates are printed during each iteration of the algorithm.} } \value{ \code{normalmixMMlc} returns a list of class \code{mixEM} with items: \item{x}{The raw data.} \item{lambda}{The final mixing proportions.} \item{mu}{The final mean parameters.} \item{sigma}{The final standard deviation(s)} \item{scale}{Scale factor for the component standard deviations, if applicable.} \item{loglik}{The final log-likelihood.} \item{posterior}{An nxk matrix of posterior probabilities for observations.} \item{all.loglik}{A vector of each iteration's log-likelihood. This vector includes both the initial and the final values; thus, the number of iterations is one less than its length.} \item{restarts}{The number of times the algorithm restarted due to unacceptable choice of initial values.} \item{beta}{The final \eqn{\beta} parameter estimate.} \item{gamma}{The final \eqn{\gamma} parameter estimate.} \item{ft}{A character vector giving the name of the function.} } \seealso{ \code{\link{normalmixEM}}, \code{\link{mvnormalmixEM}}, \code{\link{normalmixEM2comp}}, \code{\link{tauequivnormalmixEM}} } \references{ \itemize{ \item McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}, John Wiley & Sons, Inc. \item Meng, X.-L. and Rubin, D. B. (1993) Maximum Likelihood Estimation Via the ECM Algorithm: A General Framework, \emph{Biometrika} 80(2): 267-278. \item Chauveau, D. and Hunter, D.R. (2013) ECM and MM algorithms for mixtures with constrained parameters, \emph{preprint \url{http://hal.archives-ouvertes.fr/hal-00625285}}. \item Thomas, H., Lohaus, A., and Domsch, H. 
(2011) Stable Unstable Reliability Theory, \emph{British Journal of Mathematical and Statistical Psychology} 65(2): 201-221. } } \author{Didier Chauveau} \examples{ ## Analyzing synthetic data as in the tau equivalent model ## From Thomas et al (2011), see also Chauveau and Hunter (2013) ## a 3-component mixture of normals with linear constraints. lbd <- c(0.6,0.3,0.1); m <- length(lbd) sigma <- sig0 <- sqrt(c(1,9,9)) # means constaints mu = M beta M <- matrix(c(1,1,1,0,-1,1), 3, 2) beta <- c(1,5) # unknown constrained mean mu0 <- mu <- as.vector(M \%*\% beta) # linear constraint on the inverse variances pi = A.g A <- matrix(c(1,1,1,0,1,0), m, 2, byrow=TRUE) iv0 <- 1/(sig0^2) g0 <- c(iv0[2],iv0[1] - iv0[2]) # gamma^0 init # simulation and EM fits set.seed(50); n=100; x <- rnormmix(n,lbd,mu,sigma) s <- normalmixEM(x,mu=mu0,sigma=sig0,maxit=2000) # plain EM # EM with var and mean linear constraints sc <- normalmixMMlc(x, lambda=lbd, mu=mu0, sigma=sig0, mean.lincstr=M, var.lincstr=A, gparam=g0) # plot and compare both estimates dnormmixt <- function(t, lam, mu, sig){ m <- length(lam); f <- 0 for (j in 1:m) f <- f + lam[j]*dnorm(t,mean=mu[j],sd=sig[j]) f} t <- seq(min(x)-2, max(x)+2, len=200) hist(x, freq=FALSE, col="lightgrey", ylim=c(0,0.3), ylab="density",main="") lines(t, dnormmixt(t, lbd, mu, sigma), col="darkgrey", lwd=2) # true lines(t, dnormmixt(t, s$lambda, s$mu, s$sigma), lty=2) lines(t, dnormmixt(t, sc$lambda, sc$mu, sc$sigma), col=1, lty=3) legend("topleft", c("true","plain EM","constr EM"), col=c("darkgrey",1,1), lty=c(1,2,3), lwd=c(2,1,1)) } \keyword{file} mixtools/DESCRIPTION0000755000176200001440000000361013617252762013654 0ustar liggesusersPackage: mixtools Version: 1.2.0 Date: 2020-02-05 Title: Tools for Analyzing Finite Mixture Models Authors@R: c(person("Derek", "Young", role = c("aut", "cre"), email = "derek.young@uky.edu", comment = c(ORCID = "0000-0002-3048-3803")), person("Tatiana", "Benaglia", role = "aut"), person("Didier", "Chauveau", role = 
"aut"), person("David", "Hunter", role = "aut"), person("Ryan", "Elmore", role = "ctb"), person("Thomas", "Hettmansperger", role = "ctb"), person("Hoben", "Thomas", role = "ctb"), person("Fengjuan", "Xuan", role = "ctb")) Depends: R (>= 3.5.0) Imports: kernlab, MASS, segmented, stats, survival Description: Analyzes finite mixture models for various parametric and semiparametric settings. This includes mixtures of parametric distributions (normal, multivariate normal, multinomial, gamma), various Reliability Mixture Models (RMMs), mixtures-of-regressions settings (linear regression, logistic regression, Poisson regression, linear regression with changepoints, predictor-dependent mixing proportions, random effects regressions, hierarchical mixtures-of-experts), and tools for selecting the number of components (bootstrapping the likelihood ratio test statistic, mixturegrams, and model selection criteria). Bayesian estimation of mixtures-of-linear-regressions models is available as well as a novel data depth method for obtaining credible bands. This package is based upon work supported by the National Science Foundation under Grant No. SES-0518772. 
License: GPL (>= 2) NeedsCompilation: yes Packaged: 2020-02-06 13:42:31 UTC; derekyoung Author: Derek Young [aut, cre] (), Tatiana Benaglia [aut], Didier Chauveau [aut], David Hunter [aut], Ryan Elmore [ctb], Thomas Hettmansperger [ctb], Hoben Thomas [ctb], Fengjuan Xuan [ctb] Maintainer: Derek Young Repository: CRAN Date/Publication: 2020-02-07 12:20:02 UTC mixtools/build/0000755000176200001440000000000013617013707013234 5ustar liggesusersmixtools/build/vignette.rds0000644000176200001440000000032113617013707015567 0ustar liggesusersb```b`fcb`b2 1# 'ͬ() +G))8E )98)HICBXt0XXT%榢Z]?4-ީE0=(jؠjX2sRad9.nP&c0Gq?gQ~oݣ9JI,IK+ ātmixtools/src/0000755000176200001440000000000013617013707012724 5ustar liggesusersmixtools/src/multinompost.c0000755000176200001440000000302411665556372015657 0ustar liggesusers#include #include /* Compute the matrix of "posterior" probabilities in a finite mixture of multinomial distributions. The algorithm used is fairly safe from a numerical perspective; it avoids over- or under-flow as long as the values of sigma are not too large or small. */ void multinompost( int *nn, /* sample size */ int *mm, /* number of components */ double *loglamcd, /* n by m matrix of log(lambda * component density) values */ double *post, /* n by m matrix of posterior probabilities */ double *loglik /* scalar loglikelihood value (input value is a constant, which is modified before return). */ ) { int n=*nn, m=*mm, i, j, maxj; double sum, max, *loglamcolumnptr; for (loglamcolumnptr=loglamcd, i=0; i max) { max = loglamcolumnptr[j]; maxj = j; } } sum = 1.0; for(j = 0; j #include /* Just like KDEsymloc2.c except that we do not symmetrize. 
This works because the assumption of symmetry is not necessary in the case of regression, where the (i,j) mean depends not only on the jth component but also on the ith predictor value */ void KDEloc2( int *n, /* Sample size */ int *m, /* Number of components */ double *mu, /* nn*mm vector of current mean estimates */ double *x, /* data: vector of length n */ double *h, /* bandwidth */ double *z, /* nn*mm vector of normalized posteriors (or indicators in stochastic case), normalized by "column" */ double *f /* KDE evaluated at n*m matrix of points, namely, x_i - mu_ij for 1<=i<=n and 1<=j<=m */ ) { int nn=*n, mm=*m, i, j, a, b; double sum, u1, u2, tmp1, hh=*h; double const1 = -1.0 / (2.0 * hh * hh); double const2 = 0.39894228040143267794/(hh*(double)nn); /* .3989...=1/(sqrt(2*pi)) */ /* Must loop n^2*m^2 times; each f entry requires n*m calculations */ for(a=0; a #include #include #include /* Simultaneously calculate m different multivariate weighted KDEs (mvwkde) 1 for each component, for a fixed value of block index l, and same bandwidth for all components (option samebw=TRUE) final value is a (n,m) matrix f where f[i,j]=\hat f_{jl}(u_i) */ void mvwkde_samebw ( int *nn, /* Sample size */ int *dd, /* Number of coordinates in l-th block */ int *mm, /* Number of components */ double *h, /* bandwidth d-vector */ double *x, /* data: vector of length nn*rr */ double *u, /* data: vector of length nn*rr (u=xfor now, ToDo for any u!) */ double *z, /* nn*mm vector of normalized posteriors */ double *f /* nxm matrix of weighted KDE - multidim */ ) { int n=*nn, d=*dd, dn=*dd*n, mn=*mm*n, iu, ix; int kn, jn, id; double tmp1, tmp2, sum1, sum2, xik, uik, hk; double c2, det_h; /*const float const1 = -0.5;*/ double const2 = 0.39894228040143267794; /* =1/(sqrt(2*pi)) */ /* computing the constant from bandwidth matrix and d */ det_h = 1.0; for (id=0; id #include #include #include #include // for NULL #include /* FIXME: Check these declarations against the C/Fortran source code. 
*/ /* .C calls */ extern void mudepth(void *, void *, void *, void *, void *, void *, void *); extern void KDEloc2(void *, void *, void *, void *, void *, void *, void *); extern void KDElocscale(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void KDErepeated(void *, void *, void *, void *, void *, void *, void *); extern void KDErepeatedbw(void *, void *, void *, void *, void *, void *, void *); extern void KDEsymloc(void *, void *, void *, void *, void *, void *, void *); extern void KDEsymloc1comp(void *, void *, void *, void *, void *, void *, void *); extern void KDEsymloc2(void *, void *, void *, void *, void *, void *, void *); extern void multinompost(void *, void *, void *, void *, void *); extern void mvwkde_adaptbw(void *, void *, void *, void *, void *, void *, void *, void *); extern void mvwkde_samebw(void *, void *, void *, void *, void *, void *, void *, void *); extern void newz(void *, void *, void *, void *, void *); extern void normpost(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void npMSL_Estep(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void npMSL_Estep_bw(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void npMSL_Mstep(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void npMSL_Mstep_bw(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); static const R_CMethodDef CEntries[] = { {"mudepth", (DL_FUNC) &mudepth, 7}, {"KDEloc2", (DL_FUNC) &KDEloc2, 7}, {"KDElocscale", (DL_FUNC) &KDElocscale, 10}, {"KDErepeated", (DL_FUNC) &KDErepeated, 7}, {"KDErepeatedbw", (DL_FUNC) &KDErepeatedbw, 7}, {"KDEsymloc", (DL_FUNC) &KDEsymloc, 7}, {"KDEsymloc1comp", (DL_FUNC) &KDEsymloc1comp, 7}, 
{"KDEsymloc2", (DL_FUNC) &KDEsymloc2, 7}, {"multinompost", (DL_FUNC) &multinompost, 5}, {"mvwkde_adaptbw", (DL_FUNC) &mvwkde_adaptbw, 8}, {"mvwkde_samebw", (DL_FUNC) &mvwkde_samebw, 8}, {"newz", (DL_FUNC) &newz, 5}, {"normpost", (DL_FUNC) &normpost, 10}, {"npMSL_Estep", (DL_FUNC) &npMSL_Estep, 15}, {"npMSL_Estep_bw",(DL_FUNC) &npMSL_Estep_bw, 15}, {"npMSL_Mstep", (DL_FUNC) &npMSL_Mstep, 13}, {"npMSL_Mstep_bw",(DL_FUNC) &npMSL_Mstep_bw, 13}, {NULL, NULL, 0} }; void R_init_mixtools(DllInfo *dll) { R_registerRoutines(dll, CEntries, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); R_forceSymbols(dll, TRUE); } mixtools/src/KDErepeated.c0000755000176200001440000000331511665556372015225 0ustar liggesusers#include #include #include #include /* simultaneously calculate m different products of KDEs, 1 for each component, as in equation (8) of Benaglia et al for a fixed value of \ell. If r is the number of coordinates in block \ell, then each final value is the r-fold product of KDEs */ void KDErepeated( int *nn, /* Sample size */ int *mm, /* Number of components */ int *rr, /* size of current block */ double *x, /* data: vector of length nn*rr */ double *hh, /* scalar bandwidth (compare to KDErepeatedbw) */ double *z, /* nn*mm vector of normalized posteriors (or indicators in stochastic case), normalized by "column" */ double *f /* nxm matrix of KDE products */ ) { int n=*nn, i, ii; int rn = *rr*n, mn=*mm*n, jn, kn, kkn; double sum1, sum2, tmp, h=*hh, xik; double const1 = -0.5 / (h * h); double const2 = 0.39894228040143267794/(h*(double)(*rr)); /* .3989...=1/(sqrt(2*pi)) */ for(jn=0; jn #include /* Slightly modified version of KDEsymloc.c that can handle an nxm matrix of current mean estimates (instead of merely an m-vector). 
This is useful for the regression case, where the (i,j) mean depends not only on the jth component but also on the ith predictor value */ void KDEsymloc2( int *n, /* Sample size */ int *m, /* Number of components */ double *mu, /* nn*mm vector of current mean estimates */ double *x, /* data: vector of length n */ double *h, /* bandwidth */ double *z, /* nn*mm vector of normalized posteriors (or indicators in stochastic case), normalized by "column" */ double *f /* KDE evaluated at n*m matrix of points, namely, x_i - mu_ij for 1<=i<=n and 1<=j<=m */ ) { int nn=*n, mm=*m, i, j, a, b; double sum, u1, u2, tmp1, tmp2, hh=*h; double const1 = -1.0 / (2.0 * hh * hh); double const2 = 0.39894228040143267794/(2.0*hh*(double)nn); /* .3989...=1/(sqrt(2*pi)) */ /* Must loop n^2*m^2 times; each f entry requires n*m calculations */ for(a=0; a /* Translated from FORTRAN code written by Fengjuan Xuan */ void mudepth (int *nn, int *tt, int *dd, double *mpt, double *x, int *count, double *sdep) { int n=*nn, t=*tt, d=*dd; int i, j, k, l; double d1, d2, d3, d5, xik, xjk, mptlk; for (l=0; l #include /* Implement symmetric kernel density-estimation step for location mixture model, equation (20) in Benaglia et al (2008) */ void KDEsymloc( int *n, /* Sample size */ int *m, /* Number of components */ double *mu, /* m-vector of current mean estimates */ double *x, /* data: vector of length n */ double *h, /* bandwidth */ double *z, /* nn*mm vector of normalized posteriors (or indicators in stochastic case), normalized by "column" */ double *f /* KDE evaluated at n*m matrix of points, namely, x_i - mu_j for 1<=i<=n and 1<=j<=m */ ) { int nn=*n, mm=*m, i, j, a, b; double sum, u1, u2, tmp1, tmp2, hh=*h; double const1 = -1.0 / (2.0 * hh * hh); double const2 = 0.39894228040143267794/(2.0*hh*(double)nn); /* .3989...=1/(sqrt(2*pi)) */ /* Must loop n^2*m^2 times; each f entry requires n*m calculations */ for(a=0; a /* in order to make the sqrt function work */ /* First, create the function prototype 
(global declaration that there exists a function called new_svalues returning void and taking these arguments) */ void new_svalues (double *z, double *y, double *x, double *beta, int *k, int *n, int *p, double *out, double *sz, double *runsum); /* Next comes the function itself: */ void new_svalues (double *z, double *y, double *x, double *beta, int *k, int *n, int *p, double *out, double *sz, double *runsum) { int i, j, l; double sum; double xbeta; double diff; double diff2; double zdiff; double zdiff2; double zdiff3; /* Create the sz (column sum of z) vector */ for(j=0; j < *k ; j=j+1) { sum=0.0; for(i=0; i < *n; i=i+1) { sum += z[j * (*n) + i]; } sz[j]=sum; } for(j=0; j < *k ; j=j+1) { zdiff=0.0; for(i=0; i < *n; i=i+1) { xbeta=0.0; /* Calculate i,j component of x %*% beta */ for(l=0; l < *p; l++){ xbeta += x[l * (*n) + i] * beta[j * (*p) + l]; } /* Subtract it from the i component of y */ diff = y[i] - xbeta; /* square the difference */ diff2 = pow(diff,2); /* multiply by the i,j component of z */ zdiff += z[j * (*n) + i] * diff2; } /* keep track of running sum */ runsum[j] = zdiff; /* divide dot product by sz[j] */ zdiff2 = runsum[j] / sz[j]; /* take the square root */ zdiff3 = sqrt(zdiff2); /* put result in out[j] */ out[j] = zdiff3; } } mixtools/src/KDEsymloc1comp.c0000755000176200001440000000300612645535075015673 0ustar liggesusers#include #include /* Implement symmetric kernel density-estimation step for location mixture model with 1 component known , as in Bordes Delmas & Vandekerkhove (2006) normalization before symmetrization must be 1/(n\lambda_2)h */ void KDEsymloc1comp( int *n, /* Sample size */ double *mean, /* component 2 mean estimate, scalar */ double *lambda, /* component 2 weigtht, scalar */ double *x, /* data: vector of length n */ double *h, /* bandwidth */ double *z, /* nn*mm vector of normalized posteriors (or indicators in stochastic case), normalized by "column" */ double *f /* KDE evaluated at n vector of points, namely, x_i - mu for 
1<=i<=n */ ) { int nn=*n, i, j, a; double sum, u1, u2, tmp1, tmp2, hh=*h, mu=*mean, lbd=*lambda; double const1 = -1.0 / (2.0 * hh * hh); double const2 = 0.39894228040143267794/(2.0*hh*(double)nn*lbd); /* .3989...=1/(sqrt(2*pi)) */ /* loop over each f entry evaluated at x_a - mu */ for(a=0; a #include /* Implement kernel density-estimation step for location-scale mixture model (17) in Benaglia et al (2008) where each component and block has the same shape but may differ from others by a location and scale, implementing equation (18) */ void KDElocscale( int *nn, /* Sample size */ int *mm, /* Number of components */ int *rr, /* Number of coordinates */ int *blockid, /* r-vector of block numbers */ double *mu, /* m by max(blockid) matrix of current mean estimates */ double *sigma, /* m by max(blockid) matrix of current stdev estimates */ double *x, /* n by r data matrix */ double *hh, /* scalar bandwidth */ double *z, /* n by m vector of normalized posteriors (or indicators in stochastic case), normalized by "column" */ double *f /* n by m matrix of KDE products */ ) { int n=*nn, m=*mm, r=*rr; int i, j, k, ii, kk, ell, ell2; double sum1, sum2, tmp, h=*hh, u; double const1 = -0.5 / (h * h); double const2; for(j=0; j #include #include void newz (int *n, int *k, double *V, double *W, double *z) { int i, j, l, ind1, ind2, nn=*n, kk=*k; double sum; for(i=0; i< nn; i++) { for(j=0; j< kk; j++) { sum=1.0; ind1 = i + nn * j; for(l = 0; l < kk; l++) { if (l != j) { ind2 = i + nn * l; sum += V[ind2]/V[ind1]*exp(W[ind1]-W[ind2]); } } z[ind1] = 1.0/sum; } } } mixtools/src/KDErepeatedbw.c0000755000176200001440000000331411665556372015555 0ustar liggesusers#include #include #include #include /* simultaneously calculate m different products of KDEs, 1 for each component, as in equation (8) of Benaglia et al for a fixed value of \ell. 
If r is the number of coordinates in block \ell, then each final value is the r-fold product of KDEs */ void KDErepeatedbw( int *nn, /* Sample size */ int *mm, /* Number of components */ int *rr, /* size of current block */ double *x, /* data: vector of length nn*rr */ double *hh, /* m-vector of bandwidths (compare to KDErepeated) */ double *z, /* nn*mm vector of normalized posteriors (or indicators in stochastic case), normalized by "column" */ double *f /* nxm matrix of KDE products */ ) { int n=*nn, i, ii; int mn = *mm*n, rn=*rr*n, jn, kn, kkn; double sum1, sum2, tmp, xik; double const2 = 0.39894228040143267794/((double)(*rr)); /* .3989...=1/(sqrt(2*pi)) */ double const1; for(jn=0; jn #include /* Compute the matrix of "posterior" probabilities in a finite mixture of univariate normal densities. The algorithm used is fairly safe from a numerical perspective; it avoids over- or under-flow as long as the values of sigma are not too large or small. */ void normpost( int *nn, /* sample size */ int *mm, /* number of components */ double *data, /* vector of observations */ double *mu, /* current vector of component means */ double *sigma, /* current vector of component stdevs */ double *lambda, /* current vector of mixing parameters */ double *res2, /* n by m matrix of squared residuals */ double *work, /* 3*m-vector of workspace, which will be broken into 3 parts */ double *post, /* n by m matrix of posterior probabilities */ double *loglik /* scalar loglikelihood value */ ) { int n=*nn, m=*mm, i, j, minj=0; double x, r, rowsum, min=0.0; double *LamSigRatio = work+m; /* Second 1/3 of workspace, for frequently used constants */ double *logLamSigRatio = work+2*m; /* Third 1/3 of workspace, for frequently used constants */ *loglik = -(double)n * 0.91893853320467274178; /* n/2 times log(2pi) */ for (j=0; j #include #define MAX(a,b) ((a) > (b) ? 
(a) : (b)) /* In the npMSL algorithm (formerly NEMS), it is necessary to store the values of each f_{jk} density on a univariate grid of points, since the E-step involves a convolution (which means that it no longer suffices to store f_{jk} only at the data points x_{ik}, as it did for the original npEM algorithm of Benaglia et al). In the M-step, we must set each f_{jk}(u_a) equal to the sum (over i) of p_{ij} * phi((u_a - x_{ik}) / h) / h, where phi is the std. normal density which gives the normal kernel with bandwidth h. Thus, the M-step involves four nested loops, one each for i, j, k, and a. In the E-step, we must set each p_{ij} equal to K * lambda_j * N(f_j)(x_i), where: -- K is a normalizing constant ensuring that sum_j p_{ij} = 1 for all i. -- N(f_j)(x_i) is the product prod_j N(f_jk)(x_{ik}) -- N(f_{jk})(x_{ik}) = exp{integral of (1/h)*phi((u-x_{ik})/h)*log(f_{jk}(u))du} -- that last integral is approximated by the sum (over a) of (normalizing constant)*exp((u_a-x_{ik})^2/h^2)*log(f_{jk}(u_a)) Current version: with underflow handling in the E-step and block structure, */ /* ********************************************************** Version allowing blocks of conditionaly iid coordinates: f is now a ngrid by m by B array of f_{j ell} values on the grid note: *BB is not used here, but remains in the parameter list for consistency with the npMSL_Estep_bw() version which needs it */ void npMSL_Estep( int *nngrid, /* size of grid */ int *nn, /* sample size */ int *mm, /* number of components */ int *rr, /* number of repeated measurements */ int *BB, /* total nb of blocks (not used in samebw version) */ int *blockid, /* r-vector (b_k) block to which belongs coord k */ double *hh, /* bandwidth */ double *data, /* n by r vector of observations */ double *grid, /* grid points */ double *f, /* ngrid by m by B array of density values on grid */ double *lambda, /* current vector of mixing parameters */ double *post, /* n by m matrix of posterior probabilities */ 
double *loglik, /* scalar value of penalized loglikelihood */ int *nb_udfl, /* nb of underflows log(0) cancelled */ int *nb_nan /* nb of nonzero K()*log(0) cancelled */ ) { int n=*nn, m=*mm, r=*rr, ngrid=*nngrid; int i, j, k, ell, a; double sum, conv, xik, *fjl, two_h_squared =2*(*hh)*(*hh); double Delta = (grid[2]-grid[1]) / *hh / sqrt(2*3.14159265358979); double t1, expminus500=exp(-500); double epsi=1e-323; /* smallest number; maybe machine-dependent ? */ double epsi2=1e-100; /* assumed small enough for cancelling log(0) */ *loglik=0.0; for (i=0; i epsi) { /* no underflow pb */ conv += t1 * log(fjl[a]); } else if (t1 < epsi2) { /* assume kernel cancels log(0) part */ *nb_udfl +=1; /* count underflow replaced by 0 */ } else *nb_nan +=1; /* kernel *may* be not small enough ! */ } conv *= Delta; /* Delta is the normalizing constant */ conv = exp(conv); /* conv now = Nf_{jb_k}(xik) */ post[i + n*j] *= conv; /* numerator = lambda_j*prod_k Nf_{jb_k}(xik) */ } sum += post[i + n*j]; } *loglik += log(sum); for(j=0; j epsi) { /* no underflow pb */ conv += t1 * log(fjl[a]); } else if (t1 < epsi2) { /* assume kernel cancels log(0) part */ *nb_udfl +=1; /* count underlow replaced by 0 */ } else *nb_nan +=1; /* kernel *may* be not small enough ! */ } conv *= Delta; /* Delta is the normalizing constant */ conv = exp(conv); /* conv now = Nf_{jb_k}(xik) */ post[i + n*j] *= conv; /* numerator = lambda_j*prod_k Nf_{jb_k}(xik) */ } sum += post[i + n*j]; } *loglik += log(sum); for(j=0; j1$ arbitrary distributions, which we will call {\em components} throughout this article. The density of each $\vec X_i$ may be written \begin{equation} \label{mvmixture} g_{\f}(\vec x_i) = \sum_{j=1}^m\lambda_j\phi_j(\vec x_i), \quad \vec x_i\in\bR^r, \end{equation} where $\f=(\vec\lambda, \vec \phi) = (\lambda_1, \ldots, \lambda_m, \phi_1, \ldots, \phi_m)$ denotes the parameter and the $\lambda_m$ are positive and sum to unity. 
We assume that the $\phi_j$ are drawn from some family $\cal F$ of multivariate density functions absolutely continuous with respect to, say, Lebesgue measure. The representation \eqref{mvmixture} is not identifiable if no restrictions are placed on $\cal F$, where by ``identifiable'' we mean that $g_{\f}$ has a {\em unique} representation of the form \eqref{mvmixture} and we do not consider that ``label-switching'' --- i.e., reordering the $m$ pairs $(\lambda_1, \phi_1), \ldots, (\lambda_m, \phi_m)$ --- produces a distinct representation. In the next sections we will sometimes have to distinguish between {\em parametric} and more general {\em nonparametric} situations. This distinction is related to the structure of the family $\CF$ of distributions to which the component densities $\phi_j$ in model \eqref{mvmixture} belong. We say that the mixture is {\em parametric} if $\CF$ is a parametric family, $\CF = \{\phi(\cdot|\vec\xi), \vec\xi\in\bR^d\}$, indexed by a ($d$-dimensional) Euclidean parameter $\vec\xi$. A parametric family often used is the univariate Gaussian family $\CF = \{\phi(\cdot|\mu,\sigma^2)=\mbox{density of }\CN(\mu,\sigma^2), (\mu,\sigma^2)\in\bR\times\bR^+_*\}$, in which case the model parameter reduces to $\f = (\vec \lambda, (\mu_1,\sigma^2_1),\ldots,(\mu_m,\sigma^2_m))$. For the multivariate case, a possible parametric model is the {\em conditionally i.i.d.\ normal model}, for which $\CF=\{\phi(\vec x_i) = \prod_{k=1}^r f(x_{ik}), \mbox{$f(t)$ density of $\CN(\mu,\sigma^2)$}\}$ (this model is included in \pkg{mixtools}; see Section~\ref{ss:nbcomp}). An example of a (multivariate) nonparametric situation is $\CF=\{\phi(\vec x_i) = \prod_{k=1}^r f(x_{ik}), \mbox{$f(t)$ a univariate density on $\bR$}\}$, in which case $\vec\f$ consists in a Euclidean part ($\vec\lb$) and a nonparametric part $(f_1,\ldots,f_m)$. As a simple example of a dataset to which mixture models may be applied, consider the sample depicted in Figure \ref{geyser}. 
In the Old Faithful dataset, measurements give time in minutes between eruptions of the Old Faithful geyser in Yellowstone National Park, USA. These data are included as part of the \pkg{datasets} package in \proglang{R} \citep{r2009}; type \code{help("faithful")} in \proglang{R} for more details. <>= library(mixtools) data(faithful) attach(faithful) @ \setkeys{Gin}{width=0.6\textwidth} \begin{figure}[h] \centering <>= hist(waiting, main="Time between Old Faithful eruptions", xlab="Minutes", ylab="", cex.main=1.5, cex.lab=1.5, cex.axis=1.4) @ \caption{The Old Faithful dataset is clearly suggestive of a two-component mixture of symmetric components.} \label{geyser} \end{figure} For the Old Faithful eruption data, a two-component mixture model is clearly a reasonable model based on the bimodality evident in the histogram. This example is analyzed by \citet{hunter2007ims}, who compare a standard normal-mixture method for fitting it with a novel semiparametric approach. Both approaches are included in \pkg{mixtools}; see Sections \ref{section:EMexample} and \ref{section:SPexample} of this article. In Section~\ref{section:EM} of the current article we review the well-known class of EM algorithms for finite mixture models, a common thread that runs throughout much of the rest of the article. The remaining sections discuss various categories of functions found in the \pkg{mixtools} package, from cutpoint methods that relax distributional assumptions for multivariate data by discretizing the data (Section~\ref{section:cut}), to semi- and non-parametric methods that eliminate distributional assumptions almost entirely depending on what the identifiability of the model allows (Section~\ref{section:np}), to methods that handle various mixtures of regressions (Section~\ref{section:reg}). Finally, Section \ref{section:misc} describes several miscellaneous features of the \pkg{mixtools} package. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{EM algorithms for finite mixtures} \label{section:EM} \subsection{Missing data setup} Much of the general methodology used in \pkg{mixtools} involves the representation of the mixture problem as a particular case of maximum likelihood estimation (MLE) when the observations can be viewed as incomplete data. This setup implies consideration of two sample spaces, the sample space of the (incomplete) observations, and a sample space of some ``complete'' observations, the characterization of which being that the estimation can be performed explicitly at this level. For instance, in parametric situations, the MLE based on the complete data may exist in closed form. Among the numerous reference papers and monographs on this subject are, e.g., the original EM algorithm paper by \citet{dempster1977mli} and the finite mixture model book by \citet{mclachlan2000fmm} and references therein. We now give a brief description of this setup as it applies to finite mixture models in general. The (observed) data consist of $n$ i.i.d. observations $\vec x = (\vec x_1,\ldots,\vec x_n)$ from a density $g_\f$ given by \eqref{mvmixture}. It is common to denote the density of the sample by $\Bg_\f$, the $n$-fold product of $g_\f$, so that we write simply $\Bx\sim \Bg_\f$. In the missing data setup, $\Bg_\f$ is called the incomplete-data density, and the associated log-likelihood is $L_{\Bx}(\f) = \sum_{i=1}^n \log g_\f(\vec x_i)$. The (parametric) ML estimation problem consists in finding $\hat\f_{\Bx} = \argmax_{\f\in\Phi} L_{\Bx}(\f)$, or at least finding a local maximum --- there are certain well-known cases in which a finite mixture model likelihood is unbounded \citep{mclachlan2000fmm}, but we ignore these technical details for now. Calculating $\hat\f_{\Bx}$ even for a parametric finite mixture model is known to be a difficult problem, and considering $\Bx$ as incomplete data resulting from non-observed complete data helps. 
The associated complete data is denoted by $\Bc = (\vec c_1,\ldots, \vec c_n)$, with density $\Bh_\f(\Bc)=\prod_{i=1}^n h_\f(\vec c_i)$ (there exists a many-to-one mapping from $\Bc$ to $\Bx$, representing the loss of information). In the model for complete data associated with model~\eqref{mvmixture}, each random vector $\vec C_i = (\vec X_i,\vec Z_i)$, where $\vec Z_i = (Z_{ij},j=1,\ldots m)$, and $Z_{ij}\in\{0,1\}$ is a Bernoulli random variable indicating that individual $i$ comes from component $j$. Since each individual comes from exactly one component, this implies $\sum_{j=1}^m Z_{ij}=1$, and $$ \Prob(Z_{ij} = 1) = \lambda_{j},\quad (\vec X_i|Z_{ij}=1) \sim \phi_j, \quad j=1,\ldots,m. $$ The complete-data density for one observation is thus $$ h_\f(\vec c_i) = h_\f(\vec x_i,\vec z_i) = \sum_{j=1}^m \I_{z_{ij}}\lb_j \phi_j (\vec x_i), $$ In the parametric situation, i.e.\ when $\CF$ is a parametric family, it is easy to check that the complete-data MLE $\hat\f_{\Bc}$ based on maximizing $\log \Bh_\f(\Bc)$ is easy to find, provided that this is the case for the family $\CF$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{EM algorithms} \label{sec:EM} An EM algorithm iteratively maximizes, instead of the observed log-likelihood $L_{\Bx}(\f)$, the operator $$ Q(\f | \f^{(t)}) = \E \left[\log \Bh_\f(\BC)|\Bx,\f^{(t)} \right], $$ where $\f^{(t)}$ is the current value at iteration~$t$, and the expectation is with respect to the distribution $\Bk_\f(\Bc|\Bx)$ of $\Bc$ given $\Bx$, for the value $\f^{(t)}$ of the parameter. The iteration $\f^{(t)} \to \f^{(t+1)}$ is defined in the above general setup by \begin{enumerate} \item E-step: compute $Q(\f | \f^{(t)})$ \item M-step: set $\f^{(t+1)} = \argmax_{\f\in\Phi}Q(\f | \f^{(t)})$ \end{enumerate} For finite mixture models, the E-step does not depend on the structure of $\CF$, since the missing data part is only related to the $\Bz$'s: $$ \Bk_\f(\Bc|\Bx) = \prod_{i=1}^n k_\f(\vec z_i|\vec x_i). 
$$ The $\Bz$ are discrete, and their distribution is given via Bayes' theorem. The M-step itself can be split in two parts, the maximization related to $\vec\lb$, which does not depend on $\CF$, and the maximization related to $\vec \phi$, which has to be handled specifically (say, parametrically, semi- or non-parametrically) for each model. Hence the EM algorithms for the models handled by the \pkg{mixtools} package share the following common features: \begin{enumerate} \item{\bf E-step:\ } Calculate the ``posterior'' probabilities (conditional on the data and $\vec\theta^{(t)}$) of component inclusion, \begin{equation}\label{posteriors} \post_{ij}^{(t)} \, \defn \, \Prob_{\vec\theta^{(t)}}(Z_{ij}=1| \vec x_i) = \frac{\lambda_j^{(t)} \phi_{j}^{(t)}(\vec x_{i})} {\sum_{j'=1}^m\lambda_{j'}^{(t)} \phi_{j'}^{(t)}(\vec x_{i})} \end{equation} for all $i=1,\ldots, n$ and $j=1, \ldots, m$. Numerically, it can be dangerous to implement equation (\ref{posteriors}) exactly as written due to the possibility of the indeterminant form $0/0$ in cases where $\vec x_i$ is so far from any of the components that all $\phi_{j'}^{(t)}(\vec x_i)$ values result in a numerical underflow to zero. Thus, many of the routines in \pkg{mixtools} actually use the equivalent expression \begin{equation}\label{altposteriors} \post_{ij}^{(t)} = \left[ 1 + \sum_{j'\ne j} \frac{ \lambda_{j'}^{(t)} \phi_{j'}^{(t)}(\vec x_{i})} {\lambda_j^{(t)} \phi_{j}^{(t)}(\vec x_{i})} \right]^{-1} \end{equation} or some variant thereof. \item{\bf M-step for $\vec\lb$:\ } Set \begin{equation}\label{lambda} \lambda_j^{(t+1)} = \frac1n\sum_{i=1}^n \post_{ij}^{(t)} , \quad\mbox{for $j=1, \ldots, m$.} \end{equation} \end{enumerate} \subsection{An EM algorithm example} \label{section:EMexample} As an example, we consider the univariate normal mixture analysis of the Old Faithful waiting data depicted in Figure \ref{geyser}. 
This fully parametric situation corresponds to a mixture from the univariate Gaussian family described in Section~\ref{s:intro}, where the $j$th component density $\phi_j(x)$ in \eqref{mvmixture} is normal with mean $\mu_j$ and variance $\sigma_j^2$. This is a special case of the general mixture-of-normal model that is well-studied in the literature and for which other software, such as the \pkg{mclust} \citep{Fraley+Raftery:2009} package for \proglang{R}, may also be used for parameter estimation. The M-step for the parameters $(\mu_j,\sigma^2_j)$, $j=1,\ldots,m$ of this EM algorithm for such mixtures of univariate normals is straightforward, and can be found, e.g., in \citet{mclachlan2000fmm}. The function \code{normalmixEM} implements the algorithm in \pkg{mixtools}. Code for the Old Faithful example, using most of the default values (e.g., stopping criterion, maximum number of iterations), is simply <>= wait1 <- normalmixEM(waiting, lambda = .5, mu = c(55, 80), sigma = 5) @ The code above will fit a 2-component mixture (because \code{mu} is a vector of length two) in which the standard deviations are assumed equal (because \code{sigma} is a scalar instead of a vector). See \code{help("normalmixEM")} for details about specifying starting values for this EM algorithm. <>= plot(wait1, density=TRUE, cex.axis=1.4, cex.lab=1.4, cex.main=1.8, main2="Time between Old Faithful eruptions", xlab2="Minutes") @ \setkeys{Gin}{width=0.49\textwidth} \begin{figure}[!h] \centering <>= for(i in 1:2){ file=paste("geyserEM", i, ".pdf", sep="") pdf(file=file, paper="special", width=6, height=6) plot(wait1, whichplots=i, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") dev.off() cat("\\includegraphics{", file, "}\n", sep="") } @ \caption{The Old Faithful waiting data fitted with a parametric EM algorithm in \pkg{mixtools}. 
Left: the sequence of log-likelihood values; Right: the fitted Gaussian components.} \label{geyserEM} \end{figure} The \code{normalmixEM} function returns an object of class \code{"mixEM"}, and the \code{plot} method for these objects delivers the two plots given in Figure \ref{geyserEM}: the sequence $t\mapsto L_{\Bx}(\f^{(t)})$ of observed log-likelihood values and the histogram of the data with the $m$ ($m=2$ here) fitted Gaussian component densities of $\CN(\hat\mu_j,\hat\sigma^2_j)$, $j=1,\ldots,m$, each scaled by the corresponding $\hat\lambda_j$, superimposed. The estimator $\hat{\vec\theta}$ can be displayed by typing, e.g., <>= wait1[c("lambda", "mu", "sigma")] @ Alternatively, the same output may be obtained using the \code{summary} method: <>= summary(wait1) @ \section{Cutpoint methods} \label{section:cut} Traditionally, most literature on finite mixture models has assumed that the density functions $\phi_j(\vec x)$ of equation (\ref{mvmixture}) come from a known parametric family. However, some authors have recently considered the problem in which $\phi_j(\vec x)$ is unspecified except for some conditions necessary to ensure the identifiability of the parameters in the model. One such set of conditions is as follows: \citet{hettmansperger2000ani}; \citet{cruzmedina2004smm}; and \citet{elmore2004ecc} treat the case in which $\phi_j(\vec x)$ equals the product $f_j(x_i)\cdots f_j(x_r)$ for some univariate density function $f_j$. Thus, conditional on knowing that $\vec X$ comes from the $j$th mixture component, the coordinates of $\vec X$ are independent and identically distributed. For this reason, this case is called the conditionally i.i.d.\ model. The authors named above have developed an estimation method for the conditionally i.i.d.\ model. 
This method, the {\em cutpoint approach}, discretizes the continuous measurements by replacing each $r$-dimensional observation, say $\vec X_i= (x_{i1}, \ldots, x_{ir})$, by the $p$-dimensional multinomial vector $(n_1, \ldots, n_p)$, where $p\ge2$ is chosen by the experimenter along with a set of cutpoints $-\infty = c_0 < c_1 < \cdots < c_p=\infty$, so that for $a=1, \ldots, p$, \[ n_a = \sum_{k=1}^r I\{c_{a-1} < x_{ik} \le c_a\}. \] Note that the multinomial distribution is guaranteed by the conditional i.i.d.\ assumption, and the multinomial probability of the $a$th category is equal to $\theta_a \equiv P(c_{a-1} < X_{ik} \le c_a)$. We illustrate this approach using the water-level data: <>= data("Waterdata") cutpts <- 10.5*(-6:6) watermult <- makemultdata(Waterdata, cuts = cutpts) @ Once the multinomial data have been created, we may apply the \code{multmixEM} function to estimate the multinomial parameters via an EM algorithm. <>= set.seed(15) theta4 <- matrix(runif(56), ncol = 14) theta3 <- theta4[1:3,] mult3 <- multmixEM(watermult, lambda = rep(1, 3)/3, theta = theta3) mult4 <- multmixEM (watermult, lambda = rep (1, 4) / 4, theta = theta4) @ Finally, \code{compCDF} calculates and plots the estimated distribution functions of equation (\ref{ecdf}). Figure \ref{WDcutpoint} gives plots for both a 3-component and a 4-component solution; these plots are very similar to the corresponding plots in Figures 1 and 2 of \citet{elmore2004ecc}. 
<>= cdf3 <- compCDF(Waterdata, mult3$posterior, lwd=2, lab=c(7, 5, 7), xlab="Angle in degrees", ylab="Component CDFs", main="Three-Component Solution") cdf4 <- compCDF(Waterdata, mult4$posterior, lwd=2, lab=c(7, 5, 7), xlab="Angle in degrees", ylab="Component CDFs", main="Four-Component Solution") @ <>= pdf(file="WDcutpoint3comp.pdf", paper="special", width=8, height=8) cdf3 <- compCDF(Waterdata, mult3$posterior, lwd=3, xlab="Angle in degrees", lab=c(7, 5, 7), ylab="Component CDFs", main="Three-Component Solution", cex.axis=1.4, cex.lab=1.5, cex.main=1.5) ltext <- paste(round(mult3$lam*100, 1), "%", sep="") legend("bottomright", legend=ltext, pch=15:17, cex=1.5, pt.cex=1.35) y <- compCDF(Waterdata, mult3$posterior, x=cutpts, makeplot=F) for(i in 1:3) points(cutpts, y[i,], pch=14+i, cex=1.35) dev.off() pdf(file="WDcutpoint4comp.pdf", paper="special", width=8, height=8) cdf4 <- compCDF(Waterdata, mult4$posterior, lwd=3, xlab="Angle in degrees", lab=c(7, 5, 7), ylab="Component CDFs", main="Four-Component Solution", cex.axis=1.4, cex.lab=1.5, cex.main=1.5) ltext <- paste(round(mult4$lam*100,1), "%", sep="") legend("bottomright", legend=ltext, pch=15:18, cex=1.5, pt.cex=1.35) y <- compCDF(Waterdata, mult4$posterior, x=cutpts, makeplot=F) for(i in 1:4) points(cutpts, y[i,], pch=14+i, cex=1.35) dev.off() @ \begin{figure}[!h] \centering \includegraphics[width=0.49\textwidth]{WDcutpoint3comp} \includegraphics[width=0.49\textwidth]{WDcutpoint4comp} \caption{Empirical cumulative distribution function (CDF) estimates for the three- and four-component multinomial cutpoint models for the water-level data; compare Figures 1 and 2 of \citet{elmore2004ecc}. The 13 cutpoints used are indicated by the points in the plots, and the estimated mixing proportions for the various components are given by the legend. 
} \label{WDcutpoint} \end{figure} As with the output of \code{normalmixEM} in Section~\ref{section:EM}, it is possible to summarize the output of the \code{multmixEM} function using the \code{summary} method for \code{mixEM} objects: <>= summary(mult4) @ \section{Nonparametric and semiparametric methods} \label{section:np} In this section, we consider nonparametric multivariate finite mixture models. The first algorithm presented here was introduced by \citet{benaglia2009} as a generalization of the stochastic semiparametric EM algorithm of \citet{bordes2007sas}. Both algorithms are implemented in \pkg{mixtools}. \subsection{EM-like algorithms for mixtures of unspecified densities} \label{section:EMlike} Consider the mixture model described by equation \eqref{mvmixture}. If we assume that the coordinates of the $\vec X_i$ vector are {\em conditionally independent}, i.e. they are independent conditional on the subpopulation or component ($\phi_1$ through $\phi_m$) from which $\vec X_i$ is drawn, the density in \eqref{mvmixture} can be rewritten as: \begin{equation} \label{mvmixture2} g_{\vec\theta}(\vec x_i) = \sum_{j=1}^m\lambda_j\prod_{k=1}^rf_{jk}(x_{ik}), \end{equation} where the function $f(\cdot)$, with or without subscripts, will always denote a univariate density function. Here we do not assume that $f_{jk}(\cdot)$ comes from a family of densities that may be indexed by a finite-dimensional parameter vector, and we estimate these densities using nonparametric density techniques. That is why we say that this algorithm is a fully nonparametric approach. The density in equation \eqref{mvmixture2} allows for a different distribution for each component and each coordinate of $\vec X_i$. Notice that if the density $f_{jk}(\cdot)$ does not depend on $k$, we have the case in which the $\vec X_i$ are not only conditionally independent but identically distributed as well. These are the two extreme cases. In order to encompass both the conditionally i.i.d. 
case and the more general case \eqref{mvmixture2} simultaneously in one model, we allow that the coordinates of $\vec X_i$ are conditionally independent and there exist {\em blocks} of coordinates that are also identically distributed. If we let $b_k$ denote the block to which the $k$th coordinate belongs, where $1\le b_k\le B$ and $B$ is the total number of such blocks, then equation \eqref{mvmixture2} is replaced by \begin{equation}\label{rmgeneral} g_{\vec\theta} (\vec x_i) = \sum_{j=1}^m \lambda_j \prod_{k=1}^r f_{j{b_k}} (x_{ik}). \end{equation} The indices $i$, $j$, $k$, and $\ell$ will always denote a generic individual, component (subpopulation), coordinate (repeated measurement), and block, respectively. Therefore, we will always have $1\le i\le n$, $1\le j\le m$, $1\le k\le r$, and $1\le\ell\le B$. The EM algorithm to estimate model \eqref{rmgeneral} has the E-step and M-step described in Section~\ref{sec:EM}. In equation (\ref{posteriors}), we have $\phi_j^{(t)}(\vec x_i) = \prod_{k=1}^r f_{jb_k}^{(t)}(x_{ik})$, where $f_{j\ell}^{(t)}(\cdot)$ is obtained by a weighted nonparametric (kernel) density estimate, given by: \begin{enumerate} \addtocounter{enumi}{2} \item{\bf Nonparametric (Kernel) density estimation step:\ } For any real $u$, define for each component $j\in\{1, \ldots, m\}$ and each block $\ell\in\{1, \ldots, B\}$ \begin{equation} \label{densest} f_{j\ell}^{t+1}(u) = \frac {1}{nh_{j\ell} C_\ell\lambda_{j}^{t+1}} \sum_{k=1}^r \sum_{i=1}^n \post_{ij}^{(t)} I\{b_k=\ell\} K\left(\frac{u-x_{ik}}{h_{j\ell}}\right), \end{equation} where $K(\cdot)$ is a kernel density function, $h_{j\ell}$ is the bandwidth for the $j$th component and $\ell$th block density estimate, and $C_\ell$ is the number of coordinates in the $\ell$th block. \end{enumerate} The function \code{npEM} implements this algorithm in \pkg{mixtools}. 
This function has an argument \code{samebw} which, when set to \code{TRUE} (the default), takes $h_{j\ell} = h$, for all $1 \le j \le m$ and $1\le\ell\le B$, that is, the same bandwidth for all components and blocks, while \code{samebw = FALSE} allows a different bandwidth for each component and each block, as detailed in \citet{bch:festchrift2009}. This function will, if called using \code{stochastic = TRUE}, replace the deterministic density estimation step (\ref{densest}) by a {\em stochastic} density estimation step of the type proposed by \citet{bordes2007sas}: First, generate $\vec Z^{(t)}_{i} = (Z^{(t)}_{i1}, \ldots, Z^{(t)}_{im})$ as a multivariate random vector with a single trial and success probability vector $\vec p_i^{(t)} = (p_{i1}^{(t)}, \ldots, p_{im}^{(t)})$, then in the M-step for $\lambda_{j}^{t+1}$ in equation~(\ref{lambda}), replace $p^{(t)}_{ij}$ by $Z^{(t)}_{ij}$ and let \[ f_{j\ell}^{t+1}(u) = \frac {1}{nh_{j\ell} C_\ell\lambda_{j}^{t+1}} \sum_{k=1}^r \sum_{i=1}^n Z_{ij}^{(t)} I\{b_k=\ell\} K\left(\frac{u-x_{ik}}{h_{j\ell}}\right). \] In other words, the stochastic versions of these algorithms re-assign each observation randomly at each iteration, according to the $p_{ij}^{(t)}$ values at that iteration, to one of the $m$ components, then the density estimate for each component is based only on those observations that have been assigned to it. Because the stochastic algorithms do not converge the way a deterministic algorithm often does, the output of \code{npEM} is slightly different when \code{stochastic = TRUE} than when \code{stochastic = FALSE}, the default. See the corresponding help file for details. \citet{benaglia2009} also discuss specific cases of model (\ref{rmgeneral}) in which some of the $f_{jb_k}(\cdot)$ densities are assumed to be the same except for a location and scale change. 
They refer to such cases as semiparametric since estimating each $f_{jb_k}(\cdot)$ involves estimating an unknown density as well as multiple location and scale parameters. For instance, equation (17) of \citet{benaglia2009} sets \begin{equation} \label{spEM} f_{j\ell}(x) = \frac{1}{\sigma_{j\ell}}f \left( \frac{x-\mu_{j\ell}}{\sigma_{j\ell}} \right), \end{equation} where $\ell=b_k$ for a generic $k$. The \pkg{mixtools} package implements an algorithm for fitting model (\ref{spEM}) in a function called \code{spEM}. Details on the use of this function may be obtained by typing \code{help("spEM")}. Implementation of this algorithm and of that of the \code{npEM} function requires updating the values of $f_{jb_k}(x_{ik})$ for all $i$, $j$, and $k$ for use in the E-step (\ref{posteriors}). To do this, the \code{spEM} algorithm keeps track of an $n\times m$ matrix, called $\Phi$ here, where \[ \Phi_{ij} \equiv \phi_j(\vec x_i) = \prod_{k=1}^r f_{jb_k}(x_{ik}). \] The density estimation step of equation (\ref{densest}) updates the $\Phi$ matrix for the $(t+1)$th iteration based on the most recent values of all of the parameters. For instance, in the case of model (\ref{spEM}), we obtain \begin{eqnarray*} \Phi_{ij}^{t+1} &=& \prod_{\ell=1}^B\prod_{k:b_k=\ell} \frac{1}{\sigma_{j\ell}^{t+1}} f^{t+1} \left( \frac{x-\mu_{j\ell}^{t+1}}{\sigma_{j\ell}^{t+1}} \right) \\ &=& \prod_{\ell=1}^B \prod_{k:b_k=\ell} \frac{1}{\sigma_{j\ell}^{t+1}} \sum_{i'=1}^n \frac{p_{ij}^{t+1}}{hrn\lambda_j^{t+1}} \sum_{k'=1}^r K\left[ \frac{\left(\frac{x_{ik}-\mu_{j\ell}^{t+1}}{\sigma_{j\ell}^{t+1}} \right) - (x_{i'k'} - \mu_{j\ell}^{t+1})} {h\sigma_{j\ell}^{t+1}} \right]. 
\end{eqnarray*} \subsection{A univariate symmetric, location-shifted semiparametric example} \label{section:SPexample} Both \citet{hunter2007ims} and \citet{bordes2006set} study a particular case of model (\ref{mvmixture}) in which $x$ is univariate and \begin{equation} \label{spmodel} g_{\vec \theta}(x) = \sum_{j=1}^m\lambda_j \phi(x-\mu_j), \end{equation} where $\phi(\cdot)$ is a density that is assumed to be completely unspecified except that it is symmetric about zero. Because each component distribution has both a nonparametric part $\phi(\cdot)$ and a parametric part $\mu_j$, we refer to this model as semiparametric. Under the additional assumption that $\phi(\cdot)$ is absolutely continuous with respect to Lebesgue measure, \citet{bordes2007sas} propose a stochastic algorithm for estimating the model parameters, namely, $(\vec\lambda, \vec\mu, \phi)$. This algorithm is implemented by the \pkg{mixtools} function \code{spEMsymloc}. This function also implements a nonstochastic version of the algorithm, which is the default and which is a special case of the general algorithm described in Section~\ref{section:EMlike}. 
<>= pdf(file="spsymmfig1.pdf", paper="special", width=8, height=8) par(mar=0.1+c(5,4.2,4,1.8)) plot(wait1, which = 2, cex.axis = 1.4, cex.lab = 1.5, cex.main = 1.5, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") wait2 <- spEMsymloc(waiting, mu0 = c(55, 80)) plot(wait2, lty = 2, newplot = FALSE, addlegend = FALSE) dev.off() pdf(file="spsymmfig2.pdf", paper="special", width=8, height=8) par(mar=0.1+c(5,4.2,4,1.8)) wait2a <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 1) wait2b <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 6) plot(wait2a, lty = 1, addlegend = FALSE, cex.axis = 1.4, cex.lab = 1.5, cex.main = 1.5, title = "Time between Old Faithful eruptions", xlab = "Minutes") plot(wait2b, lty = 2, newplot = FALSE, addlegend = FALSE) dev.off() @ \begin{figure}[h] \centering \includegraphics[height=3in,width=3in]{spsymmfig1} \includegraphics[height=3in,width=3in]{spsymmfig2} \caption{The Old Faithful dataset, fit using different algorithms in \pkg{mixtools}. Left: the fitted Gaussian components (solid) and a semiparametric fit assuming model (\ref{spmodel}) with the default bandwidth of $4.0$ (dashed); Right: the same model (\ref{spmodel}) using bandwidths of $1.0$ (solid) and $6.0$ (dashed).} \label{spsymmfig} \end{figure} As noted in Figure \ref{geyser}, model (\ref{spmodel}) appears to be an appropriate model for the Old Faithful waiting times dataset. Here, we provide code that applies the \code{spEMsymloc} function to these data. 
First, we display the normal mixture solution of Figure \ref{geyserEM} with a semiparametric solution superimposed, in Figure \ref{spsymmfig}(a): <>= plot(wait1, which = 2, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") wait2 <- spEMsymloc(waiting, mu0 = c(55, 80)) plot(wait2, lty = 2, newplot = FALSE, addlegend = FALSE) @ Because the semiparametric version relies on a kernel density estimation step (\ref{densest}), it is necessary to select a bandwidth for this step. By default, \code{spEMsymloc} uses a fairly simplistic approach: It applies ``Silverman's rule of thumb'' \citep{silverman1986des} to the entire dataset using the \code{bw.nrd0} function in \proglang{R}. For the Old Faithful waiting time dataset, this bandwidth is about~$4$: <>= bw.nrd0(waiting) @ But the choice of bandwidth can make a big difference, as seen in Figure \ref{spsymmfig}(b). <>= wait2a <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 1) wait2b <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 6) plot(wait2a, lty = 1, addlegend = FALSE, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, xlab = "Minutes", title = "Time between Old Faithful eruptions") plot(wait2b, lty = 2, newplot = FALSE, addlegend = FALSE) @ We find that with a bandwidth near $2$, the semiparametric solution looks quite close to the normal mixture solution of Figure \ref{geyserEM}. Reducing the bandwidth further results in the ``bumpiness'' exhibited by the solid line in Figure \ref{spsymmfig}(b). On the other hand, with a bandwidth of 8, the semiparametric solution completely breaks down in the sense that the algorithm tries to make each component look similar to the whole mixture distribution. We encourage the reader to experiment by changing the bandwidth in the above code. 
\subsection{A trivariate Gaussian example} \label{ss:trigauss} As a first simple, nonparametric example, we simulate a Gaussian trivariate mixture with independent repeated measures and a shift of location between the two components in each coordinate, i.e., $m=2$, $r=3$, and $b_k=k$, $k=1,2,3$. The individual densities $f_{jk}$ are the densities of $\CN(\mu_{jk},1)$, with component means $\vec\mu_1 = (0,0,0)$ and $\vec\mu_2=(3,4,5)$. This example was introduced by \citet{hall2005nim} then later reused by \citet{benaglia2009} for comparison purposes. Note that the parameters in this model are identifiable, since \citet{hall2003nec} showed that for two components ($m=2$), identifiability holds in model~\eqref{mvmixture} under mild assumptions as long as $r\ge3$, even in the most general case in which $b_k=k$ for all $k$. A function \code{ise.npEM} has been included in \pkg{mixtools} for numerically computing the integrated squared error (ISE) relative to a user-specified true density for a selected estimated density $\hat f_{jk}$ from \code{npEM} output. Each density $\hat f_{jk}$ is computed using equation~(\ref{densest}) together with the posterior probabilities after convergence of the algorithm, i.e., the final values of the $\post_{ij}^t$ (when \code{stochastic = FALSE}). We illustrate the usage of \code{ise.npEM} in this example by running a Monte Carlo simulation for $S$ replications, then computing the square root of the mean integrated squared error (MISE) for each density, where \[ {\rm MISE} = \frac{1}{S}\sum_{s=1}^S \int \left(\hat f_{jk}^{(s)}(u)-f_{jk}(u)\right)^2\,du,\quad j=1,2 \mbox{ and } k=1,2,3. 
\] For this example, we first set up the model true parameters with $S=100$ replications of $n=300$ observations each: <>= m <- 2; r <- 3; n <- 300; S <- 100 lambda <- c(0.4, 0.6) mu <- matrix(c(0, 0, 0, 3, 4, 5), m, r, byrow = TRUE) sigma <- matrix(rep(1, 6), m, r, byrow = TRUE) @ Next, we set up ``arbitrary'' initial centers, a matrix for storing sums of integrated squared errors, and an integer storing the number of suspected instances of label switching that may occur during the replications: <>= centers <- matrix(c(0, 0, 0, 4, 4, 4), 2, 3, byrow = TRUE) ISE <- matrix(0, m, r, dimnames = list(Components = 1:m, Blocks = 1:r)) nblabsw <- 0 @ Finally, we run the Monte Carlo simulation, using the \code{samebw = FALSE} option since it is more appropriate for this location-shift model: <>= set.seed(1000) for (mc in 1:S) { x <- rmvnormmix(n, lambda, mu, sigma) a <- npEM(x, centers, verb = FALSE, samebw = FALSE) if (a$lambda[1] > a$lambda[2]) nblabsw <- nblabsw + 1 for (j in 1:m) { for (k in 1:r) { ISE[j, k] <- ISE[j, k] + ise.npEM(a, j, k, dnorm, lower = mu[j, k] - 5, upper = mu[j, k] + 5, plots = FALSE, mean = mu[j, k], sd = sigma[j, k])$value #$ } } } MISE <- ISE/S print(sqMISE <- sqrt(MISE)) @ We can examine the \code{npEM} output from the last replication above using <>= summary(a) @ We can also get plots of the estimated component densities for each block (recall that in this example, block $\ell$ consists only of coordinate $\ell$) using the \code{plot} function. The resulting plots are given in Figure~\ref{fig:gausstrivariate}. 
<>= plot(a) @ <>= pdf("gauss3rm.pdf", paper="special", width=10, height=5) par(mfrow=c(1,3), ask=F) plot(a) dev.off() @ \begin{figure}[h] \centering \includegraphics[width=.99\textwidth]{gauss3rm} \caption{Output of the \code{npEM} algorithm for the trivariate Gaussian model with independent repeated measures.} \label{fig:gausstrivariate} \end{figure} \subsection{A more general multivariate nonparametric example} \label{sec:generalmv} In this section, we fit a more difficult example, with non-multimodal mixture densities (in block \#2), heavy-tailed distributions, and different scales among the coordinates. The model is multivariate with $r=5$ repeated measures and $m=2$ components (hence identifiability holds; cf.\ \citet{hall2003nec} as cited in Section~\ref{ss:trigauss}). The $5$ repeated measures are grouped into $B=2$ blocks, with $b_1=b_2=b_3=1$ and $b_4=b_5=2$. Block $1$ corresponds to a mixture of two noncentral Student $t$ distributions, $t'(2,0)$ and $t'(10,8)$, where the first parameter is the number of degrees of freedom, and the second is the non-centrality. Block~2 corresponds to a mixture of Beta distributions, ${\cal B}(1,1)$ (which is actually the uniform distribution over $[0,1]$) and ${\cal B}(1,5)$. The first component weight is $\lambda_1 = 0.4$. The true mixtures are depicted in Figure~\ref{fig:true5rm}. 
<>= pdf("truepdf5rm_block1.pdf") par(mar=0.1+c(5,4.2,4,1.5)) x <- seq(-10, 25, len=250) plot(x, .4* dt(x, 2, 0) + .6 * dt(x, 10, 8), type="l", lwd=3, col=2, cex.axis=1.4, cex.lab=1.5, cex.main=1.5, main="Block 1", xlab="", ylab="Density") lines (x, .4*dt(x, 2, 0), lwd=4, lty=2) lines (x, .6*dt(x, 10, 8), lwd=4, lty=2) dev.off() pdf("truepdf5rm_block2.pdf") par(mar=0.1+c(5,4.2,4,1.5)) x <- seq(0, 1, len=250) plot(x, .4 + .6 * dbeta(x, 1, 5), type="l", lwd=3, col=2, cex.axis=1.4, cex.lab=1.5, cex.main=1.5, main="Block 2", xlab="", ylab="Density", ylim= c(0, 3.4)) lines (x, rep(.4, 250), lwd=4, lty=2) lines (x, .6*dbeta(x, 1, 5), lwd=4, lty=2) dev.off() @ \begin{figure}[h] \centering \includegraphics[height=2.5in,width=2.5in]{truepdf5rm_block1} \includegraphics[height=2.5in,width=2.5in]{truepdf5rm_block2} \caption{True densities for the mixture of Section~\ref{sec:generalmv}, with individual component densities (scaled by $\lambda_j$) in dotted lines and mixture densities in solid lines. The noncentral $t$ mixture of coordinates 1 through 3 is on the left, the beta mixture of coordinates 4 and 5 on the right.} \label{fig:true5rm} \end{figure} To fit this model in \pkg{mixtools}, we first set up the model parameters: <>= m <- 2; r <- 5 lambda <- c(0.4, 0.6) df <- c(2, 10); ncp <- c(0, 8) sh1 <- c(1, 1) ; sh2 <- c(1, 5) @ Then we generate a pseudo-random sample of size $n=300$ from this model: <>= n <- 300; z <- sample(m, n, rep = TRUE, prob = lambda) r1 <- 3; z2 <- rep(z, r1) x1 <- matrix(rt(n * r1, df[z2], ncp[z2]), n, r1) r2 <- 2; z2 <- rep(z, r2) x2 <- matrix(rbeta(n * r2, sh1[z2], sh2[z2]), n, r2) x <- cbind(x1, x2) @ For this example in which the coordinate densities are on different scales, it is obvious that the bandwidth in \code{npEM} should depend on the blocks and components. 
We set up the block structure and some initial centers, then run the algorithm with the option \code{samebw = FALSE}: <>= id <- c(rep(1, r1), rep(2, r2)) centers <- matrix(c(0, 0, 0, 1/2, 1/2, 4, 4, 4, 1/2, 1/2), m, r, byrow = TRUE) b <- npEM(x, centers, id, eps = 1e-8, verb = FALSE, samebw = FALSE) @ Figure~\ref{fig:npEM5rm} shows the resulting density estimates, which may be obtained using the plotting function included in \pkg{mixtools}: <>= plot(b, breaks = 15) @ % plot(b, breaks = 15, cex.main = 1.5, cex.lab = 1.5, cex.axis = 1.4, % cex.legend = 1.5) <>= pdf("npEM5rm.pdf", width=8, height=5) par(mfrow=c(1,2)) plot(b, breaks = 15) dev.off() @ \begin{figure}[h] \centering \includegraphics[width=.95\textwidth]{npEM5rm} \caption{Result of plotting \code{npEM} output for the example of Section~\ref{sec:generalmv}. Since $n=300$, the histogram on the left includes 900 observations and the one on the right includes 600.} \label{fig:npEM5rm} \end{figure} Finally, we can compute the ISE of the estimated density relative to the truth for each block and component. The corresponding output is depicted in Figure \ref{fig:ISEnpEM5rm}. 
<>= par(mfrow=c(2,2)) for (j in 1:2){ ise.npEM(b, j, 1, truepdf = dt, lower = ncp[j] - 10, upper = ncp[j] + 10, df = df[j], ncp = ncp[j]) ise.npEM(b, j, 2, truepdf = dbeta, lower = -0.5, upper = 1.5, shape1 = sh1[j], shape2 = sh2[j]) } @ <>= options(warn=-1) pdf("ISEnpEM5rm.pdf", width=8, height=8) par(mfrow = c(2, 2)) for (j in 1:2){ ise.npEM(b, j, 1, truepdf = dt, lower = ncp[j] - 10, upper = ncp[j] + 10, df = df[j], ncp = ncp[j]) ise.npEM(b, j, 2, truepdf = dbeta, lower = -0.5, upper = 1.5, shape1 = sh1[j], shape2 = sh2[j]) } dev.off() @ \begin{figure}[h] \centering \includegraphics[height=5in,width=6in]{ISEnpEM5rm} \caption{\code{ise.npEM} output for the 5-repeated measures example; the true densities are $f_{11}\equiv t'(2,0)$, $f_{21}\equiv t'(10,8)$, $f_{12}\equiv {\cal U}_{(0,1)}$, $f_{22}\equiv {\cal B}(1,5)$.} \label{fig:ISEnpEM5rm} \end{figure} \section{Mixtures of regressions} \label{section:reg} \subsection{Mixtures of linear regressions} Consider a mixture setting where we now assume $\textbf{X}_{i}$ is a vector of covariates observed with a response $Y_{i}$. The goal of mixtures of regressions is to describe the conditional distribution of $Y_{i}|\textbf{X}_{i}$. Mixtures of regressions have been extensively studied in the econometrics literature and were first introduced by \citet{quandt1972sr} as the \textit{switching regimes} (or \textit{switching regressions}) problem. A switching regimes system is often compared to \textit{structural change} in a system \citep{quandtram1978sr}. A structural change assumes the system depends deterministically on some observable variables, but switching regimes implies one is unaware of what causes the switch between regimes. In the case where it is assumed there are two heterogeneous classes, \citet{quandt1972sr} characterized the switching regimes problem ``by assuming that nature chooses between regimes with probabilities $\lambda$ and $1-\lambda$''. 
Suppose we have $n$ independent univariate observations, $y_{1},\ldots,y_{n}$, each with a corresponding vector of predictors, $\textbf{x}_{1},\ldots,\textbf{x}_{n}$, with $\textbf{x}_{i}=(x_{i,1},\ldots,x_{i,p})^\top$ for $i=1,\ldots,n$. We often set $x_{i,1}=1$ to allow for an intercept term. Let $\textbf{y}=(y_{1},\ldots,y_{n})^\top$ and let $\underline{\textbf{X}}$ be the $n\times p$ matrix consisting of the predictor vectors. Suppose further that each observation $(y_{i}, \vec x_i)$ belongs to one of $m$ classes. Conditional on membership in the $j$th component, the relationship between $y_{i}$ and $\textbf{x}_{i}$ is the normal regression model \begin{equation}\label{regmodel} y_{i}=\textbf{x}_{i}^\top\vec{\beta}_{j}+\epsilon_{i}, \end{equation} where $\epsilon_{i}\thicksim \CN(0,\sigma^{2}_{j})$ and $\vec{\beta}_{j}$ and $\sigma_{j}^{2}$ are the $p$-dimensional vector of regression coefficients and the error variance for component $j$, respectively. Accounting for the mixture structure, the conditional density of $y_{i}|\vec x_i$ is \begin{equation}\label{mor} g_{\vec\theta}(y_{i}|\textbf{x}_{i})=\sum_{j=1}^{m}\lambda_{j} \phi(y_{i} | \textbf{x}_{i}^\top\vec{\beta}_{j},\sigma^{2}_{j}), \end{equation} where $\phi(\cdot|\textbf{x}^\top\vec{\beta}_{j},\sigma^{2}_{j})$ is the normal density with mean $\textbf{x}^\top\vec\beta$ and variance $\sigma^2$. Notice that the model parameter for this setting is $\vec\theta=(\vec\lambda,(\vec{\beta}_{1},\sigma^2_{1}),\ldots,(\vec{\beta}_{m},\sigma^2_{m}))$. The mixture of regressions model (\ref{mor}) differs from the well-known mixture of multivariate normals model $(Y_{i},\textbf{X}_{i}^\top)^\top\thicksim \sum_{j=1}^{m}\lambda_{j}\CN_{p+1}(\vec{\mu}_{j},\Sigma_{j})$ because model (\ref{mor}) makes no assertion about the marginal distribution of $\textbf{X}_{i}$, whereas the mixture of multivariate normals specifies that $\textbf{X}_{i}$ itself has a mixture of multivariate normals distribution. 
<>= data("CO2data") attach(CO2data) pdf("gnpdata.pdf") par(mar=0.1+c(5,4.2,4,1.5)) plot(GNP, CO2, xlab="Gross National Product", ylab=expression(paste(CO[2]," per Capita")), cex.lab=1.5, cex.main=1.5, cex.axis=1.4, main="1996 GNP and Emissions Data") text(GNP, CO2, country, adj=c(.5,-.5)) dev.off() @ \begin{figure}[h] \centering \includegraphics[height=3in,width=3in]{gnpdata.pdf} \caption{1996 data on gross national product (GNP) per capita and estimated carbon dioxide ($\textrm{CO}_{2}$) emissions per capita. Note that ``CH'' stands for Switzerland, not China.} \label{gnpdata} \end{figure} As a simple example of a dataset to which a mixture of regressions models may be applied, consider the sample depicted in Figure \ref{gnpdata}. In this dataset, the measurements of carbon dioxide ($\textrm{CO}_{2}$) emissions are plotted versus the gross national product (GNP) for $n=28$ countries. These data are included in \pkg{mixtools}; type \code{help("CO2data")} in \proglang{R} for more details. \citet{hurn} analyzed these data using a mixture of regressions from the Bayesian perspective, pointing out that ``there do seem to be several groups for which a linear model would be a reasonable approximation.'' They further point out that identification of such groups could clarify potential development paths of lower GNP countries. \subsection{EM algorithms for mixtures of regressions} A standard EM algorithm, as described in Section~\ref{section:EM}, may be used to find a local maximum of the likelihood surface. \citet{deveaux1989} describes EM algorithms for mixtures of regressions in more detail, including proposing a method for choosing a starting point in the parameter space. 
The E-step is the same as for any finite mixture model EM algorithm; i.e., the $p_{ij}^{(t)}$ values are updated according to equation (\ref{posteriors})---or, in reality, equation (\ref{altposteriors})---where each $\phi_j^{(t)}(\vec x_i)$ is replaced in the regression context by $\phi(y_i | \vec x_i^\top\vec\beta_j, \sigma_j^2)$: \begin{equation}\label{regposteriors} \post_{ij}^{(t)} = \left[ 1 + \sum_{j'\ne j} \frac{ \lambda_{j'}^{(t)} \phi(y_i | \vec x_i^\top\vec\beta_{j'}, \sigma_{j'}^2)}{\lambda_j^{(t)} \phi(y_i | \vec x_i^\top\vec\beta_j, \sigma_j^2)} \right]^{-1} \end{equation} The update to the $\lambda$ parameters in the M-step, equation (\ref{lambda}), is also the same. Letting $\textbf{W}_{j}^{(t)}=\textrm{diag}(\post_{1j}^{(t)},\ldots,\post_{nj}^{(t)})$, the additional M-step updates to the $\vec\beta$ and $\sigma$ parameters are given by \begin{eqnarray}\label{betaEM} \vec{\beta}_{j}^{(t+1)} &=& (\underline{\textbf{X}}^\top\textbf{W}_{j}^{(t)}\underline{\textbf{X}})^{-1}\underline{\textbf{X}}^\top \textbf{W}_{j}^{(t)}\textbf{y} \quad \mbox{and} \\ \label{sigma} \sigma_{j}^{2(t+1)} &=& \frac{\biggl\|\textbf{W}_{j}^{1/2(t)}(\textbf{y}-\underline{\textbf{X}}^\top\vec{\beta}_{j}^{(t+1)})\biggr\|^{2}}{\mbox{tr}(\textbf{W}_{j}^{(t)})}, \end{eqnarray} where $\|\textbf{A}\|^{2}=\textbf{A}^\top\textbf{A}$ and $\mbox{tr}(\textbf{A})$ means the trace of the matrix $\textbf{A}$. Notice that equation (\ref{betaEM}) is a weighted least squares (WLS) estimate of $\vec{\beta}_{j}$ and equation (\ref{sigma}) resembles the variance estimate used in WLS. Allowing each component to have its own error variance $\sigma_j^2$ results in the likelihood surface having no maximizer, since the likelihood may be driven to infinity if one component gives a regression surface passing through one or more points exactly and the variance for that component is allowed to go to zero. 
A similar phenomenon is well-known in the finite mixture-of-normals model where the component variances are allowed to be distinct \citep{mclachlan2000fmm}. However, in practice we observe this behavior infrequently, and the \pkg{mixtools} functions automatically force their EM algorithms to restart at randomly chosen parameter values when it occurs. A local maximum of the likelihood function, a consistent version of which is guaranteed to exist by the asymptotic theory as long as the model is correct and all $\lambda_j$ are positive, usually results without any restarts. The function \code{regmixEM} implements the EM algorithm for mixtures of regressions in \pkg{mixtools}. This function has arguments that control options such as adding an intercept term, \code{addintercept = TRUE}; forcing all $\vec\beta_j$ estimates to be the same, \code{arbmean = FALSE} (for instance, to model outlying observations as having a separate error variance from the non-outliers); and forcing all $\sigma_j^2$ estimates to be the same, \code{arbvar = FALSE}. For additional details, type \code{help("regmixEM")}. As an example, we fit a 2-component model to the GNP data shown in Figure \ref{gnpdata}. \citet{hurn} and \citet{youngphd} selected 2 components for this dataset using model selection criteria, Bayesian approaches to selecting the number of components, and a bootstrapping approach. 
The function \code{regmixEM} will be used for fitting a 2-component mixture of regressions by an EM algorithm: <>= data("CO2data") attach(CO2data) @ <>= CO2reg <- regmixEM(CO2, GNP, lambda = c(1, 3) / 4, beta = matrix(c(8, -1, 1, 1), 2, 2), sigma = c(2, 1)) @ We can then pull out the final observed log-likelihood as well as estimates for the 2-component fit, which include $\hat{\lambda}$, $\hat{\vec{\beta}}_{1}$, $\hat{\vec{\beta}}_{2}$, $\hat{\sigma}_{1}$, and $\hat{\sigma}_{2}$: <>= summary(CO2reg) @ The reader is encouraged to alter the starting values or let the internal algorithm generate random starting values. However, this fit seems appropriate and the solution is displayed in Figure \ref{co2EM} along with 99\% Working-Hotelling Confidence Bands, which are constructed automatically by the \code{plot} method in this case by assigning each point to its most probable component and then fitting two separate linear regressions: <>= plot(CO2reg, density = TRUE, alpha = 0.01, cex.main = 1.5, cex.lab = 1.5, cex.axis = 1.4) @ \setkeys{Gin}{width=0.49\textwidth} \begin{figure}[!h] \centering <>= for(i in 1:2){ file=paste("CO2reg", i, ".pdf", sep="") pdf(file=file, paper="special", width=6, height=6) plot(CO2reg, whichplots=i, alpha = 0.01, cex.main = 1.5, cex.lab = 1.5, cex.axis = 1.4) dev.off() cat("\\includegraphics{", file, "}\n", sep="") } @ \caption{The GNP data fitted with a 2-component parametric EM algorithm in \pkg{mixtools}. Left: the sequence of log-likelihood values, $L_{\Bx}(\f^{(t)})$; Right: the fitted regression lines with 99\% Working-Hotelling Confidence Bands.} \label{co2EM} \end{figure} \subsection{Predictor-dependent mixing proportions} \label{section:pdmp} Suppose that in model (\ref{mor}), we replace $\lambda_j$ by $\lambda_{j}(\textbf{x}_{i})$ and assume that the mixing proportions vary as a function of the predictors $\textbf{x}_{i}$. Allowing this type of flexibility in the model might be useful for a number of reasons. 
For instance, sometimes it is the proportions $\lambda_j$ that are of primary scientific interest, and in a regression setting it may be helpful to know whether these proportions appear to vary with the predictors. As another example, consider a \code{regmixEM} model using \code{arbmean = FALSE} in which the mixture structure only concerns the error variance: In this case, $\lambda_j(\vec x)$ would give some sense of the proportion of outliers in various regions of the predictor space. One may assume that $\lambda_{j}(\textbf{x})$ has a particular parametric form, such as a logistic function, which introduces new parameters requiring estimation. This is the idea of the \textit{hierarchical mixtures of experts} (HME) procedure \citep{jacobsall}, which is commonly used in neural networks and which is implemented, for example, in the \pkg{flexmix} package for \proglang{R} \citep{jss:Leisch:2004, Grun+Leisch:2008}. However, a parametric form of $\lambda_{j}(\textbf{x})$ may be too restrictive; in particular, the logistic function is monotone, which may not realistically capture the pattern of change of $\lambda_j$ as a function of $\vec x$. As an alternative, \citet{young2009mor} propose a nonparametric estimate of $\lambda_{j}(\textbf{x}_{i})$ that uses ideas from kernel density estimation. The intuition behind the approach of \citet{young2009mor} is as follows: The M-step estimate (\ref{lambda}) of $\lambda_j$ at each iteration of a finite mixture model EM algorithm is simply an average of the ``posterior'' probabilities $p_{ij}=\E(Z_{ij}|\mbox{data})$. As a substitute, the nonparametric approach uses local linear regression to approximate the $\lambda_j(\textbf{x})$ function. 
Considering the case of univariate $x$ for simplicity, we set $\lambda_j(x) = \hat\alpha_{0j}(x)$, where \begin{equation}\label{lambdax} (\hat\alpha_{0j}(x), \hat\alpha_{1j}(x))= \arg\min_{(\alpha_0, \alpha_1)} \sum_{i=1}^n K_h(x-x_i) \left[ p_{ij} - \alpha_0 - \alpha_1(x-x_i) \right]^2 \end{equation} and $K_h(\cdot)$ is a kernel density function with scale parameter (i.e., bandwidth) $h$. It is straightforward to generalize equation (\ref{lambdax}) to the case of vector-valued $\vec x$ by using a multivariate kernel function. \citet{young2009mor} give an iterative algorithm for estimating mixture of regression parameters that replaces the standard $\lambda_j$ updates (\ref{lambda}) by the kernel-weighted version (\ref{lambdax}). The algorithm is otherwise similar to a standard EM; thus, like the algorithm in Section~\ref{section:EMlike} of this article, the resulting algorithm is an EM-like algorithm. Because only the $\lambda_j$ parameters depend on $\vec x$ (and are thus ``locally estimated''), whereas the other parameters (the $\vec\beta_j$ and $\sigma_j$) can be considered to be globally estimated, \citet{young2009mor} call this algorithm an iterative global/local estimation (IGLE) algorithm. Naturally, it replaces the usual E-step (\ref{regposteriors}) by a modified version in which each $\lambda_j$ is replaced by $\lambda_j(x_i)$. The function \code{regmixEM.loc} implements the IGLE algorithm in \pkg{mixtools}. Like the \code{regmixEM} function, \code{regmixEM.loc} has the flexibility to include an intercept term by using \code{addintercept = TRUE}. Moreover, this function has the argument \code{kern.l} to specify the kernel used in the local estimation of the $\lambda_{j}(\textbf{x}_{i})$. Kernels the user may specify include \code{"Gaussian"}, \code{"Beta"}, \code{"Triangle"}, \code{"Cosinus"}, and \code{"Optcosinus"}. 
Further numeric arguments relating to the chosen kernel include \code{kernl.g} to specify the shape parameter for when \code{kern.l = "Beta"} and \code{kernl.h} to specify the bandwidth which controls the size of the window used in the local estimation of the mixing proportions. See the corresponding help file for additional details. For the GNP and emissions dataset, Figure \ref{co2EM} indicates that the assumption of constant weights for the component regressions across all values of the covariate space may not be appropriate. The countries with higher GNP values appear to have a greater probability of belonging to the first component (i.e., the red line in Figure \ref{co2EM}). We will therefore apply the IGLE algorithm to this dataset. We will use the triweight kernel in equation (\ref{lambdax}), which is given by setting $\gamma=3$ in \begin{equation}\label{beta} K_{h}(x)=\frac{1}{hB(1/2,\gamma+1)}\left(1-\frac{x^2}{h^2}\right)^{\gamma}_{+}, \end{equation} where $B(x,y)=\Gamma(x)\Gamma(y)/\Gamma(x+y)$ is the beta function. For the triweight, $B(1/2, 4)$ is exactly $32/35$. This kernel may be specified in \code{regmixEM.loc} with \code{kern.l = "Beta"} and \code{kernl.g = 3}. The bandwidth we selected was $h=20$, which we specify with \code{kernl.h = 20}. For this implementation of the IGLE algorithm, we set the parameter estimates and posterior probability estimates obtained from the mixture of regressions EM algorithm as starting values for $\hat{\vec{\beta}}_{1}$, $\hat{\vec{\beta}}_{2}$, $\hat{\sigma}_{1}$, $\hat{\sigma}_{2}$, and $\lambda(x_{i})$. <>= CO2igle <- regmixEM.loc(CO2, GNP, beta = CO2reg$beta, sigma = CO2reg$sigma, lambda = CO2reg$posterior, kern.l = "Beta", kernl.h = 20, kernl.g = 3) @ We can view the estimates for $\hat{\vec{\beta}}_{1}$, $\hat{\vec{\beta}}_{2}$, $\hat{\sigma}_{1}$, and $\hat{\sigma}_{2}$. 
Notice that the estimates are comparable to those obtained for the mixture of regressions EM output and the log-likelihood value is slightly higher. <>= summary(CO2igle) @ Next, we can plot the estimates of $\lambda(x_{i})$ from the IGLE algorithm. <>= plot(GNP, CO2igle$post[,1], xlab = "GNP", cex.axis = 1.4, cex.lab = 1.5, ylab = "Final posterior probabilities") lines(sort(GNP), CO2igle$lambda[order(GNP), 1], col=2) abline(h = CO2igle$lambda[1], lty = 2) @ <>= pdf("lamplot.pdf") plot(GNP, CO2igle$post[,1], xlab = "GNP", cex.axis = 1.4, cex.lab = 1.5, ylab = "Final posterior probabilities") lines(sort(GNP), CO2igle$lambda[order(GNP), 1], col=2, lwd=2) abline(h = CO2igle$lambda[1], lty = 2, lwd=2) dev.off() @ This plot is given in Figure \ref{lamplot}. Notice the curvature provided by the estimates from the IGLE fit. These fits indicate an upward trend in the posteriors. The predictor-dependent mixing proportions model provides a viable way to reveal this trend since the regular mixture of regressions fit simply provides the same estimate of $\lambda$ for all $x_{i}$. \begin{figure}[h] \centering \includegraphics[height=3in,width=3in]{lamplot.pdf} \caption{Posterior membership probabilities $p_{i1}$ for component one versus the predictor GNP along with estimates of $\lambda_1(x)$ from the IGLE algorithm (the solid red curve) and $\lambda_1$ from the mixture of linear regressions EM algorithm (the dashed black line).} \label{lamplot} \end{figure} \subsection{Parametric bootstrapping for standard errors} With likelihood methods for estimation in mixture models, it is possible to obtain standard error estimates by using the inverse of the observed information matrix when implementing a Newton-type method. However, this may be computationally burdensome. An alternative way to report standard errors in the likelihood setting is by implementing a parametric bootstrap. 
\citet{eftib} claim that the parametric bootstrap should provide similar standard error estimates to the traditional method involving the information matrix. In a mixture-of-regressions context, a parametric bootstrap scheme may be outlined as follows: \begin{enumerate} \item Use \code{regmixEM} to find a local maximizer $\hat{\vec\theta}$ of the likelihood. \item For each $\textbf{x}_{i}$, simulate a response value $y_{i}^{*}$ from the mixture density $g_{\hat{\vec\theta}}(\cdot|\textbf{x}_{i})$. \item Find a parameter estimate $\tilde{\vec{\theta}}$ for the bootstrap sample using \code{regmixEM}. \item Use some type of check to determine whether label-switching appears to have occurred, and if so, correct it. \item Repeat steps 2 through 4 $B$ times to simulate the bootstrap sampling distribution of $\hat{\vec\theta}$. \item Use the sample covariance matrix of the bootstrap sample as an approximation to the covariance matrix of $\hat{\vec\theta}$. \end{enumerate} Note that step 4, which is not part of a standard parametric bootstrap, can be especially important in a mixture setting. The \pkg{mixtools} package implements a parametric bootstrap algorithm in the \code{boot.se} function. We may apply it to the regression example of this section, which assumes the same estimate of $\lambda$ for all $x_{i}$, as follows: <>= set.seed(123) CO2boot <- boot.se(CO2reg, B = 100) @ This output consists of both the standard error estimates and the parameter estimates obtained at each bootstrap replicate. An examination of the slope and intercept parameter estimates of the 100 bootstrap replicates reveals that no label-switching is likely to have occurred. 
For instance, the intercept terms of component one range from 4 to 11, whereas the intercept terms of component two are all tightly clumped around 0: <>= rbind(range(CO2boot$beta[1,]), range(CO2boot$beta[2,])) @ We may examine the bootstrap standard error estimates by themselves as follows: <>= CO2boot[c("lambda.se", "beta.se", "sigma.se")] @ \section[Additional capabilities of mixtools]{Additional capabilities of \pkg{mixtools}} \label{section:misc} \subsection{Selecting the number of components} \label{ss:nbcomp} Determining the number of components $k$ is still a major contemporary issue in mixture modeling. Two commonly employed techniques are information criterion and parametric bootstrapping of the likelihood ratio test statistic values for testing \begin{eqnarray}\label{mixturetest} \nonumber H_{0}&:& k = k_{0} \\ H_{1}&:& k = k_{0}+1 \end{eqnarray} for some positive integer $k_{0}$ \citep{mclach}. The \pkg{mixtools} package has functions to employ each of these methods using EM output from various mixture models. The information criterion functions calculate An Information Criterion (AIC) of \citet{aic}, the Bayesian Information Criterion (BIC) of \citet{schw}, the Integrated Completed Likelihood (ICL) of \citet{biern}, and the consistent AIC (CAIC) of \citet{boz}. The functions for performing parametric bootstrapping of the likelihood ratio test statistics sequentially test $k=k_{0}$ versus $k=k_{0}+1$ for $k_0=1, 2, \ldots$, terminating after the bootstrapped $p$-value for one of these tests exceeds a specified significance level. Currently, \pkg{mixtools} has functions for calculating information criteria for mixtures of multinomials (\code{multmixmodel.sel}), mixtures of multivariate normals under the conditionally i.i.d.\ assumption (\code{repnormmixmodel.sel}), and mixtures of regressions (\code{regmixmodel.sel}). 
Output from various mixture model fits available in \pkg{mixtools} can also be passed to the function \code{boot.comp} for the parametric bootstrapping approach. The parameter estimates from these EM fits are used to simulate data from the null distribution for the test given in (\ref{mixturetest}). For example, the following application of the \code{multmixmodel.sel} function to the water-level multinomial data from Section~\ref{section:cut} indicates that either 3 or 4 components seems like the best option (no more than 4 are allowed here since there are only 8 multinomial trials per observation and the mixture of multinomials requires $2m \le r+1$ for identifiability): <>= <> set.seed(10) multmixmodel.sel(watermult, comps = 1:4, epsilon = 0.001) @ \citet{youngphd} gives more applications of these functions to real datasets. \subsection{Bayesian methods} Currently, there are only two \pkg{mixtools} functions relating to Bayesian methodology and they both pertain to analyzing mixtures of regressions as described in \citet{hurn}. The \code{regmixMH} function performs a Metropolis-Hastings algorithm for fitting a mixture of regressions model where a proper prior has been assumed. The sampler output from \code{regmixMH} can then be passed to \code{regcr} in order to construct credible regions of the regression lines. Type \code{help("regmixMH")} and \code{help("regcr")} for details and an illustrative example. \section*{Acknowledgments} This research is partially supported by NSF Award SES-0518772. DRH received additional funding from Le Studium, an agency of the Centre National de la Recherche Scientifique of France. \bibliography{mixtools} \end{document} mixtools/NEWS0000755000176200001440000000740113616556343012650 0ustar liggesusersChanges in mixtools 1.2.0 ============================ * 2020-02-05 * Fixed compatibility issues with R 4.0.0. * Modified the NAMESPACE to only import three functions from the kernlab package: kpca(), pcv(), and Q(). 
* Modified gammamixEM() to reflect the developments in Young, Chen, Hewage, and Nilo-Poyanco (2019). This includes the addition of the argument mom.start to use a method-of-moments starting value strategy, as well as the addition of the fix.alpha argument for estimating a common shape parameter across the different components. * Added the mixturegram() function to construct mixturegrams for determining the number of components, which was introduced by Young, Ke, and Zeng (2018). * Modified the way the mixing proportions were randomly generated in the poisregmix.init() and logisregmix.init() functions. Previously, there was the possibility of generating too small of a mixing component, which could cause numerical instabilities. Thanks to Xiaoqiong Fang for identifying this issue. Changes in mixtools 1.1.0 ============================ * 2017-03-10 * Properly registered native routines for internal C code to conform with R-devel checks. * Fixed some inconsistencies with the S3 methods used. * Added tauequivnormalmixEM() to search for a local maximum of the likelihood surface for a univariate finite mixture of normals with possible equality constraints on the stdev parameters. * Added expRMM_EM() for fitting univariate finite mixtures-of-exponentials with right censoring. Some associated distribution functions for this calculation were also added. * Added weibullRMM_SEM(), which is a stochastic EM algorithm for estimating Reliability Mixture Models (RMM) with censoring. * Added spRMM_SEM(), which is a stochastic EM algorithm for estimating a semiparametric Scaling RMM with censoring. * Corrected an issue with loading WaterdataFull. * Removed dependency on 'boot' package and revised code in the appropriate functions. Thanks to Tim Hesterberg and Karl Millar of Google for this suggestion. * Updated some help files. 
Changes in mixtools 1.0.4 ============================ * 2016-01-11 * Added mvnpEM() and some related functions, which are multivariate blocks extension to the npEM() function. * Updated some help files. Changes in mixtools 1.0.3 ============================ * 2015-04-17 * Updated the maintainer's contact e-mail. * Added a new npMSL() version, with k-fold cross-validation. * Fixed some errors in the examples due to change in format to the Waterdata dataset. * Updated some references. Changes in mixtools 1.0.2 ============================ * 2014-05-13 * Fixed an indexing error in the boot.comp() function. * Updated some references. Changes in mixtools 1.0.1 ============================ * 2014-01-01 * Fixed a small bug to the boot.comp() function. * Added more complete version of Waterdata dataset, which includes all 579 children originally measured along with age and sex information Changes in mixtools 1.0.0 ============================ * 2013-10-05 * Started a NEWS file as of this version. New capabilities include: * A normalmixMMlc() function was added, for mixtures of univariate Normals with linear constraints on both the means and the variances parameters, requiring a MM-type algorithm. * A spEMsymlocN01() function was added, implementing a specific semiparametric EM-like algorithm for univariate mixture in False Discovery Rate (FDR) estimation, in which setup one component is known and set to the standard normal whereas the second component is semiparametric. Functions for plotting the results from this algorithm and for FDR estimation from its output have also been added. * Some documentation (Rd) files have been updated for examples, and some reference papers in EM-related Rd files have been updated. mixtools/R/0000755000176200001440000000000013617013707012336 5ustar liggesusersmixtools/R/multmixmodelsel.R0000755000176200001440000000350113616715141015707 0ustar liggesusers
multmixmodel.sel <- function (y, comps = NULL, ...)
{
  # Model selection for mixtures of multinomials.
  #
  # Fits models with each number of components in `comps` via multmixEM()
  # and reports AIC, BIC, CAIC, ICL and the log-likelihood (all on the
  # "larger is better" scale used by this package), plus the winning
  # component count under each criterion.
  #
  # Args:
  #   y     : matrix of multinomial counts (rows = observations), or a list
  #           carrying the data in its $y element.
  #   comps : vector of component counts to try; defaults to
  #           1:floor((m + 1)/2), where m is the smallest row total.
  #   ...   : further arguments passed through to multmixEM().
  #
  # Returns: a matrix with one row per criterion, one column per fitted
  #          model, and a final "Winner" column.
  if (inherits(y, "list") && !is.null(y$y)) {  # unwrap a list carrying $y
    y <- y$y
  }
  n <- dim(y)[1]
  p <- dim(y)[2]
  # Smallest multinomial total; it bounds the number of identifiable
  # components (Elmore/Wang-type bound used below).
  m <- min(apply(y, 1, sum))
  max.allowed.comp <- floor((m + 1)/2)
  if (is.null(comps))
    comps <- seq_len(max.allowed.comp)
  if (max(comps) > max.allowed.comp) {
    stop(paste("No more than", max.allowed.comp, "components allowed",
               "with", m, "multinomial trials"))
  }
  aic <- bic <- caic <- icl <- ll <- NULL
  theta <- matrix(0, 0, p)
  lambda <- NULL
  for (k in sort(comps)) {
    tmp <- multmix.init(y, k = k)
    theta <- tmp$theta
    lambda <- tmp$lambda
    if (k != 1) {
      em <- multmixEM(y, lambda = lambda, theta = theta, k = k, ...)
      loglik <- em$loglik
      lambda <- em$lambda
      theta <- em$theta
    }
    else {
      # One component: closed-form log-likelihood. The former
      # sum(log(exp(...))) round trip is removed -- it was redundant and
      # could overflow/underflow for extreme counts.
      loglik <- sum(apply(y, 1, ldmult, theta = theta))
    }
    # Penalized criteria, written as loglik minus penalty so that
    # larger values are better.
    aic <- c(aic, loglik - (p * k - 1))
    bic <- c(bic, loglik - log(n) * (p * k - 1)/2)
    caic <- c(caic, loglik - (log(n) + 1) * (p * k - 1)/2)
    if (k == 1) {
      icl <- c(icl, loglik - log(n) * (p * k - 1)/2)
    }
    else icl <- c(icl, loglik - log(n) * (p * k - 1)/2 -
                    sum(lambda * log(lambda)))  # entropy term for ICL
    ll <- c(ll, loglik)
  }
  out <- rbind(aic, bic, caic, icl, ll)
  win <- apply(out, 1, which.max)
  rownames(out) <- c("AIC", "BIC", "CAIC", "ICL", "Loglik")
  colnames(out) <- sort(comps)
  Winner <- as.numeric(colnames(out)[win])
  cbind(out, Winner)
}
mixtools/R/bootcomp.R0000755000176200001440000007651312777447152014325 0ustar liggesusers
boot.comp <- function (y, x = NULL, N = NULL, max.comp = 2, B = 100, sig = 0.05, arbmean = TRUE, arbvar = TRUE, mix.type = c("logisregmix", "multmix", "mvnormalmix", "normalmix", "poisregmix", "regmix", "regmix.mixed", "repnormmix"), hist = TRUE, ...)
{ mix.type <- match.arg(mix.type) k = max.comp p = 0 sigtest = 1 Q.star = list() i = 0 if (mix.type == "regmix") { Q0 = 0 Q1 = 0 obs.Q = 0 i = 1 while (sigtest == 1 && i <= k) { Q.star[[i]] = 0 if (i == 1) { w = 1 while (w == 1) { H0.fit = lm(y ~ x) beta = coef(H0.fit) Q0[i] = as.numeric(logLik(H0.fit)) H1.fit = try(regmixEM(y = y, x = x, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H1.fit) == "try-error") { w = 1 } else { Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) w = 1 else w = 2 beta = coef(H0.fit) xbeta = cbind(1, x) %*% beta xy.sd = sqrt(sum(H0.fit$res^2)/(length(y) - 2)) j = 0 } } while (j < B) { j = j + 1 y.sim = rnorm(length(y), mean = xbeta, sd = xy.sd) xy.simout = lm(y.sim ~ x) em.out = try(regmixEM(y = y.sim, x = x, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out$loglik - as.numeric(logLik(xy.simout))) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } else { w = 1 while (w == 1) { H0.fit = try(regmixEM(y = y, x = x, k = i, arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) H1.fit = try(regmixEM(y = y, x = x, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H0.fit) == "try-error" || class(H1.fit) == "try-error") { w = 1 } else { Q0[i] = H0.fit$loglik if (arbmean == FALSE) { scale = H0.fit$scale beta = matrix(rep(H0.fit$beta, i), ncol = i) } else { scale = 1 } Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) w = 1 else w = 2 } beta.new = H0.fit$beta xbeta.new = cbind(1, x) %*% beta.new j = 0 } while (j < B) { j = j + 1 wt = rmultinom(length(y), size = 1, prob = H0.fit$lambda) if (arbmean == FALSE) { y.sim = sapply(1:length(y), function(i) rnorm(1, mean = xbeta.new, sd = ((scale * H0.fit$sigma)[wt[, i] == 1]))) } else { if (arbvar == FALSE) { y.sim = sapply(1:length(y), function(i) rnorm(1, mean = xbeta.new[i, (wt[, i] == 1)], sd = 
H0.fit$sigma)) } else { y.sim = sapply(1:length(y), function(i) rnorm(1, mean = xbeta.new[i, (wt[, i] == 1)], sd = H0.fit$sigma[wt[, i] == 1])) } } em.out.0 = try(regmixEM(y = y.sim, x = x, k = i, arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) em.out.1 = try(regmixEM(y = y.sim, x = x, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out.0) == "try-error" || class(em.out.1) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out.1$loglik - em.out.0$loglik) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } p[i] = mean(Q.star[[i]] >= obs.Q[i]) sigtest = (p[i] < sig) i = i + 1 } } if (mix.type == "repnormmix") { Q0 = 0 Q1 = 0 obs.Q = 0 i = 1 while (sigtest == 1 && i <= k) { Q.star[[i]] = 0 if (i == 1) { w = 1 while (w == 1) { dens = dnorm(y, mean = mean(y), sd = sd(y)) Q0[i] = sum(log(dens[dens > 0])) H1.fit = try(repnormmixEM(x = y, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H1.fit) == "try-error") { w = 1 } else { Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } j = 0 } } while (j < B) { j = j + 1 y.sim = rmvnormmix(nrow(y), mu = rep(mean(y), ncol(y)), sigma = rep(sd(y), ncol(y))) dens.sim = dnorm(y.sim, mean = mean(y), sd = sd(y)) x.simout = sum(log(dens.sim[dens.sim > 0])) em.out = try(repnormmixEM(x = y.sim, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out$loglik - x.simout) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } else { w = 1 while (w == 1) { H0.fit = try(repnormmixEM(x = y, k = i, arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) H1.fit = try(repnormmixEM(x = y, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H0.fit) == "try-error" || class(H1.fit) == "try-error") { w = 1 } else { Q0[i] = H0.fit$loglik if (arbmean == FALSE) scale = H0.fit$scale else scale = 1 Q1[i] = H1.fit$loglik 
obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } } j = 0 } while (j < B) { j = j + 1 wt = rmultinom(length(y), size = 1, prob = H0.fit$lambda) if (arbmean == FALSE) { y.sim = sapply(1:ncol(y), function(i) rnorm(nrow(y), mean = H0.fit$mu, sd = ((scale * H0.fit$sigma)[wt[, i] == 1]))) } else { if (arbvar == FALSE) { y.sim = sapply(1:ncol(y), function(i) rnorm(nrow(y), mean = H0.fit$mu[wt[, i] == 1], sd = H0.fit$sigma)) } else { y.sim = sapply(1:ncol(y), function(i) rnorm(nrow(y), mean = H0.fit$mu[wt[, i] == 1], sd = H0.fit$sigma[wt[, i] == 1])) } } em.out.0 = try(repnormmixEM(x = y.sim, k = i, arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) em.out.1 = try(repnormmixEM(x = y.sim, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out.0) == "try-error" || class(em.out.1) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out.1$loglik - em.out.0$loglik) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } p[i] = mean(Q.star[[i]] >= obs.Q[i]) sigtest = (p[i] < sig) i = i + 1 } } if (mix.type == "regmix.mixed") { if (is.list(y)) { if (length(y) != length(x)) stop("Number of elements in lists for x and y must match!") } tt = sapply(1:length(x), function(i) x[[i]][, 1]) beta = t(sapply(1:length(y), function(i) lm(y[[i]] ~ x[[i]])$coef)) y = beta mix.type = "mvnormalmix" } if (mix.type == "mvnormalmix") { Q0 = 0 Q1 = 0 obs.Q = 0 i = 1 while (sigtest == 1 && i <= k) { Q.star[[i]] = 0 if (i == 1) { w = 1 while (w == 1) { y.mean = apply(y, 2, mean) y.cov = cov(y) dens = dmvnorm(y, mu = y.mean, sigma = y.cov) Q0[i] = sum(log(dens[dens > 0])) H1.fit = try(mvnormalmixEM(x = y, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H1.fit) == "try-error") { w = 1 } else { Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) w = 1 else w = 2 j = 0 } } while (j < B) { j = j + 1 y.sim = rmvnorm(nrow(y), mu = apply(y, 2, mean), sigma = y.cov) dens.sim = dmvnorm(y.sim, mu = 
y.mean, sigma = y.cov) y.simout = sum(log(dens.sim[dens.sim > 0])) em.out = try(mvnormalmixEM(x = y.sim, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out$loglik - y.simout) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } else { w = 1 while (w == 1) { H0.fit = try(mvnormalmixEM(x = y, k = i, ...), silent = TRUE) H1.fit = try(mvnormalmixEM(x = y, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H0.fit) == "try-error" || class(H1.fit) == "try-error") { w = 1 } else { Q0[i] = H0.fit$loglik if (arbmean == FALSE) { H0.fit$mu = lapply(1:i, function(l) H0.fit$mu) } if (arbvar == FALSE) { H0.fit$sigma = lapply(1:i, function(l) H0.fit$sigma) } Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } } j <- 0 } while (j < B) { j = j + 1 wt = rmultinom(nrow(y), size = 1, prob = H0.fit$lambda) if (arbmean == FALSE) { y.sim = t(sapply(1:nrow(y), function(i) rmvnorm(1, mu = H0.fit$mu, sigma = H0.fit$sigma[wt[, i] == 1][[1]]))) } else { if (arbvar == FALSE) { y.sim = t(sapply(1:nrow(y), function(i) rmvnorm(1, mu = H0.fit$mu[wt[, i] == 1][[1]], sigma = H0.fit$sigma))) } else { y.sim = t(sapply(1:nrow(y), function(i) rmvnorm(1, mu = H0.fit$mu[wt[, i] == 1][[1]], sigma = H0.fit$sigma[wt[, i] == 1][[1]]))) } } em.out.0 = try(mvnormalmixEM(x = y.sim, k = i, arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) em.out.1 = try(mvnormalmixEM(x = y.sim, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out.0) == "try-error" || class(em.out.1) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out.1$loglik - em.out.0$loglik) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } p[i] = mean(Q.star[[i]] >= obs.Q[i]) sigtest = (p[i] < sig) i = i + 1 } } if (mix.type == "normalmix") { Q0 = 0 Q1 = 0 obs.Q = 0 i = 1 while (sigtest == 1 && i <= k) { Q.star[[i]] = 0 if (i == 1) { w = 1 while (w 
== 1) { dens = dnorm(y, mean = mean(y), sd = sd(y)) Q0[i] = sum(log(dens[dens > 0])) H1.fit = try(normalmixEM(x = y, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H1.fit) == "try-error") { w = 1 } else { Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } j = 0 } } while (j < B) { j = j + 1 y.sim = rnorm(length(y), mean = mean(y), sd = sd(y)) dens.sim = dnorm(y.sim, mean = mean(y), sd = sd(y)) x.simout = sum(log(dens.sim[dens.sim > 0])) em.out = try(normalmixEM(x = y.sim, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out$loglik - x.simout) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } else { w = 1 while (w == 1) { H0.fit = try(normalmixEM(x = y, k = i, arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) H1.fit = try(normalmixEM(x = y, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(H0.fit) == "try-error" || class(H1.fit) == "try-error") { w = 1 } else { Q0[i] = H0.fit$loglik if (arbmean == FALSE) scale = H0.fit$scale else scale = 1 Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } } j = 0 } while (j < B) { j = j + 1 wt = rmultinom(length(y), size = 1, prob = H0.fit$lambda) if (arbmean == FALSE) { y.sim = sapply(1:length(y), function(i) rnorm(1, mean = H0.fit$mu, sd = ((scale * H0.fit$sigma)[wt[, i] == 1]))) } else { if (arbvar == FALSE) { y.sim = sapply(1:length(y), function(i) rnorm(1, mean = H0.fit$mu[(wt[, i] == 1)], sd = H0.fit$sigma)) } else { y.sim = sapply(1:length(y), function(i) rnorm(1, mean = H0.fit$mu[(wt[, i] == 1)], sd = H0.fit$sigma[wt[, i] == 1])) } } em.out.0 = try(normalmixEM(x = y.sim, k = i, arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) em.out.1 = try(normalmixEM(x = y.sim, k = (i + 1), arbmean = arbmean, arbvar = arbvar, ...), silent = TRUE) if (class(em.out.0) == "try-error" 
|| class(em.out.1) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out.1$loglik - em.out.0$loglik) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } p[i] = mean(Q.star[[i]] >= obs.Q[i]) sigtest = (p[i] < sig) i = i + 1 } } if (mix.type == "multmix") { Q0 = 0 Q1 = 0 obs.Q = 0 i = 1 while (sigtest == 1 && i <= k) { Q.star[[i]] = 0 if (i == 1) { w = 1 while (w == 1) { m = apply(y, 1, sum) n.i = apply(y, 2, sum) theta = n.i/sum(n.i) Q0[i] = sum(log(exp(apply(y, 1, ldmult, theta = theta)))) H1.fit = try(multmixEM(y = y, k = (i + 1), ...), silent = TRUE) if (class(H1.fit) == "try-error") { w = 1 } else { Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } j = 0 } } while (j < B) { j = j + 1 y.sim = matrix(0, ncol = ncol(y), nrow = nrow(y)) for (l in 1:length(m)) { y.sim[l, ] <- rmultinom(1, size = m[l], prob = theta) } theta.sim = apply(y.sim, 2, sum)/sum(apply(y.sim, 2, sum)) y.simout = sum(log(exp(apply(y.sim, 1, ldmult, theta = theta)))) em.out = try(multmixEM(y = y.sim, k = (i + 1), ...), silent = TRUE) if (class(em.out) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out$loglik - y.simout) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } else { w = 1 while (w == 1) { H0.fit = try(multmixEM(y = y, k = i, ...), silent = TRUE) H1.fit = try(multmixEM(y = y, k = (i + 1), ...), silent = TRUE) if (class(H0.fit) == "try-error" || class(H1.fit) == "try-error") { w = 1 } else { theta = H0.fit$theta Q0[i] = H0.fit$loglik Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } } j = 0 } while (j < B) { j = j + 1 wt = rmultinom(nrow(y), size = 1, prob = H0.fit$lambda) # y.sim = t(sapply(1:nrow(y), function(i) rmultinom(1, # size = n.i[i], prob = H0.fit$theta[(wt[, # i] == 1), ]))) new.y.sim = t(sapply(1:nrow(y), function(i) rmultinom(1, size = n.i, prob = H0.fit$theta[(wt[, i] == 1), ]))) # new.y.sim = 0 em.out.0 = try(multmixEM(y = new.y.sim, k = i, ...), silent = TRUE) em.out.1 
= try(multmixEM(y = new.y.sim, k = (i + 1), ...), silent = TRUE) if (class(em.out.0) == "try-error" || class(em.out.1) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out.1$loglik - em.out.0$loglik) Q.star[[i]][j] if (Q.star[[i]][j] < 0) { j = j - 1 } } } } p[i] = mean(Q.star[[i]] >= obs.Q[i]) sigtest = (p[i] < sig) i = i + 1 } } if (mix.type == "logisregmix") { if (is.null(N)) stop("Number of trials must be specified!") Q0 = 0 Q1 = 0 obs.Q = 0 i = 1 while (sigtest == 1 && i <= k) { Q.star[[i]] = 0 if (i == 1) { w = 1 while (w == 1) { H0.fit = glm(cbind(y, N - y) ~ x, family = binomial()) Q0[i] = logLik(H0.fit) H1.fit = try(logisregmixEM(y = y, x = x, N = N, k = (i + 1), ...), silent = TRUE) if (class(H1.fit) == "try-error") { w = 1 } else { Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } beta = coef(H0.fit) xbeta = cbind(1, x) %*% beta j = 0 } } while (j < B) { j = j + 1 y.sim = rbinom(length(y), size = N, prob = inv.logit(xbeta)) xy.simout = glm(cbind(y.sim, N - y.sim) ~ x, family = binomial()) em.out = try(logisregmixEM(y = y.sim, x = x, N = N, k = (i + 1), ...), silent = TRUE) if (class(em.out) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out$loglik - logLik(xy.simout)) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } else { w = 1 while (w == 1) { H0.fit = try(logisregmixEM(y = y, x = x, N = N, k = i, ...), silent = TRUE) H1.fit = try(logisregmixEM(y = y, x = x, N = N, k = (i + 1), ...), silent = TRUE) if (class(H0.fit) == "try-error" || class(H1.fit) == "try-error") { w = 1 } else { Q0[i] = H0.fit$loglik Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } beta = H0.fit$beta xbeta = cbind(1, x) %*% beta } j = 0 } while (j < B) { j = j + 1 wt = rmultinom(length(y), size = 1, prob = H0.fit$lambda) y.sim = sapply(1:length(y), function(i) rbinom(1, size = N[i], prob = inv.logit(xbeta)[, (wt[, i] == 1)])) em.out.0 = try(logisregmixEM(y = y.sim, x = 
x, N = N, k = i, ...), silent = TRUE) em.out.1 = try(logisregmixEM(y = y.sim, x = x, N = N, k = (i + 1), ...), silent = TRUE) if (class(em.out.0) == "try-error" || class(em.out.1) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out.1$loglik - em.out.0$loglik) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } p[i] = mean(Q.star[[i]] >= obs.Q[i]) sigtest = (p[i] < sig) i = i + 1 } } if (mix.type == "poisregmix") { Q0 = 0 Q1 = 0 obs.Q = 0 i = 1 while (sigtest == 1 && i <= k) { Q.star[[i]] = 0 if (i == 1) { w = 1 while (w == 1) { H0.fit = glm(y ~ x, family = poisson()) Q0[i] = logLik(H0.fit) H1.fit = try(poisregmixEM(y = y, x = x, k = (i + 1), ...), silent = TRUE) if (class(H1.fit) == "try-error") { w = 1 } else { Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } beta = coef(H0.fit) xbeta = cbind(1, x) %*% beta j = 0 } } while (j < B) { j = j + 1 y.sim = rpois(length(y), lambda = exp(xbeta)) xy.simout = glm(y.sim ~ x, family = poisson()) em.out = try(poisregmixEM(y = y.sim, x = x, k = (i + 1), ...), silent = TRUE) if (class(em.out) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out$loglik - logLik(xy.simout)) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } else { w = 1 while (w == 1) { H0.fit = try(poisregmixEM(y = y, x = x, k = i, ...), silent = TRUE) H1.fit = try(poisregmixEM(y = y, x = x, k = (i + 1), ...), silent = TRUE) if (class(H0.fit) == "try-error" || class(H1.fit) == "try-error") { w = 1 } else { Q0[i] = H0.fit$loglik Q1[i] = H1.fit$loglik obs.Q[i] = 2 * (Q1[i] - Q0[i]) if (obs.Q[i] < 0) { w = 1 } else { w = 2 } beta = H0.fit$beta xbeta = cbind(1, x) %*% beta } j = 0 } while (j < B) { j = j + 1 wt = rmultinom(length(y), size = 1, prob = H0.fit$lambda) y.sim = sapply(1:length(y), function(i) rpois(1, lambda = exp(xbeta)[, (wt[, i] == 1)])) em.out.0 = try(poisregmixEM(y = y.sim, x = x, k = i, ...), silent = TRUE) em.out.1 = try(poisregmixEM(y = y.sim, x = x, k = (i + 1), ...), silent = TRUE) if 
(class(em.out.0) == "try-error" || class(em.out.1) == "try-error") { j = j - 1 } else { Q.star[[i]][j] = 2 * (em.out.1$loglik - em.out.0$loglik) if (Q.star[[i]][j] < 0) { j = j - 1 } } } } p[i] = mean(Q.star[[i]] >= obs.Q[i]) sigtest = (p[i] < sig) i = i + 1 } } if (hist) { if (length(p) == 2) { par(mfrow = c(1, 2)) for (i in 1:length(p)) { hist(Q.star[[i]], xlab = c("Bootstrap Likelihood", "Ratio Statistic"), main = paste(i, "versus", i + 1, "Components")) segments(obs.Q[i], 0, obs.Q[i], B, col = 2, lwd = 2) } } else { g = ceiling(sqrt(length(p))) par(mfrow = c(g, g)) for (i in 1:length(p)) { hist(Q.star[[i]], xlab = c("Bootstrap Likelihood", "Ratio Statistic"), main = paste(i, "versus", i + 1, "Components")) segments(obs.Q[i], 0, obs.Q[i], B, col = 2, lwd = 2) } } } if (p[length(p)] < sig) { cat("Decision: Select", length(p) + 1, "Component(s)", "\n") } else { cat("Decision: Select", length(p), "Component(s)", "\n") } list(p.values = p, log.lik = Q.star, obs.log.lik = obs.Q) }mixtools/R/ddirichlet.R0000755000176200001440000000053111736710154014576 0ustar liggesusers
ddirichlet <- function (x, alpha) {
  # Dirichlet(alpha) density evaluated at the probability vector x.
  # Returns 0 whenever x lies outside the unit simplex.
  #
  # Args:
  #   x     : numeric vector of proportions (should sum to 1).
  #   alpha : numeric vector of Dirichlet shape parameters, same length as x.
  if (length(x) != length(alpha))
    stop("Mismatch between dimensions of x and alpha in ddirichlet().\n")
  logD <- sum(lgamma(alpha)) - lgamma(sum(alpha))  # log normalizing constant
  s <- sum((alpha - 1) * log(x))                   # log kernel
  pd <- exp(s - logD)  # s is already a scalar; former exp(sum(s) - logD) was redundant
  # Any coordinate outside [0, 1] -> density 0. (The old form
  # `pd[any(...)] <- 0` indexed a scalar with a scalar logical, which only
  # worked by accident.)
  if (any(x < 0 | x > 1))
    pd <- 0
  # Components must sum to 1, up to floating-point tolerance. The former
  # exact test `sum(x) != 1` wrongly returned 0 for valid simplex points
  # whose components do not sum to exactly 1 in binary floating point.
  if (!isTRUE(all.equal(sum(x), 1)))
    pd <- 0
  return(pd)
}
mixtools/R/tryflare.R0000755000176200001440000000646713616712322014317 0ustar liggesuserstry.flare <- function (y, x, lambda = NULL, beta = NULL, sigma = NULL, alpha = NULL, nu=1, epsilon = 1e-04, maxit = 10000, verb = FALSE, restart=50) { loglik <- function(res, sigma, lambda, alpha) { tmp <- lambda*dnorm(res,sd=sqrt(sigma)) + (1-lambda)*dexp(res,rate=alpha) sum(log(tmp)) } Q=function(res, sigma, lambda, alpha, z) { Q <- sum(z*log(lambda)) + sum((1-z)*log(1-lambda)) - log(2*pi*sigma)*sum(z)/2 - sum(z*res^2)/2/sigma + log(alpha)*sum(1-z) - alpha*sum((1-z)*res) Q } Z <- function(res, sigma, lambda,
alpha) { z=rep(1, length(res)) z[res>0] = lambda / (lambda+(1-lambda)* sqrt(2*pi*sigma) * alpha * as.vector(exp(res[res>0]^2/2/sigma - alpha*res[res>0]))) z } x <- cbind(1, x) n <- length(y) p <- ncol(x) est <- flaremix.init(y=y, x=x, lambda=lambda, beta=beta, sigma=sigma, alpha=alpha) lambda <- est$lambda beta <- est$beta sigma <- est$sigma alpha <- est$alpha diff <- 1 iter <- 0 counts <- 0 ll.counts<-0 xbeta <- x %*% beta res <- y - xbeta dn <- dnorm(res,sd=sqrt(sigma)) de <- dexp(res,rate=alpha) obsloglik <- loglik(res, sigma, lambda, alpha) ll<-obsloglik Q1 <- -Inf all.Q <- NULL z=Z(res,sigma,lambda,alpha) while (sum(abs(diff) > epsilon)>0 && iter < maxit) { iter=iter+1 temp=(solve(-1/sigma*t(x) %*% sweep(x, 1, z, "*") + nu*t(x) %*% sweep(x, 1, (1-z)/(y-x%*%beta), "*"))%*%(1/sigma*(apply(sweep(x,1,z*(y-x%*%beta),"*"),2,sum)) +alpha*apply(sweep(x,1,1-z,"*"),2,sum))) m=1 while(m0) beta.new=beta else beta.new=beta.new xbeta.new <- x %*% beta.new res.new <- y-xbeta.new Q.beta <- Q(res.new,sigma,lambda,alpha,z) if(Q.beta < Q1) j=j+1 else j=101 } if(j==restart) stop(paste("Too many attempts at step-halving!","\n")) z.new=Z(res.new,sigma,lambda,alpha) lambda.new <- mean(z.new) sigma.new <- sum(z.new*(res.new^2))/sum(z.new) alpha.new <- sum(1-z.new[res.new>0])/sum((1-z.new[res.new>0])*res.new[res.new>0]) diff <- c(lambda.new,beta.new,sigma.new,alpha.new)-c(lambda,beta,sigma,alpha) z.new2=Z(res,sigma,lambda,alpha) # z.new2=z.new2/apply(z.new2,1,sum) Q.new <- Q(res.new,sigma.new,lambda.new,alpha.new,z.new2) q.diff=Q.new-Q1 if(q.diff<0) m=m+1 else m=101 } if(m==restart) stop(paste("Too many attempts at step-halving!","\n")) lambda <- lambda.new beta <- beta.new xbeta <- xbeta.new res <- res.new sigma <- sigma.new alpha <- alpha.new z<-z.new2 newobsloglik <- loglik(res.new, sigma.new, lambda.new, alpha.new) ll<-c(ll, newobsloglik) counts <- counts + (Q.newnewobsloglik) Q1 <- Q.new obsloglik <- newobsloglik if(verb==TRUE) cat("iteration=", iter, "diff=", diff, 
"log-likelihood", obsloglik, "\n") } if (iter == maxit) { cat("WARNING! NOT CONVERGENT!", "\n") } # par(mfrow=c(2,1)) # plot(all.Q,type="l") # plot(ll,type="l") cat("number of iterations=", iter, "\n") a=list(x=x,y=y,posterior=cbind(z,1-z),lambda=c(lambda,1-lambda),beta=beta,sigma=sigma,alpha=alpha,loglik=obsloglik,all.loglik=ll,ft="flaremixEM") class(a)="mixEM" a } mixtools/R/depth.R0000755000176200001440000000243513060301775013571 0ustar liggesusers###################################################################### # the following depth function can compute the depth for # # multi points at one time # ###################################################################### #dyn.load("~fxuan/depth/spherical/sphericaldepth.so") depth <- function(pts,x,Cx=var(x)){ x=x%*%t(chol(solve(Cx))) pts=pts%*%t(chol(solve(Cx))) # y <- .Fortran("mudepth", # as.integer(nrow(x)), # as.integer(nrow(pts)), # as.integer(ncol(x)), # as.single(pts), # as.single(x), # as.integer(1:nrow(pts)), # as.single(1:nrow(pts)), # PACKAGE="mixtools") # Now rewritten in C: y <- .C(C_mudepth, as.integer(nrow(x)), as.integer(nrow(pts)), as.integer(ncol(x)), as.double(pts), as.double(x), integer(nrow(pts)), double(nrow(pts)), PACKAGE="mixtools") count <- y[[6]] n <- nrow(x) # depth <- sqrt(8)*(count-n*(n-1)/4)/sqrt((n*(n-1)))#this is to standardize the depth # depth <- (factorial(n)/(2*factorial(n-2)))^(-1)*count depth <- choose(n,2)^(-1)*count depth } mixtools/R/rnormmix.R0000755000176200001440000000050611665556372014353 0ustar liggesusers# Simulate from a normal mixture. New version simply calls rmvnormmix. 
# normmix.sim is here for backwards compatibility rnormmix <- normmix.sim <- function(n,lambda=1,mu=0,sigma=1) { if (NCOL(mu)>1 || NCOL(sigma)>1) stop ("Use the rmvnormmix function instead.") as.vector(rmvnormmix(n,lambda,mu,sigma)) } mixtools/R/plot.npEM.R0000755000176200001440000000406611665556372014321 0ustar liggesusersplot.spEM <- plot.npEM <- function(x, blocks = NULL, hist=TRUE, addlegend=TRUE, scale = TRUE, title=NULL, breaks="Sturges", ylim=NULL, dens.col, newplot=TRUE, pos.legend="topright", cex.legend=1, ...) { r <- NCOL(x$data) m <- NCOL(x$posteriors) blockid <- x$blockid if (is.null(blocks)) { if(!is.null(blockid)) { blocks <- 1:max(blockid) } else { blocks <- blockid <- 1:r } } ask <- par(ask=(length(unique(blocks))>1)) ylim.orig <- ylim out <- list(x=list(), y=list()) if (!newplot) { hist <- FALSE } for(i in 1:length(blocks)) { coords <- blockid == blocks[i] ylim <- ylim.orig if (is.null(title)) { if (r>1) { tt <- paste(which(coords), collapse=",") tt <- paste("Coordinate", ifelse(sum(coords)>1, "s ", " "), tt, sep="") } else { tt <- "Density Curves" } } else { tt <- rep(title,length(blocks))[i] } dx <- dy <- NULL for (j in 1:m) { d <- density(x, component=j, block=blocks[i], scale=scale) dx <- cbind(dx, d$x) dy <- cbind(dy, d$y) } xx <- as.vector(as.matrix(x$data)[,coords]) if (is.null(ylim)) { ylim=range(dy) if (hist) { ylim[2] <- max(ylim[2], hist(xx, breaks=breaks, plot=FALSE)$density) } } if (!hist && newplot) { pf <- plot # Use plot or hist as plotting fn the 1st time only, then lines } else { pf <- lines } if (hist) { hist(xx, breaks=breaks, prob=TRUE, ylim=ylim, main="", ...) } if (missing(dens.col)) dens.col <- 2:(m+1) dens.col <- rep(dens.col, length.out=m) for (j in 1:m) { pf(dx[,j],dy[,j], type="l", lwd=2, col=dens.col[j], ylim=ylim, ...) pf <- lines } if (addlegend) { legend(pos.legend, legend=round(x$lambdahat,3), fill=dens.col, cex=cex.legend) out$x[[i]]<-dx out$y[[i]]<-dy } if (newplot) { title(main=tt, ...) 
} } par(ask=ask) invisible(out) } mixtools/R/poisregmixEM.R0000755000176200001440000000641611736346560015111 0ustar liggesuserspoisregmixEM = function (y, x, lambda = NULL, beta = NULL, k = 2, addintercept = TRUE, epsilon = 1e-08, maxit = 10000, verb=FALSE) { if (addintercept) { x = cbind(1, x) } else x = as.matrix(x) n <- length(y) p <- ncol(x) tmp <- poisregmix.init(y=y, x=x, lambda=lambda, beta=beta, k=k) lambda <- tmp$lambda beta <- tmp$beta k <- tmp$k xbeta <- x %*% beta z <- matrix(0, n, k) diff <- 1 iter <- 0 comp <- t(t(dpois(y, exp(xbeta))) * lambda) compsum <- apply(comp, 1, sum) obsloglik <- sum(log(compsum)) ll <- obsloglik restarts <- 0 while (diff > epsilon && iter < maxit) { j.star = apply(xbeta, 1, which.max) for (i in 1:n) { for (j in 1:k) { z[i, j] = lambda[j]/lambda[j.star[i]] * exp(y[i] * (xbeta[i, j] - xbeta[i, j.star[i]]) + exp(xbeta[i, j.star[i]]) - exp(xbeta[i, j])) } } z = z/apply(z, 1, sum) z[,k]=1-apply(as.matrix(z[,(1:(k-1))]),1,sum) if(sum(is.na(z))>0){ cat("Need new starting values due to underflow...","\n") restarts <- restarts + 1 if(restarts>15) stop("Too many tries!") tmp <- poisregmix.init(y=y, x=x, k=k) lambda <- tmp$lambda beta <- tmp$beta k <- tmp$k diff <- 1 iter <- 0 xbeta <- x %*% beta comp <- t(t(dpois(y, exp(xbeta))) * lambda) compsum <- apply(comp, 1, sum) obsloglik <- sum(log(compsum)) ll <- obsloglik } else{ lambda <- apply(z, 2, mean) lm.out <- lapply(1:k, function(j) try(glm.fit(x, y, weights = z[, j], family = poisson()) ,silent=TRUE)) beta = sapply(lm.out,coef) xbeta <- x %*% beta comp <- t(t(dpois(y, exp(xbeta))) * lambda) compsum <- apply(comp, 1, sum) newobsloglik <- sum(log(compsum)) if(abs(newobsloglik)==Inf || is.na(newobsloglik) || newobsloglik < obsloglik){# || sum(z)!=n){ cat("Need new starting values due to singularity...","\n") restarts <- restarts + 1 if(restarts>15) stop("Too many tries!") tmp <- poisregmix.init(y=y, x=x, k=k) lambda <- tmp$lambda beta <- tmp$beta k <- tmp$k diff <- 1 iter <- 0 xbeta <- 
x %*% beta comp <- t(t(dpois(y, exp(xbeta))) * lambda) compsum <- apply(comp, 1, sum) obsloglik <- sum(log(compsum)) ll <- obsloglik } else{ diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", obsloglik, "\n") } } } } if (iter == maxit) { cat("WARNING! NOT CONVERGENT!", "\n") } cat("number of iterations=", iter, "\n") beta <- matrix(beta,ncol=k) rownames(beta) <- c(paste("beta", ".", 0:(p-1), sep = "")) colnames(beta) <- c(paste("comp", ".", 1:k, sep = "")) colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, y=y, lambda = lambda, beta = beta, loglik = obsloglik, posterior = z, all.loglik=ll, restarts=restarts, ft="poisregmixEM") class(a) = "mixEM" a } mixtools/R/regcr.R0000755000176200001440000000537311665556372013611 0ustar liggesusers# Produce credible region for regression lines based on # sample from posterior distribution of beta parameters. # It is assumed that beta, upon entry, is an nx2 matrix, # where the first column gives the intercepts and the # second column gives the slopes. # alpha is the proportion of beta to remove from the posterior. # Thus, 1-alpha is the level of the credible region. # # If nonparametric=TRUE, then the region is based on the convex # hull of the remaining beta after trimming, which is accomplished # using a data depth technique. # # If nonparametric=FALSE, then the region is based on the # asymptotic normal approximation. regcr=function (beta, x, em.beta=NULL, em.sigma=NULL, alpha = 0.05, nonparametric = FALSE, plot = FALSE, xyaxes = TRUE, ...) 
{ if (nonparametric) { beta.old=beta if(is.null(em.sigma) && is.null(em.beta)){ beta=t(matsqrt(solve(cov(beta)))%*%(t(beta)-apply(beta,2,mean))) } else beta=1/sqrt(length(x))*t(em.sigma^(-1)*matsqrt(t(cbind(1,x))%*%cbind(1,x))%*%(t(beta.old)-em.beta)) d = depth(beta, beta) beta = beta.old[order(d), ] d = d[order(d)] n = length(d) trimbeta = beta[-(1:round(n * alpha)), ] h = unique(trimbeta[chull(trimbeta), ]) nh = nrow(h) m = which.max(h[, 2]) h = rbind(h[m:nh, ], h[((1:nh) < m), ], h[m, ]) bound = NULL for (i in 1:nh) { bound = rbind(bound, h[i, ]) bound = rbind(bound, cbind(seq(h[i, 1], h[i + 1, 1], len = 50), seq(h[i, 2], h[i + 1, 2], len = 50))) } beta=trimbeta } else { xbar = apply(beta, 2, mean) n = nrow(beta) cbeta = t(t(beta) - xbar) S = t(cbeta) %*% cbeta/(n - 1) eS = eigen(S) B = eS$vec %*% diag(sqrt(eS$val)) theta = seq(0, 2 * pi, len = 250) v = cbind(cos(theta), sin(theta)) * sqrt(qchisq(1 - alpha, 2)) h = t(B %*% t(v) + xbar) nh = nrow(h) m = which.max(h[, 2]) h = rbind(h[m:nh, ], h[((1:nh) < m), ], h[m, ]) bound = h } z <- length(x) * 5 u <- seq(min(x), max(x), length = z) lower <- c() upper <- c() v <- c() for (j in 1:z) { for (i in 1:nrow(beta)) { v[i] <- as.matrix(beta[, 1][i] + beta[, 2][i] * u[j]) } uv <- cbind(u[j], v) lower <- rbind(lower, uv[order(v), ][1, ]) upper <- rbind(upper, uv[order(v), ][nrow(beta), ]) } if (plot) { if (xyaxes) { lines(upper, ...) lines(lower, ...) } else { lines(bound, ...) } } invisible(list(boundary = bound, upper = upper, lower = lower)) } mixtools/R/ellipse.R0000755000176200001440000000067111665556372014140 0ustar liggesusersellipse <- function(mu, sigma, alpha=.05, npoints=250, newplot=FALSE, draw=TRUE, ...) { es <- eigen(sigma) e1 <- es$vec%*%diag(sqrt(es$val)) r1 <- sqrt(qchisq(1-alpha,2)) theta <- seq(0,2*pi,len=npoints) v1 <- cbind(r1*cos(theta),r1*sin(theta)) pts=t(mu-(e1%*%t(v1))) if (newplot && draw) { plot(pts, ...) } else if (!newplot && draw) { lines(pts, ...) 
} invisible(pts) } mixtools/R/regmixEMlambda.R0000755000176200001440000001032613060302263015332 0ustar liggesusersregmixEM.lambda = function (y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) { if (arbmean == FALSE && arbvar == FALSE) { stop(paste("Must change constraints on beta and/or sigma!", "\n")) } s = sigma if (addintercept) { x = cbind(1, x) } n <- length(y) p <- ncol(x) tmp <- regmix.lambda.init(y = y, x = x, lambda = lambda, beta = beta, s = s, k = k, addintercept = addintercept, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda beta <- tmp$beta s <- tmp$s k <- tmp$k diff <- 1 iter <- 0 xbeta <- x %*% beta res2 <- (y - xbeta)^2 if (arbmean == FALSE) { res2 <- sapply(1:k, function(i) res2) } comp <- t(t(lambda)/sqrt(2 * pi * s^2)) * exp(-t(t(res2)/(2 * s^2))) obsloglik <- sum(log(apply(comp, 1, sum))) ll <- obsloglik while (diff > epsilon && iter < 1) { V=as.double(sweep(lambda, 2, s+rep(0,k), "/")) W=as.double(sweep(res2, 2, 2*(s+rep(0,k))^2, "/")) z <- matrix(.C(C_newz, as.integer(n), as.integer(k), V=V, W=W, newz=double(n*k), PACKAGE = "mixtools")$newz, ncol=k) z = z/apply(z,1,sum) if (addintercept) { lm.out <- lapply(1:k, function(i) lm(y ~ x[,-1], weights = z[, i])) } else lm.out <- lapply(1:k, function(i) lm(y ~ x - 1, weights = z[, i])) beta.new <- sapply(lm.out, coef) if (arbmean == FALSE) { beta.new <- apply(t(apply(z, 2, sum) * t(beta.new)), 1, sum)/n } xbeta.new <- x %*% beta.new res2 <- (y - xbeta.new)^2 if (arbmean == FALSE) { res2 <- sapply(1:k, function(i) res2) } if (arbvar) { s.new <- sqrt(sapply(1:k, function(i) sum(z[,i] * (res2[, i]))/sum(z[, i]))) } else s.new <- sqrt(sum(z * res2)/n) beta <- beta.new xbeta <- x %*% beta s <- s.new sing <- sum(s < 1e-08) comp <- lapply(1:k, function(i) lambda[,i] * dnorm(y, xbeta[, i * arbmean + (1 - arbmean)], s[i * arbvar + (1 - arbvar)])) comp <- sapply(comp, cbind) compsum <- apply(comp, 1, 
sum) newobsloglik <- sum(log(compsum)) if (newobsloglik < obsloglik || sing > 0 || abs(newobsloglik) == Inf || is.nan(newobsloglik)){# || sum(z) != n) { cat("Need new starting values due to singularity...", "\n") tmp <- regmix.lambda.init(y = y, x = x, k = k, addintercept = addintercept, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda beta <- tmp$beta s <- tmp$s k <- tmp$k diff <- 1 iter <- 0 xbeta <- x %*% beta res2 <- (y - xbeta)^2 if (arbmean == FALSE) { res2 <- sapply(1:k, function(i) res2) } comp <- t(t(lambda)/sqrt(2 * pi * s^2)) * exp(-t(t(res2)/(2 * s^2))) obsloglik <- sum(log(apply(comp, 1, sum))) ll <- obsloglik } else { diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 } } scale.order = order(s) sigma.min = min(s) if (arbmean == FALSE) { z = z[, scale.order] names(beta) <- c(paste("beta", ".", 0:(p - 1), sep = "")) colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a = list(x = x, y = y, lambda = lambda[,scale.order], beta = beta, sigma = sigma.min, scale = s[scale.order]/sigma.min, loglik = obsloglik, posterior = z[, scale.order], all.loglik = ll, ft="regmixEM.lambda") class(a) = "mixEM" a } else { rownames(beta) <- c(paste("beta", ".", 0:(p - 1), sep = "")) colnames(beta) <- c(paste("comp", ".", 1:k, sep = "")) colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a = list(x = x, y = y, lambda = lambda, beta = beta, sigma = s, loglik = obsloglik, posterior = z, all.loglik = ll, ft="regmixEM.lambda") class(a) = "mixEM" a } } mixtools/R/plot.mixEM.R0000644000176200001440000002142113055603106014447 0ustar liggesusersplot.mixEM <-function (x, whichplots = 1, loglik = 1 %in% whichplots, density = 2 %in% whichplots, xlab1="Iteration", ylab1="Log-Likelihood", main1="Observed Data Log-Likelihood", col1=1, lwd1=2, xlab2=NULL, ylab2=NULL, main2=NULL, col2=NULL, lwd2=2, alpha = 0.05, marginal = FALSE, ...) 
{ def.par <- par(ask=(loglik + density > 1), "mar") # only ask and mar are changed mix.object <- x if (!inherits(mix.object, "mixEM")) stop("Use only with \"mixEM\" objects!") if (loglik) { plot(mix.object$all.loglik, xlab = xlab1, ylab = ylab1, main = main1, type="l", lwd=lwd1, col=col1, ...) } if (density) { if (mix.object$ft == "logisregmixEM") { if (ncol(mix.object$x) != 2) { stop("The predictors must have 2 columns!") } if (sum((mix.object$y == 1) + (mix.object$y == 0)) != length(mix.object$y)) { stop("The response must be binary!") } k = ncol(mix.object$beta) x = mix.object$x[, 2] if(is.null(main2)) { main2 <- "Most Probable Component Membership" } if(is.null(xlab2)) { xlab2 <- "Predictor" } if(is.null(ylab2)) { ylab2 <- "Response" } if(is.null(col2)) { col2 <- 2:(k+1) } plot(x, mix.object$y, main=main2, xlab=xlab2, ylab=ylab2, col = col2[apply(mix.object$posterior, 1, which.max)], ...) a = cbind(x, mix.object$y) a = a[order(a[, 1]), ] for (i in 1:k) { lines(a[, 1], plogis(mix.object$beta[1, i] + mix.object$beta[2, i] * a[, 1]), col = col2[i]) } } if (mix.object$ft == "normalmixEM") { k <- ncol(mix.object$posterior) x <- sort(mix.object$x) a <- hist(x, plot = FALSE) maxy <- max(max(a$density), .3989*mix.object$lambda/mix.object$sigma) if(is.null(main2)) { main2 <- "Density Curves" } if(is.null(xlab2)) { xlab2 <- "Data" } if(is.null(col2)) { col2 <- 2:(k+1) } hist(x, prob = TRUE, main = main2, xlab = xlab2, ylim = c(0,maxy), ...) 
if (length(mix.object$mu) == 1) { arbvar <- TRUE mix.object$sigma <- mix.object$scale * mix.object$sigma arbmean <- FALSE } if (length(mix.object$mu) == k && length(mix.object$sigma) == 1) { arbmean <- TRUE arbvar <- FALSE } if (length(mix.object$sigma) == k && length(mix.object$mu) == k) { arbmean <- TRUE arbvar <- TRUE } for (i in 1:k) { lines(x, mix.object$lambda[i] * dnorm(x, mean = mix.object$mu[i * arbmean + (1 - arbmean)], sd = mix.object$sigma[i * arbvar + (1 - arbvar)]), col = col2[i], lwd = lwd2) } } if (mix.object$ft == "repnormmixEM") { x <- as.vector(as.matrix(x)) k <- ncol(mix.object$posterior) x <- sort(mix.object$x) a <- hist(x, plot = FALSE) maxy <- max(max(a$density), .3989*mix.object$lambda/mix.object$sigma) if (is.null(main2)) { main2 <- "Density Curves" } if(is.null(xlab2)) { xlab2 <- "Data" } if(is.null(col2)) { col2 <- 2:(k+1) } hist(x, prob = TRUE, main = main2, xlab = xlab2, ylim = c(0, maxy), ...) if (length(mix.object$mu) == 1) { arbvar <- TRUE mix.object$sigma = mix.object$scale * mix.object$sigma arbmean <- FALSE } if (length(mix.object$mu) == k && length(mix.object$sigma) == 1) { arbmean <- TRUE arbvar <- FALSE } if (length(mix.object$sigma) == k && length(mix.object$mu) == k) { arbmean <- TRUE arbvar <- TRUE } for (i in 1:k) { lines(x, mix.object$lambda[i] * dnorm(x, mean = mix.object$mu[i * arbmean + (1 - arbmean)], sd = mix.object$sigma[i * arbvar + (1 - arbvar)]), col = col2[i], lwd = lwd2) } } if (mix.object$ft == "regmixEM.mixed") { x.1 = mix.object$x n = sum(sapply(x.1, nrow)) x.1.sum = sum(sapply(1:length(x.1), function(i) length(x.1[[i]][, 1]))) if (x.1.sum == n) { x = lapply(1:length(x.1), function(i) matrix(x.1[[i]][, -1], ncol = 1)) } else { x = x.1 } post.beta(x = x, y = mix.object$y, p.beta = mix.object$posterior.beta, p.z = mix.object$posterior.z) } if (mix.object$ft == "mvnormalmixEM") { x = mix.object$x if (ncol(x) != 2) { stop("The data must have 2 columns!") } post = apply(mix.object$posterior, 1, which.max) k <- 
ncol(mix.object$posterior) if (is.list(mix.object$sigma)) { sigma = mix.object$sigma } else { sigma = lapply(1:k, function(i) mix.object$sigma) } if (is.list(mix.object$mu)) { mu = mix.object$mu } else { mu = lapply(1:k, function(i) mix.object$mu) } if(is.null(xlab2)) { xlab2 <- "X.1" } if(is.null(ylab2)) { ylab2 <- "X.2" } if(is.null(col2)) { col2 <- 2:(k+1) } if (marginal == FALSE) { if (is.null(main2)) { main2 <- "Density Curves" } plot(x, col = col2[post], xlab = xlab2, ylab = ylab2, main = main2, ...) lapply(1:k, function(i) points(mu[[i]][1], mu[[i]][2], pch = 19)) for (i in 1:k) { for (j in 1:length(alpha)) { ellipse(mu = mu[[i]], sigma = sigma[[i]], alpha = alpha[j], col = col2[i]) } } } else { if (is.null(main2)) { main2 <- "" } # FIXME: What's the right main2 here? x <- mix.object$x[, 1] y <- mix.object$x[, 2] xhist <- hist(x, plot = FALSE) yhist <- hist(y, plot = FALSE) top <- max(c(xhist$counts, yhist$counts)) xrange <- range(x) yrange <- range(y) nf <- layout(matrix(c(2, 0, 1, 3), 2, 2, byrow = TRUE), c(4, 1), c(1, 4), TRUE) layout.show(nf) par(mar = c(3, 3, 1, 1)) plot(mix.object$x[, 1], mix.object$x[,2], col = col2[post], xlab = xlab2, ylab = ylab2, main = main2, ...) lapply(1:k, function(i) points(mu[[i]][1], mu[[i]][2], pch = 19)) for (i in 1:k) { for (j in 1:length(alpha)) { ellipse(mu = mu[[i]], sigma = sigma[[i]], alpha = alpha[j], col = (i + 1)) } } par(mar = c(0, 3, 1, 1)) barplot(xhist$counts, axes = FALSE, ylim = c(0, top), space = 0, ...) par(mar = c(3, 0, 1, 1)) barplot(yhist$counts, axes = FALSE, xlim = c(0, top), space = 0, horiz = TRUE, ...) 
} } if (mix.object$ft == "regmixEM") { if (ncol(mix.object$x) != 2) { stop("The predictors must have 2 columns!") } post <- apply(mix.object$posterior, 1, which.max) k <- ncol(mix.object$posterior) x <- mix.object$x[, 2] y <- mix.object$y n <- length(y) if(is.null(main2)) { main2 <- "Most Probable Component Membership" } if(is.null(xlab2)) { xlab2 <- "Predictor" } if(is.null(ylab2)) { ylab2 <- "Response" } if(is.null(col2)) { col2 <- 2:(k+1) } plot(x, y, main = main2, xlab=xlab2, ylab=ylab2, type="n", ...) a = cbind(mix.object$x[, 2], mix.object$y, post) for (i in 1:k) { xy = subset(cbind(a, mix.object$posterior[, i]), a[, 3] == i)[, -3] xy = matrix(xy, ncol=3) points(xy[, 1], xy[, 2], col = col2[i]) if (is.matrix(mix.object$beta) == FALSE) { abline(coef = mix.object$beta) beta = matrix(mix.object$beta, ncol = k, nrow = 2) } else { abline(coef = mix.object$beta[, i], col = col2[i]) beta = mix.object$beta } out = lm(y ~ x, weights = mix.object$posterior[,i]) fit = beta[1, i] + beta[2, i] * x out.aov = anova(out) MSE = out.aov$Mean[2] xy.f = cbind(x, y, fit) xy.sort = xy.f[order(xy.f[, 1]), ] x.new = seq(from=min(x),to=max(x),length.out=100) y.new = beta[1, i] + beta[2, i] * x.new s.h <- sqrt(MSE * (1/n + (x.new - mean(xy.sort[,1]))^2 / var(xy.sort[, 1])/(n - 1))) for (j in 1:length(alpha)) { W = sqrt(qf(1 - alpha[j], 2, n - 2)) upper = y.new + W * s.h lower = y.new - W * s.h lines(x.new, upper, col = (i + 1)) lines(x.new, lower, col = (i + 1)) } } } if (mix.object$ft == "expRMM_EM") {plotexpRMM(mix.object, ...)} # all default if (mix.object$ft == "weibullRMM_SEM") {plotweibullRMM(mix.object, ...)} # all default } par(def.par) # reset ask and mar to original values } mixtools/R/normalmixMMlc.R0000644000176200001440000001767113060302617015245 0ustar liggesusers################################################################### ## "EC-MM" algorithm ## to search for a local maximum of the likelihood surface for a ## univariate finite mixture of normals with possible ## 
linear constraints on the mean and variance parameters. ## ## (EC-MM is ECM in the sense of Meng and Rubin, ## Biometrika 1993, where the M step is replaced by a ## Conditional MM step) ## ## version allowing for linear constraints on the mean ## mu = M beta + C, where M is matrix(k,p) and C is vector() ## C-MM step required for the linear constraint on the Variances ## var.lincstr = matrix A (k,q) s.t. iv = A g, where "i"nverse "v"ar ## iv = k-vector of 1/v_j's, and g = q-vector of unknown parameters ## no fast option, & ECM-MM algorithm forced (no ECM option available) ## init values for gparam are *required* here for the MM algorithm ## # default value for A could be diag(k) so that iv=g normalmixMMlc <- function (x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, mean.constr = NULL, mean.lincstr = NULL, mean.constant = NULL, var.lincstr = NULL, gparam = NULL, epsilon = 1e-08, maxit = 1000, maxrestarts=20, verb = FALSE) { ECM <- TRUE # always required in this case A <- var.lincstr x <- as.vector(x); n <- length(x) tmp <- normalmix.init(x = x, lambda = lambda, mu = mu, s = sigma, k = k) # no arbmean & arbvar parameters lambda <- tmp$lambda; mu <- tmp$mu; sigma <- tmp$s; k <- tmp$k # arbvar <- tmp$arbvar; arbmean <- tmp$arbmean arbmean <- arbvar <- TRUE # forced for parse.constraints() warn <- options(warn=-1) # Turn off warnings (for parsing only) z <- parse.constraints(mean.constr, k=k, allsame=!arbmean) options(warn) # Reset warnings to original value meancat <- z$category; meanalpha <- z$alpha if (!is.null(mean.lincstr)) { # linear constraint on the means M <- mean.lincstr cat("linear constraint mu = M beta + C version\n") p <- dim(M)[2] # nb of columns = size of constr. 
mean parameter if (dim(M)[1] != k) stop("mean.lincstr and mu dimensions must agree") if (is.null(mean.constant)) C <- matrix(0,k,1) else C <- matrix(mean.constant,k,1) } notdone <- TRUE while(notdone) { # Initialize everything notdone <- FALSE tmp <- normalmix.init(x=x, lambda=lambda, mu=mu, s=sigma, k=k) lambda <- tmp$lambda; mu <- tmp$mu; k <- tmp$k; sigma <- tmp$s q <- dim(A)[2] # nb of inverse variance parameters (gamma) if(is.null(gparam)) g <- rexp(q) else g <- gparam iv <- A %*% g # inverse variances, as a one-column matrix v <- 1/iv # is it necessary to redefined sigma from g here ? sigma <- as.vector(sqrt(v)) diff <- epsilon+1 iter <- 0 postprobs <- matrix(nrow = n, ncol = k) restarts <- 0 mu <- rep(mu, k)[1:k] # is this useful? sigma <- rep(sigma,k)[1:k] # sigma still needed for post computation ## Initialization E-step here: z <- .C(C_normpost, as.integer(n), as.integer(k), as.double(x), as.double(mu), as.double(sigma), as.double(lambda), res2 = double(n*k), double(3*k), post = double(n*k), loglik = double(1), PACKAGE = "mixtools") postprobs <- matrix(z$post, nrow=n) res <- matrix(z$res2, nrow=n) # n,m matrix of squared residuals (x_i-mu_j)^2 ll <- obsloglik <- z$loglik ## EC-MM iterations while (diff > epsilon && iter < maxit) { # ECM loop, 1st M-step: condition on sigma, update lambda and mu : lambda <- colMeans(postprobs) # update for lambda # update for mu, depending on constraint type mu[meancat==0] <- meanalpha[meancat==0] if (max(meancat)>0 && is.null(mean.lincstr)) { # simple constraint for(i in 1:max(meancat)) { w <- which(meancat==i) if (length(w)==1) { mu[w] <- sum(postprobs[,w]*x) / (n*lambda[w]) } else { tmp <- t(postprobs[,w])*(meanalpha[w]/sigma[w]^2) mu[w] <- meanalpha[w] * sum(t(tmp)*x) / sum(tmp*meanalpha[w]) } } } if (!is.null(mean.lincstr)) { # linear constraint mu = M beta + C iv2 <- as.vector(iv) # A1_j = sum_i p_ij x_i/v_j A1 <- apply(postprobs*matrix(x,n,k),2,sum)*iv2 B <- diag(apply(postprobs,2,sum)*iv2) Stemp <- solve(t(M) %*% B 
%*% M) beta <- Stemp %*% t(M) %*% (A1 - B %*% C) mu <- as.vector(M %*% beta + C) # coerce to vector } # ECM E-step number one: z <- .C(C_normpost, as.integer(n), as.integer(k), as.double(x), as.double(mu), as.double(sigma), as.double(lambda), res2 = double(n*k), double(3*k), post = double(n*k), loglik = double(1), PACKAGE = "mixtools") postprobs <- matrix(z$post, nrow=n) res <- matrix(z$res2, nrow=n) #### ECM loop 2nd M-step: condition on mu, update lambda #### and sigma via the MM step lambda <- colMeans(postprobs) # Update variances with the MM algo on g, conditional on mu # note: code in too much steps/details for debugging # computing q-vector of denominators r0 <- postprobs*res # (i,j)th is p_{ij}*(x_i - mu_j)^2 r1 <- r0 %*% A # r1(n,q) matrix, (i,l) term =\sum_j p_{ij}A_{jl}(x_i - mu_j)^2 den <- colSums(r1) # q-vector of denominators for updating g # computing q-vector of numerators r3 <- matrix(v,nrow=k,ncol=q)*A # (k,q) matrix of v_j.A_{jl} r4 <- postprobs %*% r3 # (n,q) matrix of \sum_j p_{ij} A_{jl} v_j num <- colSums(r4) # # update of gamma parameters which gives iv, v and sigma g.new <- g*(num/den) iv <- A %*% g.new v <- 1/iv # needed for next computation of r3 sigma <- as.vector(sqrt(v)) # needed in next E-steps g <- g.new # iterates ## test for variance degeneration if(any(sigma < 1e-08)) { notdone <- TRUE cat("One of the variances is going to zero; ", "trying new starting values.\n") restarts <- restarts + 1 lambda <- mu <- sigma <- NULL # WHAT TO DO WITH g in this case? if(restarts>maxrestarts) { stop("Too many tries!") } break } # ECM E-step number two: z <- .C(C_normpost, as.integer(n), as.integer(k), as.double(x), as.double(mu), as.double(sigma), as.double(lambda), res2 = double(n*k), double(3*k), post = double(n*k), loglik = double(1), PACKAGE = "mixtools") postprobs <- matrix(z$post, nrow=n) res <- matrix(z$res2, nrow=n) newobsloglik <- z$loglik diff <- newobsloglik - obsloglik # does not increase that one? 
obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration", iter, ": log-lik diff =", round(diff,4), " log-lik =", obsloglik, "\n") } } } # summurizing and returning structure if (iter == maxit) { cat("WARNING! NOT CONVERGENT!", "\n") } cat("number of iterations=", iter, "\n") if(arbmean == FALSE){ scale.order = order(sigma) sigma.min = min(sigma) postprobs = postprobs[,scale.order] colnames(postprobs) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, lambda = lambda[scale.order], mu = mu, sigma = sigma.min, scale = sigma[scale.order]/sigma.min, loglik = obsloglik, posterior = postprobs, all.loglik=ll, restarts=restarts, gamma = g, ft="normalmixMMlc") } else { colnames(postprobs) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, lambda = lambda, mu = mu, sigma = sigma, loglik = obsloglik, posterior = postprobs, all.loglik=ll, restarts=restarts, beta = beta, gamma = g, ft="normalmixMMlc") } class(a) = "mixEM" a } mixtools/R/zzz.R0000755000176200001440000000053111736707362013327 0ustar liggesusers.onAttach <- function(lib, pkg){ info <- packageDescription("mixtools") packageStartupMessage( paste('mixtools package, version ', info$Version, ', Released ', info$Date, '\n', 'This package is based upon work supported by the National Science ', 'Foundation under Grant No. 
SES-0518772.\n', sep="") ) } mixtools/R/hmeEM.R0000755000176200001440000001164211665556372013476 0ustar liggesusershmeEM <- function (y, x, lambda=NULL, beta = NULL, sigma = NULL, w = NULL, k = 2, addintercept = TRUE, epsilon = 1e-08, maxit = 10000, verb=FALSE) { NR <- function (w, x, z) { A = (inv.logit(x %*% w)/(1 + exp(x %*% w)))^-1 B = z[,1]-inv.logit(x %*% w) C = matrix(c(sum(B),sum(x[,2]*B)),2,1) D = matrix(nrow=2,ncol=nrow(x)) D = solve(matrix(c(sum(A),sum(A * x[,2]),sum(A * x[,2]),sum(A * x[,2]^2)),nrow=2,ncol=2)) E = w + apply(D %*% C, 1, sum) list(w.new = E) } if (k != 2) cat("This algorithm currently only works for a 2-component mixture-of-experts model!") s=sigma if (addintercept) { x = cbind(1, x) } n <- length(y) p <- ncol(x) if (is.null(w)) { w = rep(0,p*(k-1)) } tmp <- regmix.init(y = y, x = x, beta = beta, s = s, k = k, addintercept = addintercept) beta = tmp$beta s = tmp$s k = tmp$k if(is.null(lambda)) lambda = inv.logit(x %*% w) diff <- 1 iter <- 0 nlogroot2pi = n * log(sqrt(2 * pi)) xbeta <- x %*% beta tmp <- t(-1/2/s[2]^2 * t(y - xbeta[, -1])^2) + 1/2/s[1]^2 * (y - xbeta[, 1])^2 tmp2 <- (1-lambda)/s[2] * s[1]/lambda tmp3 <- log(1 + (tmp2 * exp(tmp))) obsloglik <- sum(log(lambda)) - nlogroot2pi - n * log(s[1]) - 1/2/s[1]^2 * sum((y - xbeta[, 1])^2) + sum(tmp3) ll <- obsloglik z = matrix(nrow = n, ncol = k) restarts <- 0 while (diff > epsilon & iter < maxit) { lambda.mat <- cbind(lambda,1-lambda) res <- (y - xbeta)^2 for (i in 1:n) { for (j in 1:k) { z.denom = c() for (h in 1:k) { z.denom = c(z.denom, (lambda.mat[i,h]/lambda.mat[i,j]) * (s[j]/s[h]) * exp(-0.5 * ((1/s[h]^2) * res[i, h] - (1/s[j]^2) * res[i, j]))) } z[i, j] = 1/sum(z.denom) } } # z[, k] = 1 - apply(as.matrix(z[, (1:(k - 1))]), 1, sum) z = z/apply(z,1,sum) if (addintercept) { lm.out <- lapply(1:k, function(i) lm(y ~ x[, -1], weights = z[, i])) } else lm.out <- lapply(1:k, function(i) lm(y ~ x - 1, weights = z[, i])) beta.new <- sapply(lm.out, coef) w.diff=10 
while(sum(abs(w.diff))>(p*.001)){ w.temp <- NR(w,x,z)$w.new w.diff <- w-w.temp w <- w.temp } w.new <- w lambda.new <- inv.logit(x %*% w.new) xbeta.new <- x %*% beta.new res <- (y - xbeta.new)^2 s.new <- sqrt(sapply(1:k, function(i) sum(z[,i] * (res[, i]))/sum(z[, i]))) sing <- sum(s.new < 1e-08) lambda <- lambda.new beta <- beta.new s <- s.new w <- w.new xbeta <- xbeta.new tmp <- t(-1/2/s[2]^2 * t(y - xbeta[, -1])^2) + 1/2/s[1]^2 * (y - xbeta[, 1])^2 tmp2 <- (1-lambda)/s[2] * s[1]/lambda tmp3 <- log(1 + (tmp2 * exp(tmp))) newobsloglik <- sum(log(lambda)) - nlogroot2pi - n * log(s[1]) - 1/2/s[1]^2 * sum((y - xbeta[, 1])^2) + sum(tmp3) if (sing > 0 || is.na(newobsloglik) || newobsloglik < obsloglik || abs(newobsloglik) == Inf){# || sum(z) != n) { cat("Need new starting values due to singularity...", "\n") restarts <- restarts + 1 if (restarts > 15) stop("Too many tries!") tmp <- regmix.init(y = y, x = x, k = k, addintercept = addintercept) beta = tmp$beta s = tmp$s k = tmp$k w = rep(0,p*(k-1)) lambda = inv.logit(x %*% w) diff <- 1 iter <- 0 xbeta <- x %*% beta tmp <- t(-1/2/s[2]^2 * t(y - xbeta[, -1])^2) + 1/2/s[1]^2 * (y - xbeta[, 1])^2 tmp2 <- (1-lambda)/s[2] * s[1]/lambda tmp3 <- log(1 + (tmp2 * exp(tmp))) obsloglik <- sum(log(lambda)) - nlogroot2pi - n * log(s[1]) - 1/2/s[1]^2 * sum((y - xbeta[, 1])^2) + sum(tmp3) ll <- obsloglik } else{ diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", obsloglik, "\n")} } } if (iter == maxit) { cat("WARNING! 
NOT CONVERGENT!", "\n") } cat("number of iterations=", iter, "\n") rownames(beta) <- c(paste("beta", ".", 0:(p - 1), sep = "")) colnames(beta) <- c(paste("comp", ".", 1:k, sep = "")) colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a = list(x = x, y = y, w=w, lambda = cbind(lambda,1-lambda), beta = beta, sigma = s, loglik = obsloglik, posterior = z, all.loglik = ll, restarts = restarts, ft = "regmixEM") class(a) = "mixEM" a } mixtools/R/parse.constraints.R0000755000176200001440000000242511665556372016162 0ustar liggesusersparse.constraints <- function(constr, k=2, allsame=FALSE) { if (!is.null(constr)) { alpha <- category <- rep(NA, k) u <- sum(is.na(constr)) # Number of unconstrained means category[is.na(constr)] <- 1:u numcon <- as.numeric(constr) # All non-numbers become NA here. category[!is.na(numcon)] <- 0 # These means will remain fixed constants alpha[!is.na(numcon)] <- numcon[!is.na(numcon)] w <- which(is.na(numcon) & !is.na(constr)) # w indexes only character constraints if (length(w)>0) { # Process all the character (equality) constraints constr <- as.character(constr) alpha[w] <- constr[w] nc <- nchar(constr)[w] # nc: number of characters (in each constraint) ss <- substr(constr[w], nc, nc) # extract final character category[w] <- u + match(ss, unique(ss)) # make categories for final characters alpha[w] <- substr(alpha[w], 1, nc-1) # Now extract number before final character alpha[w][alpha[w]=="-"] <- "-1" alpha[w][alpha[w]==""] <- "1" } alpha <- as.numeric(alpha) # Entire alpha vector should be numeric or NA now. } else { alpha <- rep(1,k) if(allsame) category <- alpha else category <- 1:k } list(category=category, alpha=alpha) } mixtools/R/density.spEM.R0000755000176200001440000000351111665556372015021 0ustar liggesusers# Method for "density" generic: Takes an spEM object and returns # a corresponding KDE for the appropriate component and block, evaluated # at the given set of points. 
# Does not use the FFT like the density.default function does; still # quite fast, but not optimized for speed density.spEM <- function (x, u = NULL, component=1, block = 1, scale = FALSE, ...) { m <- NCOL(x$posteriors) r <- NCOL(x$data) n <- NROW(x$data) if (is.null(blockid <- x$blockid)) { coords <- 1 u2 <- rep(1, r) } else { u2 <- match(x$blockid, unique(x$blockid)) # convert blockid to integers 1, 2, ... coords <- blockid == block if (!any(coords)) stop("Illegal value of block argument") } stackedx <- x$data[rep(1:n,m),] cs <- colSums(x$posteriors) z.tmp <- sweep(x$posteriors, 2, cs, "/") z.tmp[, cs==0] <- 1/NROW(z.tmp) # Just in case wts <- rep(as.vector(z.tmp),r) mu <- matrix(x$muhat, nrow=m) sigma <- matrix(x$sigmahat, nrow=m) scaledx <- as.vector((stackedx - mu[rep(1:m, each=n), u2])/ sigma[rep(1:m, each=n), u2]) bw <- x$bandwidth if (is.null(u)) { xx <- as.vector(as.matrix(x$data)[, coords]) u = seq(min(xx) - 4 * bw, max(xx) + 4 * bw, len = 250) } # This part isn't used for now: if (!is.null(x$symmetric) && x$symmetric) { d <- wkde(x=scaledx, u=(u-mu[component, block])/sigma[component, block], w=wts, bw=bw, sym=TRUE) / sigma[component, block] } else { d <- wkde(x=scaledx, u=(u-mu[component, block])/sigma[component, block], w=wts, bw=bw) / sigma[component, block] } if (scale) d <- d * x$lambdahat[component] structure(list(x = u, y = d, bw = bw, n = n, call = match.call(), data.name = deparse(substitute(x)), has.na = FALSE), class = "density") } mixtools/R/poisregmixinit.R0000755000176200001440000000206013210323563015525 0ustar liggesuserspoisregmix.init <- function(y, x, lambda = NULL, beta = NULL, k = 2){ x <- as.matrix(x) n <- length(y) if (is.null(lambda)) { cond = TRUE while(cond){ lambda = runif(k) lambda = lambda/sum(lambda) if(min(lambda)<0.05) cond=TRUE else cond=FALSE } } else k = length(lambda) p <- ncol(x) w = cbind(y, x) w = w[order(w[, 1]), ] w.bin = list() for (j in 1:k) { w.bin[[j]] <- w[max(1, floor((j - 1) * n/k)):ceiling(j * n/k), ] } if 
(is.null(beta)) { beta.hyp = matrix(sapply(1:k, function(j) glm.fit(w.bin[[j]][, 2:(p + 1)], w.bin[[j]][, 1], family = poisson())$coeff), ncol = k) sd.hyp = apply(beta.hyp, 1, sd) beta = matrix(0, p, k) for (j in 1:k) { beta[, j] = rnorm(p, mean = as.vector(beta.hyp[, j]), sd = sd.hyp) } } else k = ncol(beta) list(lambda = lambda, beta = beta, k = k) } mixtools/R/regmixMH.R0000755000176200001440000000675011737456006014220 0ustar liggesusersregmixMH=function (y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, addintercept = TRUE, mu = NULL, sig = NULL, lam.hyp = NULL, sampsize = 1000, omega = 0.01, thin = 1) { if (addintercept) { x = cbind(1, x) } XTX <- solve(t(x)%*%x) n <- length(y) p <- ncol(x) if (is.null(s)) { s = sqrt(1/rexp(k)) } else k = length(s) if (is.null(beta)) { beta = matrix(rnorm(p * k), p, k) } else k = ncol(beta) if (is.null(lambda)) { lambda = runif(k) lambda = lambda/sum(lambda) } else k = length(lambda) if (is.null(mu)) { mu = 0 * beta } if (is.null(sig)) { sig = rep(5 * sqrt(var(y)),k) } sig.beta = t(matrix(rep(sig,p),ncol=p)) * sqrt(matrix(rep(diag(XTX),k),ncol=k)) if(is.null(lam.hyp)) lam.hyp = rep(1,k) L.theta <- matrix(nrow = n, ncol = k) pi.beta <- matrix(nrow = p, ncol = k) pi.sigma <- c() pi.lambda <- c() new.L.theta <- matrix(nrow = length(y), ncol = k) new.pi.beta <- matrix(nrow = p, ncol = k) new.pi.sigma <- c() new.pi.lambda <- c() accepts = 0 theta <- matrix(c(beta, s, lambda), nrow = 1) thetalist <- matrix(theta, sampsize, ncol(theta), byrow=TRUE) for (i in 2:sampsize) { log.pi.beta <- dnorm(beta, mu, sig.beta, log=TRUE) log.pi.sigma <- dexp(s, 1/sig, log=TRUE) log.pi.lambda <- sum((lam.hyp-1)*log(lambda)) - lgamma(lam.hyp) + lgamma(sum(lam.hyp)) # Dirichlet log-density # log.pi.lambda <- log(ddirichlet(lambda, lam.hyp)) L.theta <- dnorm(y - x %*% beta, 0, matrix(s, n, k, byrow = TRUE) ) %*% matrix(lambda, k, 1) log.Lik.theta <- sum(log(L.theta)) log.prior <- sum(log.pi.beta) + sum(log.pi.sigma) + sum(log.pi.lambda) log.f.theta <- 
log.Lik.theta + log.prior new.beta <- beta + omega * matrix(rcauchy(k * p), p, k) new.sigma <- log(s) + omega * rcauchy(k) new.sigma <- exp(new.sigma) new.lambda <- lambda.pert(lambda, omega*rcauchy(k)) log.new.pi.beta <- dnorm(new.beta, mu, sig.beta, log=TRUE) log.new.pi.sigma <- dexp(new.sigma, 1/sig, log=TRUE) log.new.pi.lambda <- sum((lam.hyp-1)*log(new.lambda)) - lgamma(lam.hyp) + lgamma(sum(lam.hyp)) # Dirichlet log-density # log.new.pi.lambda <- log(ddirichlet(new.lambda, lam.hyp)) new.L.theta <- dnorm(y - x %*% new.beta, 0, matrix(new.sigma, n, k, byrow = TRUE)) %*% matrix(new.lambda, k, 1) log.new.Lik.theta <- sum(log(new.L.theta)) log.new.prior <- sum(log.new.pi.beta) + sum(log.new.pi.sigma) + sum(log.new.pi.lambda) log.new.f.theta <- log.new.Lik.theta + log.new.prior new.theta <- c(new.beta, new.sigma, new.lambda) a <- log.new.f.theta - log.f.theta r <- log(runif(1)) if (a > r & !is.na(a)) { theta <- new.theta beta <- new.beta s <- new.sigma lambda <- new.lambda accepts = accepts + 1 } if (i%%thin == 0) thetalist[i,] = theta } cat(paste("Acceptance rate: ", 100 * accepts/sampsize, "%\n", sep = "")) colnames(thetalist) <- c(paste("beta", rep(0:(p - 1), k), ".", rep(1:k, rep(p, k)), sep = ""), paste("s.", 1:k, sep = ""), paste("lambda.", 1:k, sep = "")) invisible(thetalist) a=list(x=x, y=y, theta=thetalist, components=k) class(a)="mixMCMC" a } mixtools/R/perm.R0000755000176200001440000000034312112160724013416 0ustar liggesusersperm <- function (n, r, v = 1:n) { if (r == 1) matrix(v, n, 1) else if (n == 1) matrix(v, 1, r) else { X <- NULL for (i in 1:n) X <- rbind(X, cbind(v[i], perm(n - 1, r - 1, v[-i]))) X } } mixtools/R/wkde.R0000755000176200001440000000045211665556372013432 0ustar liggesuserswkde <- function(x, u=x, w=rep(1, length(x)), bw=bw.nrd0(as.vector(x)), sym=FALSE) { if (sym) { return((wkde(x, u, w, bw) + wkde(x, -u, w, bw))/2) } Km <- exp(outer(x/bw, u/bw, function(a,b) -(a-b)^2/2)) normw <- matrix(w/sum(w), nrow=1) as.vector(normw %*% Km) / 
(bw * sqrt(2*pi)) } mixtools/R/kernC.R0000755000176200001440000000010511665556372013535 0ustar liggesuserskern.C <- function (x, xi, h) { (1+cos(pi*(xi-x)/h))/(2*h) } mixtools/R/regmixEMlambdainit.R0000755000176200001440000000341111665556372016240 0ustar liggesusersregmix.lambda.init = function (y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar = TRUE) { x <- as.matrix(x) n <- length(y) p <- ncol(x) if (addintercept) { x = x[, -1] } else x = x w = cbind(y, x) w = w[order(w[, 1]), ] w.bin = list() for (j in 1:k) { w.bin[[j]] <- w[max(1, floor((j - 1) * n/k)):ceiling(j * n/k), ] } if (addintercept) { lm.out <- lapply(1:k, function(i) lm(w.bin[[i]][, 1] ~ w.bin[[i]][, 2:p])) } else lm.out <- lapply(1:k, function(i) lm(w.bin[[i]][, 1] ~ w.bin[[i]][, 2:(p + 1)] - 1)) if (is.null(s)) { s.hyp = lapply(lm.out, anova) s.hyp = as.vector(sqrt(sapply(1:k, function(i) s.hyp[[i]]$Mean[length(s.hyp[[i]]$Mean)]))) if (arbvar) { s = 1/rexp(k, rate = s.hyp) } else { s.hyp = mean(s.hyp) s = 1/rexp(1, rate = s.hyp) } } if (is.null(s) == FALSE && arbvar == TRUE) { k = length(s) } if (is.null(beta)) { beta.hyp = matrix(sapply(lm.out, coef), ncol = k) beta = matrix(0, nrow = p, ncol = k) for (j in 1:k) { beta[, j] = rnorm(p, mean = as.vector(beta.hyp[, j]), sd = s[arbvar * j + (1 - arbvar)]) } if (arbmean == FALSE) { beta = apply(beta, 1, mean) } } if (is.null(beta) == FALSE && arbmean == TRUE) { k = ncol(beta) } if (is.null(lambda)) { lam = runif(k) lam = lam/sum(lam) for(i in 1:n) {lambda <- rbind(lambda,lam)} } else k = ncol(lambda) list(lambda = lambda, beta = beta, s = s, k = k) }mixtools/R/spEMsymloc.R0000644000176200001440000000564413060302365014556 0ustar liggesusers## EM-like algorithm for a nonparametric univariate mixture model with ## symmetric components from a location family spEMsymloc <- function(x, mu0, bw = bw.nrd0(x), h=bw, eps = 1e-8, maxiter=100, stochastic = FALSE, verbose = FALSE){ bw <- h # h is alternative bandwidth 
argument, for backward compatibility n <- length(x) if (length(mu0)>1) m <- length(mu0) # mu0=centers else m <- mu0 # when mu0=number of clusters z.hat <- matrix(0, nrow=n, ncol=m) fkernel <- matrix(0, nrow=n, ncol=m) tt0 <- proc.time() lambda <- rep(1/m, m) kmeans <- kmeans(x, mu0) for(j in 1:m) { z.hat[kmeans$cluster==j, j] <- 1 } iter <- 0 if (stochastic) { sumpost <- matrix(0, n, m) } finished <- FALSE lambda <- mu <- matrix(0,maxiter,m) while (!finished) { #while (max(abs(change)) > eps & iter < maxiter) { iter <- iter + 1 t0 <- proc.time() ## M-Step lambda[iter,] <- colMeans(z.hat) mu[iter,] <- apply(sweep(z.hat, 1, x, "*"), 2, mean)/lambda[iter,] ## density estimation step if(stochastic){ z <- t(apply(z.hat, 1, function(prob) rmultinom(1, 1, prob))) ans <- .C(C_KDEsymloc, n=as.integer(n), m=as.integer(m), mu=as.double(mu[iter,]), x=as.double(x), bw=as.double(bw), z=as.double(z), f = double(n*m), PACKAGE="mixtools") } else { ans <- .C(C_KDEsymloc, n=as.integer(n), m=as.integer(m), mu=as.double(mu[iter,]), x=as.double(x), bw=as.double(bw), z=as.double(z.hat), f = double(n*m), PACKAGE="mixtools") } fkernel <- matrix(ans$f, ncol=m) lambda.f <- sweep(fkernel, 2, lambda[iter,], "*") ## E-step (for next iteration) z.hat <- lambda.f/rowSums(lambda.f) finished <- iter >= maxiter if (stochastic) { sumpost <- sumpost + z.hat } else if (iter>1) { # This convergence criterion is too simplistic: change <- c(lambda[iter,] - lambda[iter-1,], mu[iter,]-mu[iter-1,]) finished <- finished | (max(abs(change)) < eps) } if (verbose) { t1 <- proc.time() cat("iteration ", iter, " lambda ", round(lambda[iter,], 4), " mu ", round(mu[iter,], 4)) cat(" time", (t1 - t0)[3], "\n") } } if (verbose) { tt1 <- proc.time() cat("lambda ", round(lambda[iter,], 4)) cat(", total time", (tt1 - tt0)[3], "s\n") } if(stochastic) { return(structure(list(data=x, posteriors=sumpost/iter, lambda=lambda, bandwidth=bw, lambdahat=colMeans(lambda), mu = mu, muhat = colMeans(mu), symmetric=TRUE), 
class="npEM")) } else { return(structure(list(data=x, posteriors=z.hat, lambda=lambda[1:iter,], bandwidth=bw, lambdahat=lambda[iter,], mu = mu[1:iter,], muhat = mu[iter,], symmetric=TRUE), class="npEM")) } } mixtools/R/flaremixinit.R0000755000176200001440000000111711665556372015172 0ustar liggesusersflaremix.init <- function(y, x, lambda=NULL, beta=NULL, sigma=NULL, alpha=NULL){ n<-length(y) if(is.null(lambda)){ lambda=runif(2) lambda=lambda/sum(lambda) } lm.out=lm(y~x[,2]) if(is.null(beta)){ beta=lm.out$coef beta[1]=beta[1]+mean(sort(lm.out$residuals)[(n-10):n]) beta[2]=rnorm(1,mean=beta[2],sd=abs(beta[2]/10)) } if(is.null(sigma)){ sigma=rexp(1,rate=sqrt(1/anova(lm.out)$Mean[2])) } if(is.null(alpha)){ a=1/sum(lm.out$res[lm.out$res>0]) alpha=abs(rnorm(1,a)) } list(lambda=lambda[1], beta=matrix(beta,ncol=1), sigma=sigma, alpha=alpha) } mixtools/R/segregmixEM.R0000755000176200001440000003010112112161112014652 0ustar liggesuserssegregmixEM=function (y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2, seg.Z, psi, psi.locs = NULL, delta = NULL, epsilon = 1e-08, maxit = 10000, verb = FALSE, max.restarts=15) { if (sum(x[,1]==1)==nrow(x)) x=x[,-1] x=data.frame(x) col.names.x <- colnames(x) xnam <- colnames(x) fmla <- as.formula(paste("y ~ ", paste(xnam, collapse= "+"))) if(!is.null(psi.locs)){ psi.counts=apply(psi,1,sum) for(i in 1:k){ if(psi.counts[i]>0){ TEMP <- (is.list(psi.locs[[i]]))&(length(psi.locs[[i]])==sum(psi[i,]>0)) } else{ TEMP <- is.null(psi.locs[[i]]) } if(TEMP==FALSE) stop(paste("You must specify a correct changepoint structure!", "\n")) } } if (!is.null(delta)) { cat("Estimation performed assuming the changepoints are known.", "\n") if (is.null(psi.locs)) { stop(paste("You must specify the changepoints for this setting!", "\n")) } } if ((length(seg.Z) != k) | class(seg.Z) != "list") { stop(paste("You must specify a list of length k for the segmented relationships!", "\n")) } if (!identical(all.equal(dim(psi),c(k,ncol(x))),TRUE)) { stop(paste("You must 
# EM algorithm for mixtures of regressions with changepoints ("segmented"
# mixtures of regressions), using the segmented package for the M-step.
#
# y            response vector.
# x            predictor matrix (a leading all-ones column is stripped).
# lambda, beta, sigma  optional starting values; generated by
#              segregmix.init() when NULL.
# k            number of mixture components.
# seg.Z        list of length k of one-sided formulas naming the segmented
#              predictor(s) in each component (NULL entry = no changepoint).
# psi          k x ncol(x) matrix: number of changepoints per predictor and
#              component.
# psi.locs     optional list of length k of starting changepoint locations.
# delta        optional known changepoint values; when supplied the
#              changepoints are held fixed rather than estimated.
# epsilon, maxit, verb, max.restarts  usual EM controls.
#
# Returns a "mixEM" object (lambda, beta, sigma, psi.locs, posterior,
# loglik, all.loglik, restarts, ...).
#
# NOTE(review): two conditionals in this copy appear garbled (characters
# dropped during extraction) -- flagged inline below.  Confirm against the
# released mixtools sources before further edits.
segregmixEM=function (y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2,
                      seg.Z, psi, psi.locs = NULL, delta = NULL,
                      epsilon = 1e-08, maxit = 10000, verb = FALSE,
                      max.restarts=15) {
  # Drop an explicit intercept column, then build the model formula from
  # the remaining predictor names.
  if (sum(x[,1]==1)==nrow(x)) x=x[,-1]
  x=data.frame(x)
  col.names.x <- colnames(x)
  xnam <- colnames(x)
  fmla <- as.formula(paste("y ~ ", paste(xnam, collapse= "+")))
  # Validate that psi.locs agrees with the changepoint counts in psi.
  if(!is.null(psi.locs)){
    psi.counts=apply(psi,1,sum)
    for(i in 1:k){
      if(psi.counts[i]>0){
        TEMP <- (is.list(psi.locs[[i]]))&(length(psi.locs[[i]])==sum(psi[i,]>0))
      } else{
        TEMP <- is.null(psi.locs[[i]])
      }
      if(TEMP==FALSE) stop(paste("You must specify a correct changepoint structure!", "\n"))
    }
  }
  if (!is.null(delta)) {
    cat("Estimation performed assuming the changepoints are known.", "\n")
    if (is.null(psi.locs)) {
      stop(paste("You must specify the changepoints for this setting!", "\n"))
    }
  }
  if ((length(seg.Z) != k) | class(seg.Z) != "list") {
    stop(paste("You must specify a list of length k for the segmented relationships!", "\n"))
  }
  if (!identical(all.equal(dim(psi),c(k,ncol(x))),TRUE)) {
    stop(paste("You must specify a matrix with the correct dimension for psi!", "\n"))
  }
  if (((length(psi.locs) != k) | class(psi.locs) != "list") & !is.null(psi.locs)) {
    stop(paste("You must specify a list of length k for the number of changepoints per predictor in each component!", "\n"))
  }
  tot.cp <- apply(psi,1,sum)
  # Initialization, retrying with fresh random values up to 10 times.
  tmp.ind=1
  tmp <- try(suppressWarnings(segregmix.init(y=y, x=x, lambda = lambda, beta = beta, s = sigma, k = k, seg.Z=seg.Z, psi=psi, psi.locs = psi.locs)),silent=TRUE)
  if(class(tmp)=="try-error"){
    cat("Generating new initial values.", "\n")
    while(tmp.ind<=10){
      tmp <- try(suppressWarnings(segregmix.init(y=y, x=x, lambda = NULL, beta = NULL, s = NULL, k = k, seg.Z=seg.Z, psi=psi, psi.locs = NULL)),silent=TRUE)
      tmp.ind <- tmp.ind+1
      if(tmp.ind==11) stop(paste("Had to reinitialize algorithm too many times. Reconsider specified model.", "\n"))
      if(class(tmp)!="try-error") tmp.ind=20
    }
  }
  x.old=x
  x = cbind(1, x)
  data.x=cbind(y,x)
  lambda <- tmp$lambda
  beta <- tmp$beta
  s <- tmp$s
  k <- tmp$k
  psi.locs <- tmp$psi.locs
  sing <- 0
  # All label permutations; each M-step is evaluated under every
  # permutation and the best log-likelihood wins (label-switching guard).
  perms=perm(k,k)
  perm.ind=nrow(perms)
  if (is.null(delta)) delta <- lapply(1:k,function(i) NULL)
  n <- length(y)
  diff <- 1
  iter <- 0
  # X.aug: design matrices augmented with the changepoint basis via aug.x().
  X.aug <- lapply(1:k, function(i) cbind(1,aug.x(x[,-1],unlist(psi.locs[[i]]),psi[i,],delta=delta[[i]])))
  X.aug.old <- X.aug
  psi.locs.old <- psi.locs
  xbeta <- lapply(1:k, function(i) X.aug[[i]] %*% matrix(beta[[i]],ncol=1))
  res <- sapply(1:k, function(i) as.numeric((y - xbeta[[i]])^2))
  comp <- t((lambda/sqrt(2 * pi * s^2)) * t(exp(-t(t(res)/(2 * s^2)))))
  obsloglik <- sum(log(apply(comp, 1, sum)))
  ll <- obsloglik
  z = matrix(nrow = n, ncol = k)
  restarts <- 0
  while (diff > epsilon && iter < maxit) {
    null.beta=0
    lambda.old <- lambda
    beta.old <- beta
    s.old <- s
    # E-step: posterior membership probabilities, computed via ratios for
    # numerical stability.
    for (i in 1:n) {
      for (j in 1:k) {
        z.denom = c()
        for (h in 1:k) {
          z.denom = c(z.denom, (lambda[h]/lambda[j]) * (s[j]/s[h]) * exp(-0.5 * ((1/s[h]^2) * res[i, h] - (1/s[j]^2) * res[i, j])))
        }
        z[i, j] = 1/sum(z.denom)
      }
    }
    z = z/apply(z, 1, sum)
    z[,k] = 1-apply(as.matrix(z[,-k]),1,sum)
    z<-round(z,15)
    z.old <- z
    lambda.new <- apply(z, 2, mean)
    if (sum(lambda.new < 1e-08) > 0 || is.na(sum(lambda.new))) {
      sing <- 1
    } else {
      # M-step: weighted segmented fit per component; if segmented() fails,
      # jitter the changepoint starts up to 19 times before giving up.
      lm.out <- vector("list",k)
      psi.temp=psi.locs
      psi.ind=lapply(1:k,function(i) which(psi[i,]!=0))
      for(i in 1:k){
        if(is.null(seg.Z[[i]]) | (sum(1-sapply(delta,is.null))>0)){
          temp.seg <- lm(fmla,data=data.x,weights=z[,i])
        } else temp.seg <- try(suppressWarnings(segmented(lm(fmla,data=data.x,weights=z[,i]),seg.Z=seg.Z[[i]],psi=psi.temp[[i]])),silent=TRUE)
        if(class(temp.seg)[1]=="try-error"){
          seq = 1
          temp.names = names(psi.locs.old[[i]])
          while(seq < 20){
            psi.temp2 <- vector("list",length(psi.temp[[i]]))
            for(ii in 1:length(psi.temp[[i]])){
              x.range <- range(data.x[,which(names(data.x)==temp.names[ii])])
              psi.temp2[[ii]] <- psi.temp[[i]][[ii]]+sample(c(-1,1),length(psi.temp[[i]][[ii]]),replace=TRUE)*runif(length(psi.temp[[i]][[ii]]),0,diff(x.range)/10)
              if((any(psi.temp2[[ii]]<=x.range[1]))|(any(psi.temp2[[ii]]>=x.range[2]))) psi.temp2[[ii]]=psi.temp[[i]][[ii]]
              psi.temp2[[ii]]=sort(psi.temp2[[ii]])
            }
            names(psi.temp2)=temp.names
            temp.seg <- try(suppressWarnings(segmented(lm(fmla,data=data.x,weights=z[,i]),seg.Z=seg.Z[[i]],psi=psi.temp2[[i]],control=seg.control(it.max=1))),silent=TRUE)
            if(class(temp.seg)[1]=="try-error"){
              seq = seq+1
            } else seq=40
          }
          if(seq!=40){
            temp.seg <- try(suppressWarnings(segmented(lm(fmla,data=data.x,weights=z[,i]),seg.Z=seg.Z[[i]],psi=psi.temp[[i]],control=seg.control(it.max=1))),silent=TRUE)
          }
        }
        lm.out[[i]]=temp.seg
      }
      lambda <- lambda.new
      if(sum(sapply(lm.out,class)=="try-error")>0){
        newobsloglik=-Inf
      } else{
        # Collect updated changepoint locations (kept fixed when delta given).
        if(sum(1-sapply(delta,is.null))>0){
          psi.new <- psi.locs.old
        } else {
          psi.new <- psi.locs
          for(i in 1:k){
            if(class(lm.out[[i]])[1]=="segmented"){
              temp.names=names(psi.locs[[i]])
              temp.cumsum=cumsum(sapply(psi.locs[[i]],length))
              TC.ind = length(temp.cumsum)
              seg.temp = lm.out[[i]]$psi[,2]
              psi.new[[i]] = lapply(1:length(psi.locs[[i]]), function(j) as.numeric(lm.out[[i]]$psi[,2]))
              psi.new[[i]] = vector("list",TC.ind)
              psi.new[[i]][[1]]=sort(seg.temp[1:temp.cumsum[1]])
              if(TC.ind>1) for(j in 2:TC.ind) psi.new[[i]][[j]] = sort(seg.temp[(temp.cumsum[j-1]+1):temp.cumsum[j]])
              names(psi.new[[i]])=temp.names
            }
          }
        }
        # Refit coefficients under every label permutation and keep the best.
        X.aug.new <- lapply(1:k, function(i) cbind(1,aug.x(x[,-1],unlist(psi.new[[i]]),psi[i,],delta[[i]])))
        lm.out2=lapply(1:perm.ind, function(j) lapply(1:k, function(i) lm(y~X.aug.new[[i]][,-1],weights=z[,perms[j,i]])))
        beta.new <- lapply(1:perm.ind, function(j) lapply(lm.out2[[j]],coef))
        null.perms <- sapply(1:perm.ind,function(i) all(!is.na(lapply(beta.new,unlist)[[i]])))
        null.beta=0
        if(sum(null.perms)>0){
          xbeta.new <- lapply(1:perm.ind, function(j) lapply(1:k, function(i) X.aug.new[[i]] %*% matrix(beta.new[[j]][[i]],ncol=1)))
          res <- lapply(1:perm.ind, function(j) sapply(1:k, function(i) (y - xbeta.new[[j]][[i]])^2))
          s.new <- lapply(1:perm.ind, function(j) sqrt(sapply(1:k, function(i) sum(z[,perms[j,i]] * (res[[j]][, i]))/sum(z[,perms[j,i]]))))
          comp <- lapply(1:perm.ind, function(j) lapply(1:k, function(i) lambda.new[i] * dnorm(y, xbeta.new[[j]][[i]], s.new[[j]][i])))
          comp <- lapply(1:perm.ind, function(j) sapply(comp[[j]], cbind))
          compsum <- lapply(1:perm.ind, function(j) apply(comp[[j]], 1, sum))
          newobsloglik <- sapply(1:perm.ind, function(j) sum(log(compsum[[j]])))
          newobsloglik[c(1-null.perms)]=-Inf
          IND <- which.max(newobsloglik)
          z = z[,perms[IND,]]
          lambda.new <- apply(z, 2, mean)
          lambda <- lambda.new
          beta <- beta.new[[IND]]
          xbeta <- xbeta.new[[IND]]
          res <- res[[IND]]
          X.aug <- X.aug.old
          s <- s.new[[IND]]
          psi.locs <- psi.new
          sing <- sum(s < 1e-08)
          newobsloglik <- newobsloglik[IND]
        } else{
          newobsloglik=Inf
          null.beta=1
        }
      }
      # NOTE(review): the condition below is garbled in this copy --
      # presumably "if ((newobsloglik < obsloglik) & ...)" upstream; confirm
      # against the released mixtools source.
      if((newobsloglik0){
        xbeta.new <- lapply(1:perm.ind, function(j) lapply(1:k, function(i) X.aug.old[[i]] %*% matrix(beta.new[[j]][[i]],ncol=1)))
        res <- lapply(1:perm.ind, function(j) sapply(1:k, function(i) (y - xbeta.new[[j]][[i]])^2))
        s.new <- lapply(1:perm.ind, function(j) sqrt(sapply(1:k, function(i) sum(z[,perms[j,i]] * (res[[j]][, i]))/sum(z[,perms[j,i]]))))
        comp <- lapply(1:perm.ind, function(j) lapply(1:k, function(i) lambda.new[i] * dnorm(y, xbeta.new[[j]][[i]], s.new[[j]][i])))
        comp <- lapply(1:perm.ind, function(j) sapply(comp[[j]], cbind))
        compsum <- lapply(1:perm.ind, function(j) apply(comp[[j]], 1, sum))
        newobsloglik <- sapply(1:perm.ind, function(j) sum(log(compsum[[j]])))
        newobsloglik[c(1-null.perms)]=-Inf
        IND <- which.max(newobsloglik)
        z = z[,perms[IND,]]
        lambda.new <- apply(z, 2, mean)
        lambda <- lambda.new
        beta <- beta.new[[IND]]
        xbeta <- xbeta.new[[IND]]
        res <- res[[IND]]
        X.aug <- X.aug.old
        s <- s.new[[IND]]
        psi.locs <- psi.locs.old
        sing <- sum(s < 1e-08)
        newobsloglik <- newobsloglik[IND]
      } else{
        newobsloglik=Inf
        sing=1
      }
    }
    # NOTE(review): also garbled here -- likely "if (sing > 0 || ...)"
    # upstream (restart on singularity / degenerate likelihood).
    if(newobsloglik 0 || is.na(newobsloglik) || abs(newobsloglik) == Inf) {
      # Singular solution: draw fresh starting values and restart EM.
      cat("Need new starting values due to singularity...", "\n")
      restarts <- restarts + 1
      if (restarts > max.restarts) stop("Too many tries!")
      tmp.ind=1
      while(tmp.ind==1){
        if(sum(1-sapply(delta,is.null))>0) psi.temp=psi.locs
        tmp <- try(suppressWarnings(segregmix.init(y=y, x=x.old, lambda = NULL, beta = NULL, s = NULL, k = k, seg.Z=seg.Z, psi=psi, psi.locs = NULL)),silent=TRUE)
        if(class(tmp)!="try-error") tmp.ind=2
      }
      lambda <- tmp$lambda
      beta <- tmp$beta
      s <- tmp$s
      k <- tmp$k
      psi.locs <- tmp$psi.locs
      n <- length(y)
      diff <- 1
      iter <- 0
      X.aug <- lapply(1:k, function(i) cbind(1,aug.x(x[,-1],unlist(psi.locs[[i]]),psi[i,],delta[[i]])))
      xbeta <- lapply(1:k, function(i) X.aug[[i]] %*% matrix(beta[[i]],ncol=1))
      res <- sapply(1:k, function(i) as.numeric((y - xbeta[[i]])^2))
      comp <- t((lambda/sqrt(2 * pi * s^2)) * t(exp(-t(t(res)/(2 * s^2)))))
      obsloglik <- sum(log(apply(comp, 1, sum)))
      ll <- obsloglik
    } else {
      # Accept the update and track the log-likelihood sequence.
      diff <- newobsloglik - obsloglik
      obsloglik <- newobsloglik
      ll <- c(ll, obsloglik)
      X.aug.old <- X.aug
      psi.locs.old <- psi.locs
      iter <- iter + 1
      if (verb) {
        cat("iteration =", iter, "diff =", diff, "log-likelihood =", obsloglik, "\n")
      }
    }
  }
  if (iter == maxit) {
    warning("Maximum number of iterations reached.", call. = FALSE)
  }
  if (iter == 1) {
    cat("Converged in 1 iteration. Consider rerunning with different starting values or smaller stopping criterion.", "\n")
  }
  cat("number of iterations=", iter, "\n")
  # Attach readable names to all per-component output pieces.
  names(delta) <- c(paste("comp", ".", 1:k, sep = ""))
  names(seg.Z) <- c(paste("comp", ".", 1:k, sep = ""))
  names(psi.locs) <- c(paste("comp", ".", 1:k, sep = ""))
  names(beta) <- c(paste("comp", ".", 1:k, sep = ""))
  for(i in 1:k){
    names(beta[[i]])[1]="(Intercept)"
    names(beta[[i]])[2:ncol(x)]=colnames(x)[-1]
    if(!is.null(psi.locs[[i]])){
      for(j in 1:ncol(psi)){
        if(psi[i,j]>0 & j==1){
          names(beta[[i]])[(ncol(x)+1):(ncol(x)+cumsum(psi[i,])[j])]=c(paste(colnames(x)[j+1], ".", 1:psi[i,j], sep = ""))
        } else if(psi[i,j]>0) names(beta[[i]])[(ncol(x)+cumsum(psi[i,])[j-1]+1):(ncol(x)+cumsum(psi[i,])[j])]=c(paste(colnames(x)[j+1], ".", 1:psi[i,j], sep = ""))
      }
    }
  }
  colnames(z) <- c(paste("comp", ".", 1:k, sep = ""))
  a = list(x = x, y = y, lambda = lambda, beta = beta, sigma = s, seg.Z = seg.Z, psi.locs = psi.locs, delta = delta, loglik = obsloglik, posterior = z, all.loglik = ll, restarts = restarts, ft = "segregmixEM")
  class(a) = "mixEM"
  a
}
# Simulate n lifetimes from a finite mixture of exponentials.
#
# n       number of draws.
# lambda  vector of component mixing probabilities.
# rate    vector of component rate parameters (same length as lambda).
#
# A latent component label is drawn for each observation, then an
# exponential lifetime is generated with that component's rate.
rexpmix <- function(n, lambda = 1, rate = 1) {
  n.comp <- length(lambda)
  labels <- sample(n.comp, n, replace = TRUE, prob = lambda)
  rexp(n, rate = rate[labels])
}
## EM algorithm for Reliability Mixture Models (RMM) with censoring:
## parametric univariate finite mixture of exponentials with right censoring.
## Ref: Bordes L. and Chauveau D., Computational Statistics (2016).
#
# x        observed lifetimes; when d is supplied, x = min(t, c).
# d        censoring indicators d = I(t <= c); NULL means no censoring.
# lambda   starting mixing proportions (NULL -> uniform 1/k).
# rate     starting component rates (NULL -> all 1).
# k        number of components (overridden by length(lambda) or
#          length(rate) when either is supplied).
# complete "tdz" for complete data (t, d, z), anything else for (x, z);
#          this changes the M-step update of the rates.
# epsilon, maxit, verb  usual EM controls.
#
# Returns a "mixEM" object with final lambda/rate, posteriors, the
# log-likelihood, and the full per-iteration parameter sequences.
expRMM_EM <- function (x, d=NULL, lambda = NULL, rate = NULL, k = 2,
                       complete="tdz",
                       epsilon = 1e-08, maxit = 1000, verb = FALSE) {
  n <- length(x)
  if (!is.null(lambda)) k=length(lambda)
  if (!is.null(rate)) k=length(rate)  # either one, if supplied, defines k
  if (is.null(d)) d <- rep(1,n)       # no censoring: plain EM
  xx <- matrix(x, nrow=n, ncol=k)     # x repeated k times, used in E-steps
  dd <- matrix(d, nrow=n, ncol=k)     # idem for d
  if (is.null(lambda)) lambda <- rep(1/k,k)
  if (is.null(rate)) rate <- rep(1,k)
  # Choose the complete-data model for the rate update.
  if (complete=="tdz") comptdz=TRUE else comptdz=FALSE
  # Per-iteration parameter sequences (row 1 holds the starting values).
  lambda_seq <- rate_seq <- matrix(0, nrow = maxit, ncol = k)
  lambda_seq[1,] <- lambda
  rate_seq[1,] <- rate
  loglik_seq <- NULL
  oldloglik <- -Inf
  dll <- epsilon+1
  iter <- 1
  # Pre-allocated so the post-loop code works even if the loop never runs.
  post <- matrix(nrow = n, ncol = k)
  while (dll > epsilon && iter < maxit) {
    ### E-step ###
    rr <- matrix(rate, n, k, byrow=TRUE)
    ll <- matrix(lambda, n, k, byrow=TRUE)
    # Censored observations contribute the survival function, uncensored
    # ones the density.
    post <- ((ll*dexp(xx,rr))^dd)*((ll*(1-pexp(xx,rr)))^(1-dd))
    rs <- rowSums(post)
    loglik <- sum(log(rs))           # log-likelihood (constant term dropped)
    post <- sweep(post, 1, rs, "/")  # normalized posteriors p_{ij}
    ### M-step ###
    lambda <- colMeans(post)
    lambda_seq[iter+1, ] <- lambda
    if (comptdz) rate <- colSums(post*dd)/colSums(post*xx)  # complete=(t,d,z)
    if (!comptdz) {
      # Complete data = (x, z): censored lifetimes are imputed by their
      # conditional expectation 1/rate.
      mean_xi <- matrix(1/rate, n, k, byrow=TRUE)
      rate <- colSums(post)/colSums(post*(xx + (1-dd)*mean_xi))
    }
    rate_seq[iter+1, ] <- rate
    dll <- loglik - oldloglik        # = Inf on the first iteration
    oldloglik <- loglik
    loglik_seq <- c(loglik_seq, loglik)
    if (verb) {
      cat("iteration", iter, " log-lik diff =", dll, " log-lik =", loglik, "\n")
    }
    iter <- iter + 1
  }
  if (iter == maxit) cat("WARNING! NOT CONVERGENT!", "\n")
  cat("number of iterations =", iter - 1, "\n")
  colnames(post) <- c(paste("comp", ".", 1:k, sep = ""))
  a=list(x=x, d=d, lambda = lambda, rate = rate,
         loglik = loglik, posterior = post, all.loglik=loglik_seq,
         all.lambda = lambda_seq[1:iter,],
         all.rate = rate_seq[1:iter,],
         ft="expRMM_EM")
  class(a) = "mixEM"
  a
}
# Likelihood-ratio test of equality constraints in normal mixtures or
# mixtures of regressions.
#
# With arbmean = FALSE: tests H0 "common mean/beta, component-specific
# variances" against the unconstrained H1.  With arbvar = FALSE: tests H0
# "common variance, component-specific means/betas" against H1.  Exactly one
# of arbmean/arbvar must be FALSE.
#
# y, x    data; if x is NULL a normal mixture (normalmixEM) is used,
#         otherwise a mixture of regressions (regmixEM).
# mu, sigma, beta, lambda  optional starting values passed to the EM fits.
# ...     further arguments forwarded to normalmixEM/regmixEM.
#
# The while(w == 1) loops rerun both fits with fresh starting values
# whenever the LRT statistic D comes out negative (i.e., the constrained
# fit found a higher likelihood than the unconstrained one).
#
# Returns list(chi.sq, df, p.value); p-value from the chi-squared
# approximation with the usual parameter-count difference.
test.equality <- function(y, x = NULL, arbmean=TRUE, arbvar=FALSE, mu=NULL, sigma=NULL, beta=NULL, lambda=NULL,...){
  if(arbmean==arbvar) stop("Change either 'arbmean' or 'arbvar'!")
  if(arbmean==FALSE){
    w=1
    while(w==1){
      if(is.null(x)){
        # H0: scale mixture (common mean); H1: fully unconstrained.
        H0=normalmixEM(x=y,arbmean=FALSE,arbvar=TRUE, mu=mu, sigma=sigma, lambda=lambda,...)
        k=length(H0$lambda)
        H1=normalmixEM(x=y,arbmean=TRUE,arbvar=TRUE,lambda=H0$lambda,mu=NULL,sigma=(H0$scale*H0$sigma),...)
        D=2*(H1$loglik-H0$loglik)
        df=k-1
        alpha=1-pchisq(D,df=df)
      } else{
        # Regression analogue: common beta under H0.
        H0=regmixEM(y=y,x=x,arbmean=FALSE,arbvar=TRUE,beta=beta, sigma=sigma, lambda=lambda,...)
        k=length(H0$lambda)
        H1=regmixEM(y=y,x=x,arbmean=TRUE,arbvar=TRUE,lambda=H0$lambda,beta=NULL,sigma=(H0$scale*H0$sigma),...)
        p=nrow(H1$beta)
        D=2*(H1$loglik-H0$loglik)
        df=p*(k-1)
        alpha=1-pchisq(D,df=df)
      }
      if(D<0){
        # Constrained fit beat the unconstrained one: retry from scratch.
        w=1
        mu=NULL
        sigma=NULL
        lambda=NULL
      } else w=2
    }
  }
  if(arbvar==FALSE){
    w=1
    while(w==1){
      if(is.null(x)){
        # H0: common variance; H1: fully unconstrained.
        H0=normalmixEM(x=y,arbmean=TRUE,arbvar=FALSE,mu=mu, sigma=sigma, lambda=lambda,...)
        k=length(H0$lambda)
        H1=normalmixEM(x=y,arbmean=TRUE,arbvar=TRUE,lambda=H0$lambda,mu=H0$mu,sigma=NULL,...)
        D=2*(H1$loglik-H0$loglik)
        df=k-1
        alpha=1-pchisq(D,df=df)
      } else{
        H0=regmixEM(y=y,x=x,arbmean=TRUE,arbvar=FALSE,beta=beta, sigma=sigma, lambda=lambda,...)
        k=length(H0$lambda)
        H1=regmixEM(y=y,x=x,arbmean=TRUE,arbvar=TRUE,lambda=H0$lambda,beta=H0$beta,sigma=NULL,...)
        D=2*(H1$loglik-H0$loglik)
        df=k-1
        alpha=1-pchisq(D,df=df)
      }
      if(D<0){
        w=1
        mu=NULL
        sigma=NULL
        lambda=NULL
      } else w=2
    }
  }
  a=list(chi.sq=D, df=df, p.value=alpha)
  a
}
compCDF <- function(data, weights,
                    x=seq(min(data, na.rm=TRUE), max(data, na.rm=TRUE), length.out=250),
                    comp=1:NCOL(weights), makeplot=TRUE, ...)
{
  # Weighted empirical CDF estimate for each mixture component, evaluated
  # at the cutpoints in x.
  #
  # data      n x r matrix of observations (NAs allowed).
  # weights   n x m matrix of component weights (e.g. posteriors).
  # x         cutpoints at which the CDFs are evaluated.
  # comp      which components to estimate/plot.
  # makeplot  if TRUE, draw the CDF curves (one lty per component).
  # ...       passed on to plot() and lines().
  #
  # Returns a length(comp) x length(x) matrix of CDF estimates.
  if (NROW(weights) != NROW(data)) {
    stop("data and weights arguments must have same number of rows")
  }
  # Normalize the weights so the sum of each column is 1/NCOL(data).
  weights <- t(t(weights) / (NCOL(data) * colSums(weights)))
  # Give a binomial count for each row of the data and for each x.
  f <- function(row, cutpt) colSums(outer(row, cutpt, "<="), na.rm = TRUE)
  bc <- apply(data, 1, f, x)
  # bc is a length(x) by n matrix; multiplying by the weights and summing
  # rows gives the CDF estimates -- a single matrix product.
  cdfs <- bc %*% weights[,comp,drop=FALSE]
  if(makeplot) {
    plot(range(x), 0:1, type="n", ...)
    # BUG FIX: cdfs already has one column per entry of comp, so index by
    # position i rather than comp[i] (the old code failed with a subscript
    # error whenever comp was not 1:NCOL(weights)).
    for (i in seq_along(comp)) {
      lines(x, cdfs[,i], lty=i, ...)
    }
  }
  t(cdfs)
}
## Semiparametric EM-like algorithm for a location-scale mixture model with
## independent repeated measures, where each block has its own location and
## scale but all components/blocks share a common density shape
## (model (17) in Benaglia et al.).
#
# x          n x r data matrix (one row per subject).
# mu0        either a matrix of initial centers (rows = components) or the
#            number of components; passed to kmeans() for initialization.
# blockid    length-r vector assigning coordinates to blocks.
# bw, h      kernel bandwidth (h is a backward-compatible alias for bw).
# constbw    if FALSE, the bandwidth is re-estimated each iteration from
#            the weighted IQR of the standardized data.
# eps        convergence tolerance on the change in lambda (and bw).
# maxiter    maximum number of iterations.
# stochastic if TRUE, run the stochastic version (posterior draws instead
#            of soft weights; posteriors are averaged over iterations).
# verb       print per-iteration progress.
#
# Returns an object of class "spEM" with posteriors, per-iteration lambda,
# mu, sigma sequences, final estimates, bandwidth and log-likelihoods.
spEM <- function(x, mu0, blockid = 1:ncol(x),
                 bw=bw.nrd0(as.vector(as.matrix(x))), constbw = TRUE,
                 h=bw, eps=1e-8,
                 maxiter=500, stochastic = FALSE, verb = TRUE){
  bw <- h # h is alternative bandwidth argument, for backward compatibility
  x <- as.matrix(x)
  n <- nrow(x)    # number of subjects
  r <- ncol(x)    # number of measurements in each subject
  u <- match(blockid, unique(blockid)) # convert blockid to integers 1, 2, ...
  if (is.matrix(mu0))
    m <- dim(mu0)[1]  # mu0=centers
  else
    m <- mu0          # mu0=number of clusters
  z.hat <- matrix(0, nrow = n, ncol = m)
  tt0 <- proc.time() # for total time
  ## Initial values: hard assignment from kmeans (trivial when m == 1).
  if(m == 1) z.hat <- matrix(1, nrow = n, ncol = m)
  else{
    kmeans <- kmeans(x, mu0)
    for(j in 1:m) z.hat[kmeans$cluster==j, j] <- 1
  }
  iter <- 0
  if (stochastic) {
    sumpost <- matrix(0, n, m)
  }
  finished <- FALSE
  lambda <- matrix(0, nrow = maxiter, ncol = m)
  mu <- sigma <- array(0, dim=c(maxiter, m, max(u)))
  stackedx <- x[rep(1:n,m),]
  loglik <- NULL
  while (!finished) {
    # Loop invariant: z.hat (the E-step result) is already in place.
    iter <- iter + 1
    bw.old <- bw
    t0 <- proc.time()
    ## M-step: mixing proportions.
    lambda[iter, ] <- colMeans(z.hat)
    if (stochastic) {
      # Stochastic step: draw hard labels from the posteriors.
      z <- t(apply(z.hat, 1, function(prob) rmultinom(1, 1, prob)))
      cs <- colSums(z)
      z.tmp <- sweep(z, 2, cs, "/")
      z.tmp[, cs==0] <- 1/NROW(z.tmp) # just in case a component empties
    } else {
      cs <- colSums(z.hat)
      z.tmp <- sweep(z.hat, 2, cs, "/")
      z.tmp[, cs==0] <- 1/NROW(z.tmp) # just in case
    }
    h <- bw
    ## More M-step: block-wise weighted means and std devs are the
    ## location / scale parameters.
    for (k in 1:max(u)) { # k is the block number
      r2 <- sum(u == k)
      x2 <- x[, u==k]     # subset of data belonging to kth block (n x r2)
      mu[iter, , k] <- as.vector(rowMeans(t(z.tmp) %*% x2))
      for (j in 1:m)
        sigma[iter, j, k] <- sqrt(sum(z.tmp[,j] * (x2-mu[iter, j, k])^2)/r2)
    }
    ## Density estimation step (adaptive bandwidth when !constbw).
    if (!constbw) {
      wts <- rep(as.vector(z.tmp),r)
      scaledx <- as.vector((stackedx - mu[iter, rep(1:m, each=n), u])/
                             sigma[iter, rep(1:m, each=n), u])
      h <- bw <- 0.9 / (n*r)^(1/5) * min(1, wiqr<-wIQR(wt=wts,
                                                       x=scaledx)/1.34)
    }
    # Weighted KDE of the common shape, evaluated at all data points
    # (C routine registered by the package).
    ans <- .C(C_KDElocscale, n = as.integer(n), m = as.integer(m),
              r = as.integer(r), blockid=as.integer(u),
              mu = as.double(mu[iter, , ]), sigma = as.double(sigma[iter, , ]),
              x = as.double(x), bw = as.double(h), z = as.double(z.tmp),
              f = double(n*m), PACKAGE="mixtools")
    lambda.f <- sweep(matrix(ans$f, ncol=m), 2, lambda[iter, ], "*")
    ## E-step (for next iteration).
    z.hat <- lambda.f/rowSums(lambda.f)
    loglik <- c(loglik,sum(log(rowSums(lambda.f)))) # log-likelihood
    finished <- iter >= maxiter
    if (stochastic) {
      sumpost <- sumpost + z.hat
    } else if (iter > 1) {
      # This convergence criterion may be too simplistic:
      maxchange <- max(abs(lambda[iter,] - lambda[iter-1,]))
      if (!constbw) maxchange <- max(maxchange, max(abs(bw.old - bw)))
      finished <- finished | (maxchange < eps)
    }
    if (verb) {
      t1 <- proc.time()
      cat("iteration", iter, " lambda ", round(lambda[iter, ], 4))
      cat(" time", (t1 - t0)[3], "\n")
    }
  }
  if (verb) {
    tt1 <- proc.time()
    cat("lambda ", round(lambda[iter, ], 4))
    cat(", total time", (tt1 - tt0)[3], "s\n")
  }
  if (stochastic) {
    # Stochastic version: report posteriors averaged over iterations and
    # parameter estimates averaged over the whole sequence.
    return(structure(list(data = x, posteriors = sumpost/iter,
                          lambda = lambda, bandwidth = bw,
                          blockid = u, lambdahat = colMeans(lambda),
                          mu = mu, muhat = colMeans(mu),
                          sigma = sigma, sigmahat = colMeans(sigma),
                          loglik = loglik),
                     class="spEM"))
  } else {
    return(structure(list(data = x, posteriors = z.hat,
                          lambda = lambda[1:iter,], bandwidth = bw,
                          blockid = u, lambdahat = lambda[iter,],
                          mu = mu[1:iter, , ], muhat = mu[iter, , ],
                          sigma = sigma[1:iter, , ],
                          sigmahat = sigma[iter, , ], loglik = loglik),
                     class="spEM"))
  }
}
# S3 print method for "npEM" objects.  Prints the data dimensions and the
# number of mixture components (plus block count for multivariate fits),
# then prints the remaining list elements with the bulky pieces (data,
# posteriors, lambda, mu) removed.  Returns x invisibly.
print.npEM <- function(x, ...)
{
  n.obs <- NROW(x$data)
  n.coord <- NCOL(x$data)
  n.comp <- length(x$lambdahat)
  cat(paste("Observations:", n.obs, "\n"))
  cat(paste("Coordinates per observation:", n.coord, "\n"))
  cat(paste("Mixture components:", n.comp, "\n"))
  if (n.coord > 1) {
    cat(paste("Blocks (of conditionally iid coordinates):",
              max(x$blockid), "\n\n"))
  }
  # nomatch = 0 makes absent names harmless under negative indexing.
  drop.idx <- match(c("data", "posteriors", "lambda", "mu"),
                    names(x), nomatch = 0)
  print.default(structure(x[-drop.idx], class = class(x)), ...)
  invisible(x)
}
# EM algorithm for a finite mixture of multinomials.
#
# y        n x p matrix of multinomial counts (or a list with a $y element,
#          e.g. the output of makemultdata); row totals need not be equal.
# lambda   starting mixing proportions (generated if NULL).
# theta    starting k x p matrix of cell probabilities (generated if NULL).
# k        number of components.
# maxit, epsilon, verb  usual EM controls.
#
# Posteriors and the log-likelihood are computed in C (C_multinompost).
# On numerical failure (decreasing log-likelihood, NaN, a component with
# all-zero posteriors) the algorithm restarts with fresh random starting
# values, up to 50 times.  Returns a "mixEM" object.
multmixEM <- function (y, lambda = NULL, theta = NULL, k = 2,
                       maxit = 10000, epsilon = 1e-08, verb = FALSE) {
  if (class(y)[1]=="list" && !is.null(y$y)) {
    y <- y$y
  }
  n <- nrow(y)
  p <- ncol(y)
  m <- colSums(y)
  r <- rowSums(y) # These need not all be the same
  tmp <- multmix.init(y=y, lambda=lambda, theta=theta, k=k)
  lambda <- tmp$lambda
  theta <- tmp$theta
  k <- tmp$k
  restarts<-0
  mustrestart <- FALSE
  # Multinomial normalizing constant; does not change across iterations.
  llconstant <- sum(lgamma(r+1)) - sum(lgamma(y+1))
  while (restarts < 50) {
    ll <- NULL
    iter <- 0
    diff <- epsilon+1 # to ensure entering main EM loop
    # Normalize rows of theta matrix and ensure no zeros (log is taken).
    theta <- theta/rowSums(theta)
    theta <- pmax(theta, 1e-100)
    # Preparatory E-step prior to entering main EM loop:
    # kxn matrix of log(lambda * component densities).
    loglamcd <- log(lambda) + log(theta) %*% t(y)
    z <- .C(C_multinompost, as.integer(n), as.integer(k),
            as.double(loglamcd), post=double(n*k),
            loglik=as.double(llconstant),
            PACKAGE = "mixtools")
    post <- matrix(z$post, ncol=k)
    newll <- z$loglik
    # A component whose posteriors are all zero forces a restart.
    tmp.post <- (post==0)
    if(any(apply(tmp.post,2,sum)==n)){
      diff <- epsilon
      mustrestart <- TRUE
    }
    while ((iter < maxit) && diff > epsilon) { # main EM loop
      iter <- iter + 1
      oldll <- newll
      ll <- c(ll, oldll)
      # M-step: theta proportional to sum of posteriors * data.
      theta <- t(post) %*% y
      theta <- theta/rowSums(theta)
      theta <- pmax(theta, 1e-100) # ensure no zeros
      # M-step: update the lambdas as usual for a finite mixture.
      lambda <- colMeans(post)
      # E-step: posteriors and log-likelihood via the C routine.
      loglamcd <- log(lambda) + log(theta) %*% t(y)
      z <- .C(C_multinompost, as.integer(n), as.integer(k),
              as.double(loglamcd), post=double(n*k),
              loglik=as.double(llconstant),
              PACKAGE = "mixtools")
      post <- matrix(z$post, ncol=k)
      newll <- z$loglik
      diff <- newll - oldll
      # EM should never decrease the log-likelihood; treat a decrease (or
      # non-finite value) as a numerical failure.
      if (diff<0 || is.na(newll) || is.infinite(newll) || is.nan(newll)) {
        mustrestart <- TRUE
        break
      }
      if (verb) {
        cat("iteration=", iter, "diff=", diff, "log-likelihood",
            ll[iter], "lambda", lambda, "\n")
      }
    }
    if (mustrestart) {
      cat("Restarting due to numerical problem.\n")
      mustrestart <- FALSE
      restarts <- restarts + 1
      tmp <- multmix.init(y=y, k=k)
      lambda <- tmp$lambda
      theta <- tmp$theta
      k <- tmp$k
    } else {
      if (iter == maxit) {
        cat("Warning: Not convergent after", maxit, "iterations\n")
      }
      # Force each theta row to sum to exactly 1 before labeling output.
      theta[,p] <- 1-apply(as.matrix(theta[,1:(p-1)]),1,sum)
      colnames(theta) <- c(paste("theta", ".", 1:p, sep = ""))
      rownames(theta) <- c(paste("comp", ".", 1:k, sep = ""))
      colnames(post) <- c(paste("comp", ".", 1:k, sep = ""))
      cat("number of iterations=", iter, "\n")
      out <-list(y=y, lambda = lambda, theta = theta,
                 loglik = ll[length(ll)], posterior = post, all.loglik=ll,
                 restarts=restarts, ft="multmixEM")
      class(out) <- "mixEM"
      return(out)
    }
  }
  stop("Too many restarts!")
}
# Mixturegram: a graphical tool for assessing the number of components k.
#
# data     data matrix.
# pmbs     list of posterior-probability matrices from mixture fits with
#          2, 3, ..., k components (so k = length(pmbs) + 1).
# method   dimension-reduction method used for the profile scores:
#          "pca" (princomp), "kpca" (kernlab::kpca) or "lda" (internal ldc).
# all.n    if TRUE, draw one profile per observation; otherwise draw the
#          class-wise medians with +/- 1,2,3 MAD bands.
# id.con   optional list of component-ordering statistics; defaults to the
#          posterior-weighted means of the data per component.
# score    which principal-component (or discriminant) score to use.
# iter.max, nstart  controls passed to kmeans().
# ...      further arguments to plot().
#
# Draws the mixturegram on the current device and prints (invisibly
# returning nothing) the k-means stopping proportions used as a heuristic
# for selecting k.
mixturegram <- function(data, pmbs, method=c("pca","kpca","lda"), all.n=FALSE,
                        id.con=NULL, score=1, iter.max=50, nstart=25, ...){
  # Colorblind-friendly palette, recycled.
  col.blind=rep(c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442",
                  "#0072B2", "#D55E00", "#CC79A7", "#7FFF00","#7D26CD"),100)
  method <- match.arg(method)
  k=length(pmbs)+1
  # Default ordering statistic: posterior-weighted component means.
  if (is.null(id.con)) id.con=lapply(pmbs,function(x) data%*%x/apply(x,2,sum))
  ### regular pca
  if(method=="pca"){
    # Append the (ordered) posteriors to the data, then take PC scores.
    x.star=c(list(data), lapply(1:(k-1), function(i)
      cbind(data,pmbs[[i]][,order(id.con[[i]])])))
    Data=lapply(x.star,scale)
    if(score>(ncol(Data[[1]])+1)) warning(paste("The largest value that can be specified for 'score' is ",ncol(Data[[1]])+1,", which is what will be used for the mixturegram.",sep=""))
    score<-min(score,ncol(x.star[[2]]))
    if(score==1){
      PCA=lapply(Data,function(x) x%*%princomp(x)$loadings[,score])
    } else{
      PCA <- vector("list",k)
      PCA[[1]]=Data[[1]]%*%princomp(Data[[1]])$loadings[,1]
      PCA[2:k]=lapply(1:(k-1),function(i)
        Data[[(i+1)]]%*%princomp(Data[[(i+1)]])$loadings[,score])
    }
    K=lapply(2:k,function(i) kmeans(PCA[[i]],i,iter.max=iter.max,nstart=nstart))
    lim=ceiling(max(abs(unlist(PCA))))
    # rep(1e1000, k) evaluates to Inf, so nothing is actually drawn here;
    # this call only sets up the axes.
    plot(1:k,rep(1e1000,k),ylim=c(-lim,lim),col='white',xlab='k',
         ylab='PC Scores',xaxt="n", ...)
    rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4],
         col = colors()[356])
    axis(1,at=1:k,labels=1:k)
    abline(v=1:k,col="gray80")
    mix.method <- PCA
  }else{
    ### kernel pca
    if(method=="kpca"){
      x.star=c(list(data), lapply(1:(k-1), function(i)
        cbind(data,pmbs[[i]][,order(id.con[[i]])])))
      Data=lapply(x.star,scale)
      if(score>(ncol(Data[[1]])+1)) warning(paste("The largest value that can be specified for 'score' is ",ncol(Data[[1]])+1,", which is what will be used for the mixturegram.",sep=""))
      score<-min(score,ncol(x.star[[2]]))
      if(score==1){
        kPCA=lapply(Data,function(x) cbind(pcv(kpca(x))[,1]))
      } else{
        kPCA <- vector("list",k)
        kPCA[[1]]=cbind(pcv(kpca(Data[[1]]))[,1])
        kPCA[2:k]=lapply(1:(k-1),function(i) cbind(pcv(kpca(Data[[i+1]]))[,score]))
      }
      K=lapply(2:k,function(i) kmeans(kPCA[[i]],i,iter.max=iter.max,nstart=nstart))
      lim=max(abs(unlist(kPCA)))+0.1
      plot(1:k,rep(1e1000,k),ylim=c(-lim,lim),col='white',xlab='k',
           ylab='Kernel PC Scores',xaxt="n", ...)
      rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4],
           col = colors()[356])
      axis(1,at=1:k,labels=1:k)
      abline(v=1:k,col="gray80")
      mix.method <- kPCA
    }else{
      ### linear discriminant scores
      if(method=="lda"){
        class=lapply(pmbs, function(post) apply(post,1,which.max))
        ldcdata = c(list(as.matrix(data) %*% ldc(data, rep(1,nrow(as.matrix(data))),score=score)),
                    lapply(class, function(class) as.matrix(data) %*% ldc(data, class,score=score)))
        K=lapply(2:k,function(i) kmeans(ldcdata[[i]],i,iter.max=iter.max,nstart=nstart))
        lim=ceiling(max(abs(unlist(ldcdata))))
        plot(1:k,rep(1e1000,k),ylim=c(-lim,lim),col='white',xlab='k',
             ylab='LDC Scores',xaxt="n", ...)
        rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4],
             col = colors()[356])
        axis(1,at=1:k,labels=1:k)
        abline(v=1:k,col="gray80")
        mix.method <- ldcdata
      }
    }
  }
  # Map each observation's k-means cluster to a rank-ordered color index.
  Kcol=function(x){
    temp=unlist(sapply(1:length(x$size),function(i) rep(rank(x$center)[i],x$size[i])))
    index=unlist(sapply(1:length(x$size),function(i) which(x$cluster==i)))
    K.col=replace(x$cluster,index,temp)
  }
  all.K.col=lapply(1:(k-1), function(i) Kcol(K[[i]]))
  K.col=all.K.col[[k-1]]
  n=length(K.col)
  K.centers=c(0, lapply(1:(k-1), function(i) sort(c(K[[i]]$centers))))
  if(all.n){
    # One colored segment per observation between consecutive k's,
    # plus the cluster centers as filled points.
    sapply(1:(k-1),function(i) segments(i,mix.method[[i]],(i+1),mix.method[[i+1]],col=col.blind[K.col]))
    points(1,mean(mix.method[[1]]),pch=21,cex=1.2,bg=colors()[553],col=1)
    sapply(2:k,function(i) points(rep(i,i),sort(c(K[[i-1]]$centers)),pch=21,cex=1.2,bg=colors()[553],col=1))
  } else{
    # Summarize by class-wise medians with 1/2/3 MAD shaded bands.
    ride.medians=sapply(1:k, function(i) unlist(by(c(mix.method[[i]]),col.blind[K.col],median)))
    prop.mad=sapply(1:k, function(i) unlist(by(c(mix.method[[i]]),col.blind[K.col],mad)))
    L1=ride.medians-prop.mad
    U1=ride.medians+prop.mad
    L2=ride.medians-2*prop.mad
    U2=ride.medians+2*prop.mad
    L3=ride.medians-3*prop.mad
    U3=ride.medians+3*prop.mad
    srt.colors=rownames(ride.medians)
    srt.colors1=adjustcolor(srt.colors, alpha.f = 0.7)
    srt.colors2=adjustcolor(srt.colors, alpha.f = 0.4)
    srt.colors3=adjustcolor(srt.colors, alpha.f = 0.2)
    invisible(sapply(1:k, function(i) polygon(c(1:k,k:1),c(L3[i,],rev(U3[i,])),col=srt.colors3[i], border=FALSE) ))
    invisible(sapply(1:k, function(i) polygon(c(1:k,k:1),c(L2[i,],rev(U2[i,])),col=srt.colors2[i], border=FALSE) ))
    invisible(sapply(1:k, function(i) polygon(c(1:k,k:1),c(L1[i,],rev(U1[i,])),col=srt.colors1[i], border=FALSE) ))
    invisible(sapply(1:(k-1),function(i) segments(rep(i,k),ride.medians[,i],rep((i+1),k),ride.medians[,(i+1)],col=srt.colors) ))
    points(1,mean(mix.method[[1]]),pch=21,cex=1.2,bg=colors()[553],col=1)
    invisible(sapply(2:k,function(i) points(rep(i,i),sort(c(K[[i-1]]$centers)),pch=21,cex=1.2,bg=colors()[553],col=1)))
  }
  # Stopping heuristic: within/total sum-of-squares ratios from kmeans
  # (K[[i]][[5]] is tot.withinss, K[[i]][[6]] is betweenss).
  props=c(1,sapply(1:length(K), function(i) K[[i]][[5]]/sum(unlist(K[[i]][5:6]))))
  print(list(stopping=props))
}
<- out.EM.old$loglik if (loglik.loc > old.loglik) { out.EM <- out.EM.loc } else out.EM <- out.EM.old loglik.chosen <- out.EM$loglik ll <- c(ll, loglik.chosen) diff <- loglik.chosen - old.loglik if (diff < 0) { cat("Generating new starting values...", "\n") out.EM <- regmixEM.lambda(y, x, lambda = lambda, beta = beta, sigma = sigma, k = k, epsilon = epsilon, maxit = maxit) restarts <- restarts + 1 if (restarts > 15) stop("Too many tries!") iter <- 0 diff <- 1 } else { iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", loglik.chosen, "\n") } } } if (iter == maxit) { cat("WARNING! NOT CONVERGENT!", "\n") } cat("number of overall iterations=", iter, "\n") a = list(x = x, y = y, lambda.x = out.EM$lambda, beta = out.EM$beta, sigma = out.EM$sigma, loglik = loglik.chosen, posterior = out.EM$post, all.loglik = ll, restarts = restarts, ft = "regmixEM.loc") class(a) = "mixEM" a } mixtools/R/regmixmodelsel.R0000755000176200001440000001146111665556372015522 0ustar liggesusersregmixmodel.sel = function (x, y, w=NULL, k = 2, type = c("fixed", "random", "mixed"), ...) { aic <- NULL bic <- NULL caic <- NULL icl <- NULL type <- match.arg(type) AIC <- function(emout, p) { emout$loglik - (p - 1) } BIC <- function(emout, p, n) { emout$loglik - log(n) * (p - 1)/2 } CAIC <- function(emout, p, n) { emout$loglik - (log(n) + 1) * (p - 1)/2 } ICL <- function(emout, p, n) { BIC(emout, p, n) - sum(emout$lambda * log(emout$lambda)) } if (type == "fixed") { p <- function(emout) {length(emout$beta) + length(emout$sigma) + (length(emout$scale)-1)*is.null(emout$scale) +length(emout$lambda)} n <- length(y) for (i in 1:k) { if (i == 1) { a <- glm(y ~ x) beta <- matrix(a$coef, ncol = 1) loglik <- log(prod(dnorm(y, mean = beta[1, ] + as.matrix(x) %*% beta[2:nrow(beta), ], sd = sd(a$res)))) emout <- list(beta = beta, sigma = sd(a$res), lambda = 1, loglik = loglik) } else emout <- regmixEM(y, x, k = i, ...) 
P = p(emout) aic[i] <- AIC(emout, p=P) bic[i] <- BIC(emout, p=P, n=n) caic[i] <- CAIC(emout, p=P, n=n) icl[i] <- ICL(emout, p=P, n=n) } } else if (type == "random") { p <- function(emout) { if(is.list(emout$R)){ m<-nrow(emout$R[[1]]) v<-length(emout$R) } else { m<-nrow(emout$R) v<-1} length(emout$mu) +(v*m*(m+1)/2)+ length(emout$sigma)+ length(emout$lambda)} n <- sum(sapply(y,length)) for (j in 1:k) { if (j == 1) { a <- lapply(1:length(y), function(i) glm(y[[i]] ~ x[[i]])) aa <- sapply(1:length(a), function(i) as.vector(a[[i]]$res)) b <- sapply(1:length(a), function(i) as.matrix(a[[i]]$coef)) mu <- apply(b, 1, mean) R <- cov(t(b)) a.res <- NULL for (i in 1:length(aa)) { a.res <- c(a.res, aa[[i]]) } sd.a <- sd(a.res) loglik <- sapply(1:length(y), function(i) dmvnorm(as.vector(y[[i]]), b[1, i] + as.matrix(x[[i]]) %*% b[2:nrow(b), i], sd.a^2 * diag(length(y[[i]])))) loglik <- log(prod(loglik)) emout <- list(mu = mu, R = R, sigma = sd.a, lambda = 1, loglik = loglik) } else emout <- regmixEM.mixed(y, x, k = j, ...) 
P = p(emout) aic[j] <- AIC(emout, p=P) bic[j] <- BIC(emout, p=P, n=n) caic[j] <- CAIC(emout, p=P, n=n) icl[j] <- ICL(emout, p=P, n=n) } } else if (type == "mixed") { p <- function(emout) { if(is.list(emout$R)){ m<-nrow(emout$R[[1]]) v<-length(emout$R) } else { m<-nrow(emout$R) v<-1} length(emout$alpha)+ length(emout$mu) +(v*m*(m+1)/2)+ length(emout$sigma)+ length(emout$lambda)} n <- sum(sapply(y,length)) for (i in 1:k) { if (i == 1) { a <- lapply(1:length(y), function(i) glm(y[[i]] ~ x[[i]] + w[[i]])) aa <- sapply(1:length(a), function(i) as.vector(a[[i]]$res)) alpha <- sapply(1:length(a), function(i) matrix(a[[i]]$coef[(ncol(x[[i]]) + 2):(ncol(x[[i]]) + 1 + ncol(w[[i]]))])) alpha <- apply(as.matrix(alpha), 1, mean) b <- sapply(1:length(a), function(i) matrix(a[[i]]$coef[1:(ncol(x[[i]]) + 1)])) mu <- apply(b, 1, mean) R <- cov(t(b)) a.res <- NULL for (i in 1:length(aa)) { a.res <- c(a.res, aa[[i]]) } sd.a <- sd(a.res) loglik <- sapply(1:length(y), function(i) dmvnorm(as.vector(y[[i]]), b[1, i] + as.matrix(x[[i]]) %*% b[2:nrow(b), i], sd.a^2 * diag(length(y[[i]])))) loglik <- log(prod(loglik)) emout <- list(mu = mu, R = R, alpha = alpha, sigma = sd.a, lambda = 1, loglik = loglik) } else emout <- regmixEM.mixed(y, x, k = i, ...) 
P = p(emout) aic[i] <- AIC(emout, p=P) bic[i] <- BIC(emout, p=P, n=n) caic[i] <- CAIC(emout, p=P, n=n) icl[i] <- ICL(emout, p=P, n=n) } } out = rbind(aic, bic, caic, icl) Winner = apply(out, 1, function(x) (1:length(x))[x == max(x)]) colnames(out) = 1:k rownames(out) = c("AIC", "BIC", "CAIC", "ICL") cbind(out, Winner) }mixtools/R/repnormmixEM.R0000755000176200001440000001176611665556372015134 0ustar liggesusersrepnormmixEM = function (x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) { if(arbmean == FALSE && arbvar == FALSE){ stop(paste("Must change constraints on mu and/or sigma!","\n")) } s = sigma n <- ncol(x) m <- nrow(x) tmp <- repnormmix.init(x = x, lambda = lambda, mu = mu, s = s, k = k, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda mu <- tmp$mu s <- tmp$s s.2 <- s^2 k <- tmp$k diff <- 1 iter <- 0 res <- lapply(1:k, function(i) (x - mu[i * arbmean + (1 - arbmean)])^2) comp <- lapply(1:k, function(i) lambda[i] * (2 * pi * s.2[i * arbvar + (1 - arbvar)])^(-m/2) * exp(-0.5 * apply(res[[i]], 2, sum)/s.2[i * arbvar + (1 - arbvar)])) obsloglik <- sum(log(apply(sapply(comp, rbind), 1, sum))) ll <- obsloglik restarts <- 0 while (diff > epsilon & iter < maxit) { xmu <- lapply(1:k, function(i) apply(res[[i]], 2, sum)) z = matrix(nrow = n, ncol = k) for (i in 1:n) { for (j in 1:k) { z.denom = c() for (h in 1:k) { z.denom = c(z.denom, lambda[h]/lambda[j] * (s[j * arbvar + (1 - arbvar)]/s[h * arbvar + (1 - arbvar)])^m * exp(-0.5 * ((1/s[h * arbvar + (1 - arbvar)]^2) * xmu[[h]][i] - (1/s[j * arbvar + (1 - arbvar)]^2) * xmu[[j]][i]))) } z[i, j] = 1/sum(z.denom) } } z = z/apply(z,1,sum) # z[,k]=1-apply(as.matrix(z[,(1:(k-1))]),1,sum) lambda.new <- apply(z, 2, mean) if (sum(lambda.new < 1e-08)>0 || is.na(sum(lambda.new))) { sing <- 1 } else { if(arbmean){ mu.new <- sapply(1:k, function(i) sum(t(z[, i] * t(x))))/(m * apply(z, 2, sum)) } else { mu.new <- sum(sapply(1:k, function(i) 
sum(t(z[, i] * t(x)))))/(m * n) } res <- lapply(1:k, function(i) (x - mu.new[i * arbmean + (1 - arbmean)])^2) if (arbvar) { s.new <- sqrt(sapply(1:k, function(i) sum(t(z[, i] * t(res[[i]]))))/(m * apply(z, 2, sum))) } else s.new <- sqrt(sum(sapply(1:k, function(i) sum(t(z[, i] * t(res[[i]])))))/(m * n)) lambda <- lambda.new mu <- mu.new s <- s.new s.2 <- s^2 sing <- sum(is.nan(z)) comp <- lapply(1:k, function(i) lambda[i] * (2 * pi * s.2[i * arbvar + (1 - arbvar)])^(-m/2) * exp(-0.5 * apply(res[[i]], 2, sum)/s.2[i * arbvar + (1 - arbvar)])) newobsloglik <- sum(log(apply(sapply(comp, rbind), 1, sum))) } if (sing > 0 || is.na(newobsloglik) || abs(newobsloglik) == Inf ){#|| sum(z) != n) { cat("Need new starting values due to singularity...", "\n") restarts <- restarts + 1 if(restarts>15) stop("Too many tries!") tmp <- repnormmix.init(x = x, k = k, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda mu <- tmp$mu s <- tmp$s s.2 <- s^2 k <- tmp$k diff <- 1 iter <- 0 res <- lapply(1:k, function(i) (x - mu[i * arbmean + (1 - arbmean)])^2) comp <- lapply(1:k, function(i) lambda[i] * (2 * pi * s.2[i * arbvar + (1 - arbvar)])^(-m/2) * exp(-0.5 * apply(res[[i]], 2, sum)/s.2[i * arbvar + (1 - arbvar)])) obsloglik <- sum(log(apply(sapply(comp, rbind), 1, sum))) ll <- obsloglik } else { diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", obsloglik, "\n") } } } scale.order = order(s) sigma.min = min(s) if (iter == maxit) { cat("WARNING! 
NOT CONVERGENT!", "\n") } cat("number of iterations=", iter, "\n") if(arbmean == FALSE){ z = z[,scale.order] colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=as.data.frame(x), lambda = lambda[scale.order], mu = mu, sigma = sigma.min, scale = s[scale.order]/sigma.min, loglik = obsloglik, posterior = z[,scale.order], all.loglik=ll, restarts = restarts, ft="repnormmixEM") class(a) = "mixEM" a } else { colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=as.data.frame(x), lambda = lambda, mu = mu, sigma = s, loglik = obsloglik, posterior = z, all.loglik=ll, restarts = restarts, ft="repnormmixEM") class(a) = "mixEM" a } } mixtools/R/plotMCMC.R0000755000176200001440000000232211665556372014114 0ustar liggesusersplot.mixMCMC <- function(x, trace.plots = TRUE, summary.plots = FALSE, burnin = 2000, ...){ mix.object <- x if (!inherits(mix.object, "mixMCMC")) stop("Use only with \"mixMCMC\" objects!") if(trace.plots==TRUE){ k<-mix.object$components theta<-mix.object$theta p.k=ncol(theta) p=p.k/k name.theta<-colnames(theta) par(mfrow=c(p,k)) for(i in 1:p){ for(j in 1:k){ plot(theta[,(i-1)*k+j],type="l",ylab=name.theta[(i-1)*k+j]) } } } #regmixMH if(is.matrix(mix.object$x) == TRUE && is.null(mix.object$y) == FALSE && summary.plots == TRUE){ y<-mix.object$y n<-length(y) x<-mix.object$x p<-ncol(x) k<-mix.object$components theta<-mix.object$theta if(p!=2 || sum(x[,1])!=n){ stop(paste("This only works for simple linear regression!","\n")) } par(mfrow=c(1,1)) plot(x[,2],y,main="Credible Regions",xlab="Predictor",ylab="Response") #plot(theta[-c(1:burnin),seq(1,2*k-1,by=2)],theta[-c(1:burnin),seq(2,2*k,by=2)],col=0) for(i in 1:k){ #points(theta[-c(1:burnin),2*i-1],theta[-c(1:burnin),2*i],col=(i+1)) regcr(beta=cbind(theta[-c(1:burnin),2*i-1],theta[-c(1:burnin),2*i]),col=(i+1), x=x[,2], plot=TRUE,...) 
kern.T <- function (x, xi, h) {
  ## Triangular kernel evaluated at u = (xi - x)/h, scaled by 1/h.
  ## NOTE(review): there is no indicator for |u| <= 1, so the value is
  ## negative outside the nominal support -- callers appear to clamp the
  ## resulting weights (see regmixEM.loc); confirm before relying on it.
  u <- (xi - x) / h
  (1 - abs(u)) / h
}

summary.npEM <- function(object, ...) {
  ## Summary method for npEM fits: computes the posterior-weighted mean and
  ## variance of every (block, component) pair, plus bookkeeping counts.
  post <- object$post
  sums <- colSums(post)
  normpost <- sweep(post, 2, sums, "/")   # each column rescaled to sum to one
  n <- NROW(object$data)                  # number of observations
  r <- NCOL(object$data)                  # coordinates per observation
  m <- length(object$lambdahat)           # number of mixture components
  B <- if (r > 1) max(object$blockid) else 1
  lambda <- sums / n  # computed for parity with original; not returned
  means <- variances <- NULL
  for (b in seq_len(B)) {
    if (r > 1) {
      coords <- object$blockid == b
      xx <- as.vector(object$data[, coords])
    } else {
      coords <- 1
      xx <- as.vector(object$data)
    }
    sc <- sum(coords)                     # block size (1 in univariate case)
    M <- V <- NULL
    for (j in seq_len(m)) {
      # Replicate per-subject weights across the block's coordinates.
      wts <- rep(normpost[, j] / sc, sc)
      mj <- sum(xx * wts)
      M <- c(M, mj)
      V <- c(V, sum((xx - mj)^2 * wts))
    }
    means <- rbind(means, M)
    variances <- rbind(variances, V)
  }
  rownames(means) <- rownames(variances) <- paste("block", 1:B)
  colnames(means) <- colnames(variances) <- paste("component", 1:m)
  ans <- list(n = n, m = m, r = r, B = B, blockid = object$blockid,
              means = means, variances = variances)
  class(ans) <- "summary.npEM"
  ans
}
# Replacement for package mvtnorm's density functions, kept here so that
# mixtools does not need mvtnorm as a dependency.  Each ROW of y is one
# d-dimensional observation.
dmvnorm <- function(y, mu = NULL, sigma = NULL) {
  ## Multivariate normal density: exponentiate the log-density.
  exp(logdmvnorm(y, mu = mu, sigma = sigma))
}

logdmvnorm <- function(y, mu = NULL, sigma = NULL) {
  ## Log multivariate normal density using a single QR decomposition of
  ## sigma, which supplies both log|sigma| (from the diagonal of R) and the
  ## Mahalanobis distances (via qr.solve), avoiding separate eigen /
  ## mahalanobis calls.
  if (is.vector(y))
    y <- matrix(y, nrow = 1)
  d <- ncol(y)
  if (!is.null(mu))
    y <- sweep(y, 2, mu, "-")             # center each row at mu
  if (is.null(sigma))
    sigma <- diag(d)                      # default: identity covariance
  logtwopi <- 1.8378770664093454836       # log(2*pi)
  qrsig <- qr(sigma)
  logdet <- sum(log(abs(diag(qrsig$qr)))) # log|sigma| from the R factor
  if (nrow(y) == 1) {
    mahal <- as.vector(y %*% qr.solve(qrsig, t(y)))
  } else {
    mahal <- rowSums((y %*% qr.solve(qrsig)) * y)
  }
  -0.5 * (mahal + logdet + d * logtwopi)
}
subject u <- match(blockid, unique(blockid)) if (is.matrix(mu0)) m <- dim(mu0)[1] # mu0=centers else m <- mu0 # mu0=number of clusters if(!samebw && !is.matrix(bw)) { bw <- matrix(bw, nrow=max(u), ncol=m) } z.hat <- matrix(0, nrow = n, ncol = m) tt0 <- proc.time() # for total time ## Initial Values if(m == 1) z.hat <- matrix(1, nrow = n, ncol = m) else{ kmeans <- kmeans(x, mu0) for(j in 1:m) z.hat[kmeans$cluster==j, j] <- 1 } iter <- 0 if (stochastic) { sumpost <- matrix(0, n, m) } finished <- FALSE lambda <- matrix(0, nrow = maxiter, ncol = m) loglik <- NULL orderx <- xx <- list() for(k in 1:max(u)) { xx[[k]] <- as.vector(x[, u==k]) if (!samebw) { orderx[[k]] = order(xx[[k]]) # only needed for IQR calculation for bw } } ### Cfunction <- ifelse(samebw, "KDErepeated", "KDErepeatedbw") while (!finished) { iter <- iter + 1 bw.old <- bw t0 <- proc.time() ## Note: Enter loop assuming E-step is done -- i.e., z.hat in place ## M-Step lambda[iter, ] <- colMeans(z.hat) ## density estimation step if (stochastic) { z <- t(apply(z.hat, 1, function(prob) rmultinom(1, 1, prob))) cs <- colSums(z) z.tmp <- sweep(z, 2, cs, "/") z.tmp[, cs==0] <- 1/NROW(z.tmp) # Just in case } else { cs <- colSums(z.hat) z.tmp <- sweep(z.hat, 2, cs, "/") z.tmp[, cs==0] <- 1/NROW(z.tmp) # Just in case } fkernel <- matrix(1, n, m) h <- bw # This is for samebw == TRUE for (k in 1:max(u)) { r2 <- sum(u == k) # block size if (!samebw) { wts <- apply(z.tmp, 2, function(z) rep(z/r2, r2)) variances <- colSums(wts * outer(xx[[k]], colSums(wts * xx[[k]]), '-')^2) iqr <- apply(as.matrix(wts[orderx[[k]],]), 2, wIQR, xx[[k]][orderx[[k]]], already.sorted=TRUE, already.normalized=TRUE) h <- bw[k, ] <- 0.9 * pmin(sqrt(variances), iqr/1.34) * pmax(1,r2*n*lambda[iter, ])^(-1/5) # Note: Doesn't allow "sample size" < 1. 
# browser() } if(samebw){ ans <- .C(C_KDErepeated, n = as.integer(n), m = as.integer(m), r = as.integer(r2), x = as.double(x[,u==k]), h = as.double(h), z = as.double(z.tmp), f = double(n*m), PACKAGE="mixtools") } else{ ans <- .C(C_KDErepeatedbw, n = as.integer(n), m = as.integer(m), r = as.integer(r2), x = as.double(x[,u==k]), h = as.double(h), z = as.double(z.tmp), f = double(n*m), PACKAGE="mixtools") } fkernel <- fkernel * matrix(ans$f, ncol = m) } lambda.f <- sweep(fkernel, 2, lambda[iter, ], "*") ## E-step (for next iteration) z.hat <- lambda.f/rowSums(lambda.f) loglik <- c(loglik,sum(log(rowSums(lambda.f)))) # log-likelihood finished <- iter >= maxiter if (stochastic) { sumpost <- sumpost + z.hat } else if (iter > 1) { # This convergence criterion may be too simplistic: maxchange <- max(abs(lambda[iter,] - lambda[iter-1,])) if (!samebw) maxchange <- max(maxchange, max(abs(bw.old - bw))) finished <- finished | (maxchange < eps) } if (verb) { t1 <- proc.time() cat("iteration", iter, " lambda ", round(lambda[iter, ], 4)) cat(" time", (t1 - t0)[3], "\n") } } if (!samebw) { rownames(bw) <- paste("block", 1:max(u)) colnames(bw) <- paste("component", 1:m) } if (verb) { tt1 <- proc.time() cat("lambda ", round(lambda[iter, ], 4)) cat(", total time", (tt1 - tt0)[3], "s\n") } if (stochastic) { return(structure(list(data = x, posteriors = sumpost/iter, lambda = lambda, bandwidth = bw, blockid = u, lambdahat = colMeans(lambda), loglik = loglik), class="npEM")) } else { return(structure(list(data = x, posteriors = z.hat, lambda = lambda[1:iter,], bandwidth = bw, blockid = u, lambdahat = lambda[iter,], loglik = loglik), class="npEM")) } } # Not sure whether the following function is really necessary: npEMindrepbw <- function (x, mu0, blockid = 1:ncol(x), bw = bw.nrd0(as.vector(as.matrix(x))), eps = 1e-08, maxiter = 500, stochastic = FALSE, verb = TRUE){ npEM(x=x, mu0=mu0, blockid=blockid, bw=bw, samebw=FALSE, eps=eps, maxiter=maxiter, stochastic=stochastic, verb=verb) } 
mixtools/R/gammamixEM.R0000755000176200001440000002027213616551035014511 0ustar liggesusersgammamixEM <- function(x, lambda = NULL, alpha = NULL, beta = NULL, k = 2, mom.start = TRUE, fix.alpha = FALSE, epsilon = 1e-08, maxit = 1000, maxrestarts = 20, verb = FALSE){ if(!is.null(alpha) & is.numeric(fix.alpha)) stop(paste("Both alpha and fix.alpha cannot be numeric!", "\n")) if(!is.null(lambda)|!is.null(beta)) k <- max(length(lambda),length(beta)) x <- as.vector(x) y <- x^(1/3) n <- length(x) if(is.logical(fix.alpha)&(fix.alpha==FALSE)){ cond <- 1 } else if(is.logical(fix.alpha)&(fix.alpha==TRUE)){ cond <- 2 } else cond <- 3 fn.alpha <- function(alpha,beta,z,x) log(beta)+sum(z*log(x))/sum(z)-digamma(alpha) fn.alpha.2 <- function(alpha,beta,z,x) (log(beta)+sum(z*log(x))/sum(z)-digamma(alpha))^2 fn.beta <- function(z,x,alpha) sum(z)/sum(z*x)*alpha fn.alpha2 <- function(alpha,beta,z,x) sum(z%*%cbind(log(beta)))/length(x)+sum(t(z)%*%cbind(log(x)))/length(x)-digamma(alpha) fn.alpha2.2 <- function(alpha,beta,z,x) (sum(z%*%cbind(log(beta)))/length(x)+sum(t(z)%*%cbind(log(x)))/length(x)-digamma(alpha))^2 if(mom.start){ out <- try(normalmixEM(y,k=k,maxit=5000,epsilon=1e-5,maxrestarts=10),silent=TRUE) if(class(out)=="try-error"){ tmp <- gammamix.init(x = x, lambda = lambda, alpha = alpha, beta = beta, k = k) } else{ z <- out$posterior wt.mean <- sapply(1:k,function(i) weighted.mean(x,w=z[,i])) wt.var <- sapply(1:k,function(i) sum(z[,i] * (x - wt.mean[i])^2)/((n-1)*sum(z[,i])/n)) shape.mom <- wt.mean^2/wt.var scale.mom <- wt.var/wt.mean shape.mle <- try(sapply(1:k, function(i) uniroot(fn.alpha,interval=c(0.000001,1000000),beta=1/scale.mom[i],z=z[,i],x=x)$root),silent=TRUE) if(class(shape.mle)=="try-error") shape.mle <- sapply(1:k, function(i) nlminb(shape.mom[i],fn.alpha.2,lower=0,beta=1/scale.mom[i],z=z[,i],x=x)$par) scale.mle <- sapply(1:k, function(i) 1/fn.beta(z=z[,i],x=x,alpha=shape.mle[i])) lambda.mle <- apply(z,2,mean) tmp <- 
list(lambda=lambda.mle,alpha=scale.mle,beta=shape.mle) } } else tmp <- gammamix.init(x = x, lambda = lambda, alpha = alpha, beta = beta, k = k) lambda.mle <- tmp$lambda scale.mle <- tmp$beta if(cond==2){ shape.mle <- rep(mean(tmp$alpha),k) } else if(cond==1){ shape.mle <- tmp$alpha } else shape.mle <- rep(fix.alpha,k) dens <- function(x, lambda, alpha, beta) { k <- length(lambda) temp = sapply(1:k, function(j) dgamma(x, shape = alpha[j], scale = beta[j])) temp = t(lambda * t(temp)) temp } iter <- 0 mr <- 0 diff <- epsilon + 1 dens1 <- dens(x=x, lambda=lambda.mle, alpha=shape.mle, beta=scale.mle) old.obs.ll <- sum(log(apply(dens1, 1, sum))) ll <- old.obs.ll if(cond==1){ while (diff > epsilon && iter < maxit) { old.shape.mle <- shape.mle old.scale.mle <- scale.mle old.lambda.mle <- lambda.mle z <- dens1/apply(dens1, 1, sum) #M-step shape.mle <- try(sapply(1:k, function(i) uniroot(fn.alpha,interval=c(0.000001,10000),beta=1/old.scale.mle[i],z=z[,i],x=x)$root),silent=TRUE) if(class(shape.mle)=="try-error") shape.mle <- sapply(1:k, function(i) nlminb(old.shape.mle[i],fn.alpha.2,lower=0,beta=1/scale.mle[i],z=z[,i],x=x)$par) scale.mle <- sapply(1:k, function(i) 1/fn.beta(z=z[,i],x=x,alpha=shape.mle[i])) lambda.mle <- apply(z,2,mean) dens1 <- dens(x=x, lambda=lambda.mle, alpha=shape.mle, beta=scale.mle) new.obs.ll <- sum(log(apply(dens1, 1, sum))) diff <- new.obs.ll - old.obs.ll old.obs.ll <- new.obs.ll ll <- c(ll, old.obs.ll) iter = iter + 1 if (verb) { cat("iteration =", iter, " log-lik diff =", diff, " log-lik =", new.obs.ll, "\n") } if(is.na(diff)){ cat("Note: Choosing new starting values.", "\n") if (mr == maxrestarts) stop(paste("Try different number of components?", "\n")) mr <- mr + 1 tmp <- gammamix.init(x = x, lambda = lambda, alpha = alpha, beta = beta, k = k) lambda.mle <- tmp$lambda if(cond==2){ shape.mle <- rep(mean(tmp$alpha),k) } else if(cond==1){ shape.mle <- tmp$alpha } else shape.mle <- rep(fix.alpha,k) scale.mle <- tmp$beta iter <- 0 diff <- epsilon + 1 
dens1 <- dens(x=x, lambda=lambda.mle, alpha=shape.mle, beta=scale.mle) old.obs.ll <- sum(log(apply(dens1, 1, sum))) ll <- old.obs.ll } } } else if(cond==2){ while (diff > epsilon && iter < maxit) { old.shape.mle <- shape.mle old.scale.mle <- scale.mle old.lambda.mle <- lambda.mle z <- dens1/apply(dens1, 1, sum) #M-step shape.mle <- try(rep(uniroot(fn.alpha2,interval=c(0.000001,10000),beta=1/old.scale.mle,z=z,x=x)$root,k),silent=TRUE) if(class(shape.mle)=="try-error") shape.mle <- rep(nlminb(old.shape.mle[1],fn.alpha2,lower=0,beta=1/old.scale.mle,z=z,x=x)$par,k) scale.mle <- sapply(1:k, function(i) 1/fn.beta(z=z[,i],x=x,alpha=shape.mle[1])) lambda.mle <- apply(z,2,mean) dens1 <- dens(x=x, lambda=lambda.mle, alpha=shape.mle, beta=scale.mle) new.obs.ll <- sum(log(apply(dens1, 1, sum))) diff <- new.obs.ll - old.obs.ll old.obs.ll <- new.obs.ll ll <- c(ll, old.obs.ll) iter = iter + 1 if (verb) { cat("iteration =", iter, " log-lik diff =", diff, " log-lik =", new.obs.ll, "\n") } if(is.na(diff)){ cat("Note: Choosing new starting values.", "\n") if (mr == maxrestarts) stop(paste("Try different number of components?", "\n")) mr <- mr + 1 tmp <- gammamix.init(x = x, lambda = lambda, alpha = alpha, beta = beta, k = k) lambda.mle <- tmp$lambda if(cond==2){ shape.mle <- rep(mean(tmp$alpha),k) } else if(cond==1){ shape.mle <- tmp$alpha } else shape.mle <- rep(fix.alpha,k) scale.mle <- tmp$beta iter <- 0 diff <- epsilon + 1 dens1 <- dens(x=x, lambda=lambda.mle, alpha=shape.mle, beta=scale.mle) old.obs.ll <- sum(log(apply(dens1, 1, sum))) ll <- old.obs.ll } } } else{ while (diff > epsilon && iter < maxit) { old.scale.mle <- scale.mle old.lambda.mle <- lambda.mle z <- dens1/apply(dens1, 1, sum) #M-step scale.mle <- sapply(1:k, function(i) 1/fn.beta(z=z[,i],x=x,alpha=shape.mle[1])) lambda.mle <- apply(z,2,mean) dens1 <- dens(x=x, lambda=lambda.mle, alpha=shape.mle, beta=scale.mle) new.obs.ll <- sum(log(apply(dens1, 1, sum))) diff <- new.obs.ll - old.obs.ll old.obs.ll <- new.obs.ll ll 
multmix.init <- function(y, lambda = NULL, theta = NULL, k = 2) {
  ## Starting values for multmixEM: a random row-stochastic theta (k x p)
  ## and random mixing proportions lambda, unless the caller supplies them.
  ## When theta (resp. lambda) is given, k is taken from it instead of the
  ## k argument; lambda is consulted last, so it wins if both are supplied.
  n <- nrow(y)  # kept for parity with the original (not used below)
  p <- ncol(y)
  if (is.null(theta)) {
    theta <- matrix(runif(k * p), k, p)
    theta <- theta / rowSums(theta)   # normalize each component's row
  } else {
    k <- nrow(theta)
  }
  if (is.null(lambda)) {
    lambda <- runif(k)
    lambda <- lambda / sum(lambda)    # proportions sum to one
  } else {
    k <- length(lambda)
  }
  list(lambda = lambda, theta = theta, k = k)
}
{ if (arb.R == arb.sigma) stop("Change either 'arb.R' or 'arb.sigma'!") v = 1 while (v == 1) { if (arb.R) { H0 = regmixEM.mixed(y = y, x = x, w = w, arb.R = TRUE, arb.sigma = FALSE, lambda = lambda, sigma = sigma, mu = mu, R = R, alpha = alpha, ...) p = nrow(H0$posterior.beta[[1]]) k = length(H0$lambda) if (is.null(w)) alpha = NULL else alpha = H0$alpha # n.i=sapply(y,length) # N=length(y) # tmp=apply(H1$posterior.z*n.i,2,sum) # common.sig=sum(tmp/N*H1$sigma) # H1 = regmixEM.mixed(y = y, x = x, w = w, arb.R = TRUE, # arb.sigma = TRUE, lambda = H0$lambda, sigma = rep(H0$sigma,k), # mu = H0$mu, R = H0$R, alpha = alpha, ...) H1 = regmixEM.mixed(y = y, x = x, w = w, arb.R = TRUE, arb.sigma = TRUE, lambda = H0$lambda, sigma = NULL, mu = H0$mu, R = H0$R, alpha = alpha, ...) D = 2 * (H1$loglik - H0$loglik) df = k - 1 alpha = 1 - pchisq(D, df = df) } else { H0 = regmixEM.mixed(y = y, x = x, w = w, arb.R = FALSE, arb.sigma = TRUE, lambda = lambda, sigma = sigma, mu = mu, R = R, alpha = alpha, ...) p = nrow(H0$posterior.beta[[1]]) k = length(H0$lambda) if (is.null(w)) alpha = NULL else alpha = H0$alpha # N=length(y) # tmp=(apply(H1$posterior.z,2,sum))/N # common.R=matrix(0,ncol=p,nrow=p) # for(i in 1:length(H1$lambda)){ # common.R=common.R+tmp[i]*H1$R[[i]] # } # H1 = regmixEM.mixed(y = y, x = x, w = w, arb.R = TRUE, # arb.sigma = TRUE, lambda = H0$lambda, sigma = H0$sigma, # mu = H0$mu, R = lapply(1:k,function(i) H0$R), alpha = alpha, ...) H1 = regmixEM.mixed(y = y, x = x, w = w, arb.R = TRUE, arb.sigma = TRUE, lambda = H0$lambda, sigma = H0$sigma, mu = H0$mu, R = NULL, alpha = alpha, ...) 
D = 2 * (H1$loglik - H0$loglik) df = p * (p + 1) * (k - 1)/2 alpha = 1 - pchisq(D, df = df) } if (D < 0) { v = 1 lambda=NULL sigma=NULL mu=NULL R=NULL alpha=NULL} else v = 2 } a = list(chi.sq = D, df = df, p.value = alpha) a } mixtools/R/normalmixEM2comp.R0000755000176200001440000000331311665556372015670 0ustar liggesusers# fast function for two-component univariate normal mixture normalmixEM2comp <- function(x, lambda, mu, sigsqrd, eps=1e-8, maxit=1000, verb=FALSE) { arbvar <- (length(sigsqrd)==2) mu1 <- mu[1]; mu2 <- mu[2] sigsqrd1 <- sigsqrd[1]; sigsqrd2 <- sigsqrd[arbvar+1] mx <- mean(x) const <- length(x) * 0.918938533204673 # i.e., times log(2*pi)/2 dl <- 1 + eps iter<-0 ll <- rep(0, maxit+1) a1<-(x-mu1)^2; b1<-(lambda/sqrt(sigsqrd1))*exp(-a1/2/sigsqrd1) a2<-(x-mu2)^2; b2<-((1-lambda)/sqrt(sigsqrd2))*exp(-a2/2/sigsqrd2) l <- sum(log(b1+b2)) while (dl>eps && iter= 0)) { xe.v1 <- diag(sqrt(xe.v)) } xvalue <- cbind(xe$vectors) xvalue.1 <- solve(xvalue) out <- xvalue %*% xe.v1 %*% xvalue.1 out } mixtools/R/rmvnorm.R0000755000176200001440000000155611665556372014206 0ustar liggesusers# Alternative version of rmvnorm to eliminate dependence of mixtools # on additional package 'mvtnorm' # Uses eigen decomposition assuming symmetric==TRUE. Don't know how efficient # this might be relative to other approaches, but some suggest this is among # the most efficient methods to find a matrix square root. 
rmvnorm <- function(n, mu = NULL, sigma = NULL) {
  ## Draw n multivariate normal vectors (one per row) without depending on
  ## the mvtnorm package.  A symmetric eigendecomposition of sigma provides
  ## the matrix square root used to color the standard normal draws.
  if (is.null(mu)) {
    if (is.null(sigma)) {
      return(rnorm(n))                 # both defaults: univariate N(0,1)
    }
    mu <- rep(0, nrow(sigma))
  } else if (is.null(sigma)) {
    sigma <- diag(length(mu))
  }
  d <- length(mu)
  if (d != nrow(sigma) || d != ncol(sigma))
    stop("length of mu must equal nrow and ncol of sigma")
  e <- eigen(sigma, symmetric = TRUE)
  if (any(e$val < 0))
    stop("Numerically negative definite covariance matrix")
  # V diag(sqrt(val)) V' is a symmetric square root of sigma.
  t(mu + e$vec %*% (t(e$vec) * sqrt(e$val)) %*% matrix(rnorm(n * d), d, n))
}

density.npEM <- function (x, u = NULL, component = 1, block = 1,
                          scale = FALSE, ...) {
  ## "density" method for npEM objects: weighted kernel density estimate of
  ## one component within one block, evaluated on a grid (or at the points
  ## u, if supplied).  Does not use the FFT, unlike density.default.
  blockid <- x$blockid
  if (is.null(blockid)) {
    coords <- 1
  } else {
    coords <- blockid == block
    if (!any(coords))
      stop("Illegal value of block argument")
  }
  m <- NCOL(x$post)
  xx <- as.vector(as.matrix(x$data)[, coords])
  bw <- if (is.matrix(x$bandwidth)) x$bandwidth[block, component]
        else x$bandwidth
  if (is.null(u)) {
    # Default grid: data range padded by four bandwidths on each side.
    u <- seq(min(xx) - 4 * bw, max(xx) + 4 * bw, len = 250)
  }
  if (component > m || component < 1)
    stop("Illegal value of component argument")
  if (!is.null(x$symmetric) && x$symmetric) {
    # Symmetric location model: pool all components after centering each at
    # its estimated location, then shift the evaluation grid accordingly.
    n <- length(xx)
    d <- wkde(x = rep(xx, m) - rep(x$muhat, each = n),
              u = u - x$muhat[component],
              w = as.vector(x$post), bw = bw, sym = TRUE)
  } else {
    n <- NROW(x$data)
    wts <- rep(x$post[, component], sum(coords))
    d <- wkde(xx, u = u, w = wts, bw = bw)
  }
  if (scale)
    d <- d * x$lambdahat[component]     # scale by the mixing proportion
  structure(list(x = u, y = d, bw = bw, n = n, call = match.call(),
                 data.name = deparse(substitute(x)), has.na = FALSE),
            class = "density")
}
algorithm (in the sense of Meng and Rubin, Biometrika 1993) ## to search for a local maximum of the likelihood surface for a ## univariate finite mixture of normals with possible equality ## constraints on the mean and stdev parameters. normalmixEM <- function (x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, mean.constr = NULL, sd.constr = NULL, epsilon = 1e-08, maxit = 1000, maxrestarts=20, verb = FALSE, fast=FALSE, ECM = FALSE, arbmean = TRUE, arbvar = TRUE) { warn <- options(warn=-1) # Turn off warnings x <- as.vector(x) tmp <- normalmix.init(x = x, lambda = lambda, mu = mu, s = sigma, k = k, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda mu <- tmp$mu sigma <- tmp$s k <- tmp$k arbvar <- tmp$arbvar arbmean <- tmp$arbmean if (fast==TRUE && k==2 && arbmean==TRUE) { a <- normalmixEM2comp (x, lambda=lambda[1], mu=mu, sigsqrd=sigma^2, eps=epsilon, maxit=maxit, verb=verb) } else { z <- parse.constraints(mean.constr, k=k, allsame=!arbmean) meancat <- z$category; meanalpha <- z$alpha z <- parse.constraints(sd.constr, k=k, allsame=!arbvar) sdcat <- z$category; sdalpha <- z$alpha ECM <- ECM || any(meancat != 1:k) || any(sdcat != 1) n <- length(x) notdone <- TRUE restarts <- 0 while(notdone) { # Initialize everything notdone <- FALSE tmp <- normalmix.init(x = x, lambda = lambda, mu = mu, s = sigma, k = k, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda mu <- tmp$mu k <- tmp$k sigma <- tmp$s var <- sigma^2 diff <- epsilon+1 iter <- 0 postprobs <- matrix(nrow = n, ncol = k) mu <- rep(mu, k)[1:k] sigma <- rep(sigma,k)[1:k] # Initialization E-step here: z <- .C(C_normpost, as.integer(n), as.integer(k), as.double(x), as.double(mu), as.double(sigma), as.double(lambda), res2 = double(n*k), double(3*k), post = double(n*k), loglik = double(1), PACKAGE = "mixtools") postprobs <- matrix(z$post, nrow=n) res <- matrix(z$res2, nrow=n) ll <- obsloglik <- z$loglik while (diff > epsilon && iter < maxit) { # ECM loop, 1st M-step: condition on sigma, update lambda and mu 
lambda <- colMeans(postprobs) mu[meancat==0] <- meanalpha[meancat==0] if (max(meancat)>0) { for(i in 1:max(meancat)) { w <- which(meancat==i) if (length(w)==1) { mu[w] <- sum(postprobs[,w]*x) / (n*lambda[w]) } else { tmp <- t(postprobs[,w])*(meanalpha[w]/sigma[w]^2) mu[w] <- meanalpha[w] * sum(t(tmp)*x) / sum(tmp*meanalpha[w]) } } } if (ECM) { # If ECM==FALSE, then this is a true EM algorithm and # so we omit the E-step between the mu and sigma updates # E-step number one: z <- .C(C_normpost, as.integer(n), as.integer(k), as.double(x), as.double(mu), as.double(sigma), as.double(lambda), res2 = double(n*k), double(3*k), post = double(n*k), loglik = double(1), PACKAGE = "mixtools") postprobs <- matrix(z$post, nrow=n) res <- matrix(z$res2, nrow=n) # ECM loop, 2nd M-step: condition on mu, update lambda and sigma lambda <- colMeans(postprobs) # Redundant if ECM==FALSE } sigma[sdcat==0] <- sdalpha[sdcat==0] if (max(sdcat)>0) { for(i in 1:max(sdcat)) { w <- which(sdcat==i) if (length(w)==1) { sigma[w] <- sqrt(sum(postprobs[,w]*res[,w]) / (n*lambda[w])) } else { tmp <- t(postprobs[,w]) / sdalpha[w] sigma[w] <- sdalpha[w] * sqrt(sum(t(tmp) * res[,w])/ (n * sum(lambda[w]))) } } if(any(sigma < 1e-08)) { notdone <- TRUE cat("One of the variances is going to zero; ", "trying new starting values.\n") restarts <- restarts + 1 lambda <- mu <- sigma <- NULL if(restarts>maxrestarts) { stop("Too many tries!") } break } } # E-step number two: z <- .C(C_normpost, as.integer(n), as.integer(k), as.double(x), as.double(mu), as.double(sigma), as.double(lambda), res2 = double(n*k), double(3*k), post = double(n*k), loglik = double(1), PACKAGE = "mixtools") postprobs <- matrix(z$post, nrow=n) res <- matrix(z$res2, nrow=n) newobsloglik <- z$loglik diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration =", iter, " log-lik diff =", diff, " log-lik =", obsloglik, "\n") print(rbind(lambda, mu, sigma)) } } } if (iter == maxit) { 
cat("WARNING! NOT CONVERGENT!", "\n") } cat("number of iterations=", iter, "\n") if(arbmean == FALSE){ scale.order = order(sigma) sigma.min = min(sigma) postprobs = postprobs[,scale.order] colnames(postprobs) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, lambda = lambda[scale.order], mu = mu, sigma = sigma.min, scale = sigma[scale.order]/sigma.min, loglik = obsloglik, posterior = postprobs, all.loglik=ll, restarts=restarts, ft="normalmixEM") } else { colnames(postprobs) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, lambda = lambda, mu = mu, sigma = sigma, loglik = obsloglik, posterior = postprobs, all.loglik=ll, restarts=restarts, ft="normalmixEM") } } class(a) = "mixEM" options(warn) # Reset warnings to original value a } mixtools/R/wquantile.R0000755000176200001440000000066011665556372014512 0ustar liggesuserswquantile <- function(wt=rep(1,length(x)), x, probs, already.sorted=FALSE, already.normalized=FALSE) { if(!already.sorted) { wt <- wt[o<-order(x)] x <- x[o] } if(!already.normalized) { wt <- wt/sum(wt) } x[findInterval(probs,cumsum(wt))] } wIQR <- function(wt=rep(1,length(x)), x, already.sorted=FALSE, already.normalized=FALSE) { diff(wquantile(wt, x, c(.25,.75), already.sorted, already.normalized)) } mixtools/R/regmixEMmixed.R0000755000176200001440000003735311665556372015256 0ustar liggesusersregmixEM.mixed = function (y, x, w = NULL, sigma = NULL, arb.sigma = TRUE, alpha = NULL, lambda = NULL, mu = NULL, rho=NULL, R = NULL, arb.R = TRUE, k = 2, ar.1 = FALSE, addintercept.fixed = FALSE, addintercept.random = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) { Omega.def <- function(rho,n){ Omega.1stcol <- NULL for(i in 1:n){ Omega.1stcol <- c(Omega.1stcol,rho^(i-1)) } A=Omega.1stcol for(i in 1:(n-1)){ A=cbind(A,c(rep(0,i),Omega.1stcol[1:(n-i)])) } B=A+t(A) diag(B)=rep(1,n) B=B/(1-rho^2) B } rho.max<-function(rho,z,sigma,y,x,w,alpha,beta,V.beta){ n.i <- length(y) Omega.ij <- Omega.def(rho=rho,n=n.i) -.5*z*(log(det(Omega.ij))+(1/sigma)*(t(y- 
as.vector(w %*% alpha)-x%*%beta))%*%solve(Omega.ij)%*%(y- as.vector(w %*% alpha)-x%*%beta)+sum(diag(solve(Omega.ij) %*% x %*% V.beta %*% t(x) ))) } `%L*%` = function(x, y) lapply(1:length(x), function(z) { t(x[[z]]) %*% y[[z]] }) N <- length(y) n <- sapply(y, length) if (addintercept.random) { x.1 = lapply(1:N, function(i) cbind(1, x[[i]])) x = x.1 } if (is.null(w)) { w = as.list(rep(0, N)) mixed=FALSE } else mixed=TRUE p <- ncol(x[[1]]) if (mixed == TRUE) { if (addintercept.fixed) { w.1 = lapply(1:N, function(i) cbind(1, w[[i]])) w = w.1 } tww = w %L*% w tww.inv = 0 for (i in 1:N) { tww.inv = tww.inv + tww[[i]] } tww.inv = solve(tww.inv) twx = w %L*% x twy = w %L*% y q <- ncol(w[[1]]) } tmp <- regmix.mixed.init(y = y, x = x, w = w, sigma = sigma, arb.sigma = arb.sigma, alpha = alpha, lambda = lambda, mu = mu, R = R, arb.R = arb.R, k = k, mixed = mixed, addintercept.fixed = addintercept.fixed, addintercept.random = addintercept.random) alpha <- tmp$alpha lambda <- tmp$lambda mu <- tmp$mu R <- tmp$R sigma <- tmp$sigma s.2 <- sigma^2 if(ar.1==TRUE & is.null(rho)==TRUE){ rho=matrix(runif(N*k,-1,1),nrow=N,ncol=k) } txx = x %L*% x diff <- 1 iter <- 0 obsloglik=-Inf ll <- NULL restarts <- 0 if(ar.1) Omega.k <- lapply(1:N, function(i) lapply(1:k, function(j) Omega.def(rho=rho[i,j],n=n[i]))) else Omega.k <- lapply(1:N, function(i) lapply(1:k, function(j) diag(1, n[i]))) while (diff > epsilon && iter < maxit) { txOx<-lapply(1:N, function(i) lapply(1:k, function(j) t(x[[i]]) %*% solve(Omega.k[[i]][[j]]) %*% x[[i]])) if (arb.R) { V.beta = list() for (i in 1:N) { V.beta[[i]] = list() V.beta[[i]] = lapply(1:k, function(j) { solve(txOx[[i]][[j]]/s.2[j * arb.sigma + (1 - arb.sigma)] + solve(R[[j]])) }) } beta = list() for (i in 1:N) { beta[[i]] = matrix(nrow = p, ncol = k) for (j in 1:k) { beta[[i]][, j] = V.beta[[i]][[j]] %*% (t(x[[i]])/s.2[j * arb.sigma + (1 - arb.sigma)]) %*% solve(Omega.k[[i]][[j]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, j]) + mu[, j] } } 
z = matrix(nrow = N, ncol = k) for (i in 1:N) { for (j in 1:k) { z.denom = c() for (m in 1:k) { z.denom = c(z.denom, lambda[m]/lambda[j] * (det(x[[i]] %*% R[[j]] %*% t(x[[i]]) + s.2[j * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[j]])/det(x[[i]] %*% R[[m]] %*% t(x[[i]]) + s.2[m * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[m]]))^(0.5) * exp(-0.5 * (t(y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, m]) %*% solve(x[[i]] %*% R[[m]] %*% t(x[[i]]) + s.2[m * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[m]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, m]) - t(y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, j]) %*% solve(x[[i]] %*% R[[j]] %*% t(x[[i]]) + s.2[j * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[j]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, j])))) } z[i, j] = 1/sum(z.denom) } } # z[,k]=1-apply(as.matrix(z[,(1:(k-1))]),1,sum) z = z/apply(z,1,sum) sing <- sum(is.na(z)) sum.z = apply(z, 2, sum) lambda.new <- sum.z/N if (sum(lambda.new < 1e-08)>0 || is.na(sum(lambda.new))) { sing <- 1 } else { mu.new <- matrix(nrow = p, ncol = k) for (j in 1:k) { mu.2 <- matrix(sapply(1:N, function(i) { beta[[i]][, j] }), ncol = N) mu.new[, j] <- apply(t(apply(t(mu.2), 2, "*", matrix(z[, j], nrow = 1))), 1, sum) } mu.new = t(apply(t(mu.new), 2, "/", matrix(sum.z, nrow = 1))) if (mixed == TRUE) { a.vec <- c() A.mat <- 0 for (i in 1:N) { for (j in 1:k) { A.mat = A.mat + (z[i, j]/s.2[j * arb.sigma + (1 - arb.sigma)])*tww[[i]] a.vec = cbind(a.vec, (z[i, j]/s.2[j * arb.sigma + (1 - arb.sigma)]) * (twy[[i]] - twx[[i]] %*% beta[[i]][, j])) # a.vec = cbind(a.vec, z[i, j] * (twy[[i]] - # twx[[i]] %*% beta[[i]][, j])) } } # alpha.new <- tww.inv %*% apply(a.vec, 1, sum) alpha.new <- solve(A.mat) %*% apply(a.vec, 1, sum) alpha = alpha.new } s.tr <- matrix(nrow = N, ncol = k) z.n <- matrix(nrow = N, ncol = k) for (i in 1:N) { for (j in 1:k) { s.tr[i, j] = sum(diag(z[i, j] * (solve(Omega.k[[i]][[j]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - 
x[[i]] %*% beta[[i]][, j]) %*% t(y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% beta[[i]][, j]) + x[[i]] %*% V.beta[[i]][[j]] %*% t(x[[i]])))) z.n[i, j] = z[i, j] * n[i] } } if (arb.sigma) { s.2.new <- apply(s.tr, 2, sum)/apply(z.n, 2, sum) } else s.2.new <- sum(s.tr)/sum(n) R.new = list() for (j in 1:k) { r.2 <- 0 for (i in 1:N) { r <- z[i, j] * (V.beta[[i]][[j]] + t(t(beta[[i]][, j] - mu.new[, j])) %*% (beta[[i]][, j] - mu.new[, j])) r.2 <- r.2 + r } R.new[[j]] = r.2/sum(z[, j]) } lambda = lambda.new mu = mu.new s.2 = s.2.new R = R.new L = matrix(nrow = N, ncol = k) L = t(sapply(1:N, function(i) { sapply(1:k, function(j) { dmvnorm(as.vector(y[[i]]), as.vector(x[[i]] %*% mu[, j] + as.vector(w[[i]] %*% alpha)), x[[i]] %*% R[[j]] %*% t(x[[i]]) + s.2[j * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[j]]) }) })) L.n = t(apply(t(L), 2, "*", matrix(lambda, nrow = 1))) newobsloglik = sum(log(apply(L.n, 1, sum))) } } else { R.inv = solve(R) V.beta = list() for (i in 1:N) { V.beta[[i]] = list() V.beta[[i]] = lapply(1:k, function(j) { solve(txOx[[i]][[j]]/s.2[j * arb.sigma + (1 - arb.sigma)] + R.inv) }) } beta = list() for (i in 1:N) { beta[[i]] = matrix(nrow = p, ncol = k) for (j in 1:k) { beta[[i]][, j] = V.beta[[i]][[j]] %*% (t(x[[i]])/s.2[j * arb.sigma + (1 - arb.sigma)]) %*% solve(Omega.k[[i]][[j]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, j]) + mu[, j] } } z = matrix(nrow = N, ncol = k) for (i in 1:N) { for (j in 1:k) { z.denom = c() for (m in 1:k) { z.denom = c(z.denom, lambda[m]/lambda[j] * (det(x[[i]] %*% R %*% t(x[[i]]) + s.2[j * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[j]])/det(x[[i]] %*% R %*% t(x[[i]]) + s.2[m * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[m]]))^(0.5) * exp(-0.5 * (t(y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, m]) %*% solve(x[[i]] %*% R %*% t(x[[i]]) + s.2[m * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[m]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, m]) - t(y[[i]] - as.vector(w[[i]] %*% 
alpha) - x[[i]] %*% mu[, j]) %*% solve(x[[i]] %*% R %*% t(x[[i]]) + s.2[j * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[j]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% mu[, j])))) } z[i, j] = 1/sum(z.denom) } } # z[,k]=1-apply(as.matrix(z[,(1:(k-1))]),1,sum) z = z/apply(z,1,sum) sing <- sum(is.nan(z)) sum.z = apply(z, 2, sum) lambda.new <- sum.z/N if (sum(lambda.new < 1e-08)>0 || is.na(sum(lambda.new))) { sing <- 1 } else { mu.new <- matrix(nrow = p, ncol = k) for (j in 1:k) { mu.2 <- matrix(sapply(1:N, function(i) { beta[[i]][, j] }), ncol = N) mu.new[, j] <- apply(t(apply(t(mu.2), 2, "*", matrix(z[, j], nrow = 1))), 1, sum) } mu.new = t(apply(t(mu.new), 2, "/", matrix(sum.z, nrow = 1))) if (mixed == TRUE) { a.vec <- c() A.mat <- 0 for (i in 1:N) { for (j in 1:k) { A.mat = A.mat + (z[i, j]/s.2[j * arb.sigma + (1 - arb.sigma)])*tww[[i]] a.vec = cbind(a.vec, (z[i, j]/s.2[j * arb.sigma + (1 - arb.sigma)]) * (twy[[i]] - twx[[i]] %*% beta[[i]][, j])) # a.vec = cbind(a.vec, z[i, j] * (twy[[i]] - # twx[[i]] %*% beta[[i]][, j])) } } # alpha.new <- tww.inv %*% apply(a.vec, 1, sum) alpha.new <- solve(A.mat) %*% apply(a.vec, 1, sum) alpha = alpha.new } s.tr <- matrix(nrow = N, ncol = k) z.n <- matrix(nrow = N, ncol = k) for (i in 1:N) { for (j in 1:k) { s.tr[i, j] = sum(diag(z[i, j] * (solve(Omega.k[[i]][[j]]) %*% (y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% beta[[i]][, j]) %*% t(y[[i]] - as.vector(w[[i]] %*% alpha) - x[[i]] %*% beta[[i]][, j]) + x[[i]] %*% V.beta[[i]][[j]] %*% t(x[[i]])))) z.n[i, j] = z[i, j] * n[i] } } if (arb.sigma) { s.2.new <- apply(s.tr, 2, sum)/apply(z.n, 2, sum) } else s.2.new <- sum(s.tr)/sum(n) r.3 <- 0 for (j in 1:k) { r.2 <- 0 for (i in 1:N) { r <- z[i, j] * (V.beta[[i]][[j]] + t(t(beta[[i]][, j] - mu.new[, j])) %*% (beta[[i]][, j] - mu.new[, j])) r.2 <- r.2 + r } r.3 <- r.3 + r.2 } R.new = r.3/N lambda = lambda.new mu = mu.new s.2 = s.2.new R = R.new if(ar.1){ rho.new<-matrix(0,nrow=N,ncol=k) for(i in 1:N){ for(j in 1:k){ 
rho.new[i,j] <- optimize(rho.max,interval=c(-1,1),maximum=TRUE,tol=.Machine$double.eps,z=z[i,j],sigma=s.2[j * arb.sigma + (1 - arb.sigma)],y=y[[i]],x=x[[i]],w=w[[i]],alpha=alpha,beta=beta[[i]][,j],V.beta=V.beta[[i]][[j]])$max } } rho = rho.new Omega.k <- lapply(1:N, function(i) lapply(1:k, function(j) Omega.def(rho=rho[i,j],n=n[i] ))) } L = matrix(nrow = N, ncol = k) L = t(sapply(1:N, function(i) { sapply(1:k, function(j) { dmvnorm(as.vector(y[[i]]), as.vector(x[[i]] %*% mu[, j] + as.vector(w[[i]] %*% alpha)), x[[i]] %*% R %*% t(x[[i]]) + s.2[j * arb.sigma + (1 - arb.sigma)] * Omega.k[[i]][[j]]) }) })) L.n = t(apply(t(L), 2, "*", matrix(lambda, nrow = 1))) newobsloglik = sum(log(apply(L.n, 1, sum))) } } if (sing > 0 || is.na(newobsloglik) || abs(newobsloglik) == Inf || newobsloglik < obsloglik) {# || sum(z) != N cat("Need new starting values due to singularity...", "\n") restarts <- restarts + 1 if(restarts>15) stop("Too many tries!") tmp <- regmix.mixed.init(y = y, x = x, w = w, arb.sigma = arb.sigma, arb.R = arb.R, k = k, mixed = mixed, addintercept.fixed = addintercept.fixed, addintercept.random = addintercept.random) alpha <- tmp$alpha lambda <- tmp$lambda mu <- tmp$mu R <- tmp$R sigma <- tmp$sigma s.2 <- sigma^2 diff <- 1 iter <- 0 obsloglik=-Inf ll <- NULL } else { diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", obsloglik, "\n") } } } if (iter == maxit) { cat("WARNING! 
NOT CONVERGENT!", "\n") } cat("number of iterations=", iter, "\n") colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, y=y, w=w, lambda = lambda, mu = mu, R = R, sigma = sqrt(s.2), alpha = alpha, loglik = obsloglik, posterior.z = z, posterior.beta = beta, all.loglik = ll, restarts=restarts, ft="regmixEM.mixed") class(a) = "mixEM" a } mixtools/R/gammamixinit.R0000755000176200001440000000151613210325520015137 0ustar liggesusersgammamix.init <- function (x, lambda = NULL, alpha = NULL, beta = NULL, k = 2) { n <- length(x) if (is.null(lambda)) { cond = TRUE while(cond){ lambda = runif(k) lambda = lambda/sum(lambda) if(min(lambda)<0.05) cond=TRUE else cond=FALSE } } else k = length(lambda) if (k == 1) { x.bar = mean(x) x2.bar = mean(x^2) } else { x.sort = sort(x) ind = floor(n * cumsum(lambda)) x.part = list() x.part[[1]] = x.sort[1:(ind[1] + 1)] for (j in 2:k) { x.part[[j]] = x.sort[ind[j - 1]:ind[j]] } x.bar = sapply(x.part, mean) x2.bar = sapply(lapply(x.part, "^", 2), mean) } if (is.null(alpha)) { alpha = x.bar^2/(x2.bar - x.bar^2) } if (is.null(beta)) { beta = (x2.bar - x.bar^2)/x.bar } list(lambda = lambda, alpha = alpha, beta = beta, k = k) } mixtools/R/kernB.R0000755000176200001440000000012511665556372013536 0ustar liggesuserskern.B <- function (x, xi, h, g = 0) { ((1-((xi-x)/h)^2)^g)/beta(.5,g+1)/h } mixtools/R/lambdapert.R0000755000176200001440000000021511736711332014574 0ustar liggesuserslambda.pert <- function (lambda, pert) { temp = logit(lambda) + pert temp2 = inv.logit(temp) new.lambda = temp2/sum(temp2) new.lambda } mixtools/R/mvnpEM.R0000644000176200001440000002533213060302103013647 0ustar liggesusers### multivariate npEM with multivariate conditionnally independent blocks ### functions for mvnpEM with mvwkde code all in C ###################################################################### ## mvnpEM final (late 2015) version calling C codes for mvwkde's ## both for same and adaptive bandwidths ## adding a default bandwidth matrix passed 
as an argument ## bwdefault: a r-vector of fixed bandwidths per coordinates ## default to Silverman's rule per coordinates, only when samebw=TRUE ## bw returns the standard deviation of the kernel density ## init= for initialization option (June 2015) ###################################################################### mvnpEM <- function (x, mu0, blockid = 1:ncol(x), samebw = TRUE, bwdefault = apply(x,2,bw.nrd0), init=NULL, eps=1e-8, maxiter = 500, verb = TRUE){ x = as.matrix(x) n = nrow(x); r = ncol(x) bk = blockid #b_k=l indicate the kth coordinate belongs to lth block. B = max(bk) # total number of blocks # ! Caution if blocks are numbered not consecutively, use unique? loglik = NULL tt0 <- proc.time() # for total time # coordinate of X in lth block (block of dl-variate densities) # dl[l] = number of coordinates in l-th block # xx[[j]] = (n,dl[l]) -matrix of block l observations dl=c(); for (l in 1:B){dl[l] = sum(bk==l)} xx = list() for (l in 1:B){ xx[[l]] = as.matrix(x[,bk==l]) # coordinate of X in lth block } if (is.matrix(mu0)) m <- dim(mu0)[1] # mu0 = initial means else m <- mu0 # when mu0 = number of components ## NB initial means used only if kmeans used for init ## z.hat are posterior zij's, initialized using kmeans if (is.null(init)){ z.hat <- matrix(0, nrow = n, ncol = m) if (m == 1) z.hat <- matrix(1, nrow = n, ncol = m) # ?? m can never be 1 else{ kmeans <- kmeans(x, mu0) for(j in 1:m) z.hat[kmeans$cluster==j, j] <- 1} } if (! 
is.null(init)) {z.hat <- init} # ToDo: check consistency dim (n,m) lambda = matrix(0, maxiter, m) # storing lambda's along iterations finished = FALSE bw=matrix(0,m,r) # only used for adaptbw version if (samebw){ # bw computed once for all iterations and mvwkde calls # bw <- apply(x,2,bw.nrd0) # bw <- bwdefault^2*diag(r) # diagonal bw matrix appropriate for mvwkde bw <- bwdefault*diag(r) # diagonal bw matrix, not variances, appropriate for mvwkde } ## loop iter = 0 #start point while (!finished){ iter = iter + 1 #step "iter + 1" # t0 = proc.time() ################################################################# ## M-step for the Euclidean parameter lambda[iter,] = colMeans(z.hat) #\lambda_j^(t+1) = 1/n* \sum_i^n (p_ij)^t ################################################################# ## Weighted Kernel Density function step # wts[,j] = normalized weights for component j cs <- colSums(z.hat) # or use n*lambda[iter,] to avoid re summing over n ? wts <- sweep(z.hat, 2, cs, "/") wts [, cs==0] <- 1/NROW(wts) if (samebw){ # default samebw=TRUE lambda.f <- matrix(NA,n,m) # lambda.f[i,j] = lambda_j f_j(x_i) fkernel <- matrix(1,n,m) # Faster new version: no loop in j, all is done in C block per block for (l in 1:B) { d = dl[l] tmp=as.matrix(bw[bk==l,bk==l]) # updated here, not sqrt anymore h=as.vector(diag(tmp)); ans <- .C(C_mvwkde_samebw, n = as.integer(n),d = as.integer(d), m = as.integer(m), h = as.double(h), x=as.double(xx[[l]]), u=as.double(xx[[l]]), z=as.double(wts), f=double(n*m)) fl = matrix(ans$f,n,m) # f_jl estimate fkernel <- fkernel*fl # product over blocks } lambda.f <- sweep(fkernel, 2, lambda[iter, ], "*") } # end of samebw case # Adaptive bandwidth case - NOT YET BEST C VERSION (for all j simultaneously ?) 
if (!samebw) { for (k in 1:r) { # compute adaptive bw h_jk^t # use o <- order(x[,k]) to re-use it twice var <- colSums(wts*outer(x[,k], colSums(wts*x[,k]),'-')^2) iqr <- apply(as.matrix(wts[order(x[,k]),]),2,wIQR, x[,k][order(x[,k])], already.sorted=TRUE, already.normalized=TRUE) bw[,k] <- 0.9*pmin(sqrt(var), iqr/1.34)*pmax(1,n*lambda[iter, ])^(-1/5) } lambda.f <- matrix(NA,n,m) # lambda.f[i,j] = lambda_j f_j(x_i) fkernel <- matrix(1,n,m) #for (j in 1:m){ #lda.f = lambda[iter, j] for (l in 1:B){ d = dl[l]; H = as.matrix(bw[, bk==l]); ans <- .C(C_mvwkde_adaptbw, n = as.integer(n),d = as.integer(d), m = as.integer(m), H = as.double(H), x=as.double(xx[[l]]), u=as.double(xx[[l]]), z=as.double(wts), f=double(n*m)) fl = matrix(ans$f,n,m)# f_jl estimate fkernel <- fkernel*fl # product over blocks } lambda.f <- sweep(fkernel, 2, lambda[iter, ], "*") #} }# end of adaptive case ################################################################ ## E-step (for next iteration) z.hat = lambda.f/rowSums(lambda.f) #p_ij^t, Z_ij^t, update z.hat loglik <- c(loglik,sum(log(rowSums(lambda.f)))) # log-likelihood ## End finished = iter >= maxiter if (iter>1) {maxchange = max(abs(lambda[iter,] - lambda[iter-1,])) finished = finished | (maxchange < eps) } if (verb) { # t1 <- proc.time() cat("iteration", iter, ": lambda ", round(lambda[iter, ], 4),"\n") # cat(" time", (t1 - t0)[3], "\n") } }# End while ##### Output if (verb) { tt1 <- proc.time() # total time ending cat("# iter", iter) cat(", lambda ", round(lambda[iter, ], 4)) cat(", total time", (tt1 - tt0)[3], "s\n") } # final bandwidth output depending on samebw switch if (samebw) bw <- diag(bw) return(structure(list(data = x, posteriors = z.hat, lambda = lambda[1:iter,], blockid = bk, samebw=samebw, bandwidth = bw, lambdahat = lambda[iter,], loglik = loglik), class = "mvnpEM")) } # End function. 
####################################################### # plot marginal (univariate) wkde's from mvnpEM output # a plot.mvnpEM method for mvnpEM class # lambda, mu, v: true parameters, for gaussian true models only # ... passed to hist first level plotting plot.mvnpEM <- function(x, truenorm=FALSE, lambda=NULL, mu=NULL, v=NULL, lgdcex =1, ...) { mix.object <- x if (!inherits(mix.object, "mvnpEM")) a <- x x <- a$data; r <- ncol(x); m <- ncol(a$posteriors) rr <- sqrt(r); frr <- floor(rr) if (frr == rr) par(mfrow=c(rr,rr)) else { if (frr*(frr+1) >= r) par(mfrow=c(frr, frr+1)) else par(mfrow=c(frr+1, frr+1))} # if ((r %% 2) == 0) par(mfrow=c(floor(r/2), floor(r/2))) else { # par(mfrow=c(floor(r/2)+1, floor(r/2)+1))} for (k in 1:r) { # for each coord (whatever its block) xx <- x[,k] uk <- seq(min(xx),max(xx),len=100) tt <- paste("block", a$blockid[k],", coord",k) hist(xx, col=8, freq=F, xlab="", main=tt, ...) for (j in 1:m) { wj <- a$post[,j] # weights for component j if (a$samebw) bw <- a$bandwidth[k] else { bw <- a$bandwidth[j,k]} f <- wkde(xx, u=uk, w=wj, bw=bw, sym=F) lines(uk, a$lambdahat[j]*f, col=j) # add true Gaussian marginal if requested/known if (truenorm) { lines(uk,lambda[j]*dnorm(uk,mean=mu[j,k], sd=sqrt(v[j,k,k])), lty=2,lwd=2, col=j)} } if (a$samebw) subt <- paste("same bw:",round(a$bandwidth[k],3)) else { subt <- "adapt bw: " for (j in 1:m) subt <- paste(subt, round(a$bandwidth[j,k],3)) } title(sub=subt) } lgd <- NULL for (j in 1:m) lgd <- c(lgd, paste("comp",j)) legend("topright", lgd, col=1:m, lty=1, cex=lgdcex) } ########################################################### print.mvnpEM <- function(x,...) 
{ n <- NROW(x$data) r <- NCOL(x$data) m <- length(x$lambdahat) cat(paste("Observations:", n, "\n")) cat(paste("Coordinates per observation:", r, "\n")) cat(paste("Mixture components:", m, "\n")) if (r > 1) { B <- max(x$blockid) cat(paste("Blocks (of conditionally iid coordinates):",B, "\n\n")) } dp = match(c("data", "posteriors", "lambda", "mu"), names(x), nomatch = 0) print.default(structure(x[-dp], class = class(x)), ...) invisible(x) } print.summary.mvnpEM <-function (x, digits = 3, ...) { if (x$r > 1) cat(paste(x$n, "observations,", x$r, "coordinates,", x$m, "components, and", x$B, "blocks.\n\n")) else cat(paste(x$n, "univariate observations, and", x$m, "components.\n\n")) cat("Means (and std. deviations) for each component:\n") for (l in 1:x$B) { coords <- 1 if (x$r > 1) { coords <- x$blockid == l cat(paste(" Block #", l, ": Coordinate", sep = "")) cat(ifelse(sum(coords) > 1, "s ", " ")) cat(which(coords)) cat("\n") } if (sum(coords)==1) { for (j in 1:x$m){ cat(paste(" Component", j,": ")) cat(paste(signif(x$means[j,coords], digits), " (", signif(x$variances[[j]][coords,coords],digits), ") ", sep = "")) } cat("\n") } else { for (j in 1:x$m){ cat(paste(" Component", j,": ")) cat(paste(signif(x$means[j,coords ], digits), sep = "")) cat("\n") print(signif(x$variances[[j]][coords,coords], digits)) cat("\n") }} } } summary.mvnpEM <- function(object,...) 
## Starting values for normalmixEM.
##
## Any of lambda/mu/s left NULL is filled in randomly: the sorted data are
## split into k (slightly overlapping) bins, per-bin sample moments seed
## the draws, component sds come from inverse-exponential draws, means
## from normal draws around the bin means, and lambda from a rejection-
## sampled uniform simplex draw (no component below 0.05).
##
## x:       data vector.
## lambda:  mixing proportions (recycled to length k and renormalized).
## mu, s:   component means and standard deviations; supplying a vector of
##          length > 1 fixes k and forces arbmean/arbvar respectively.
## k:       number of components.
## arbmean, arbvar: whether means / variances differ across components.
## Returns a list with lambda, mu, s, k, arbvar, arbmean.
##
## NOTE: the random draws occur in exactly the same order and number as in
## the original implementation, so results under set.seed() are unchanged.
normalmix.init <- function (x, lambda = NULL, mu = NULL, s = NULL, k = 2, 
                            arbmean = TRUE, arbvar = TRUE) {
  ## Resolve k and the arbmean/arbvar switches from what was supplied.
  if (!is.null(s)) {
    arbvar <- (length(s) > 1)
    if (arbvar) 
      k <- length(s)
  }
  if (!is.null(mu)) {
    arbmean <- (length(mu) > 1)
    if (arbmean) {
      k <- length(mu)
      if (!is.null(s) && length(s) > 1 && k != length(s)) {
        stop("mu and sigma are each of length >1 but not of the same length.")
      }
    }
  }
  if (!arbmean && !arbvar) {
    stop("arbmean and arbvar cannot both be FALSE")
  }
  n <- length(x)
  x <- sort(x)
  ## Split the sorted data into k bins (adjacent bins may share points).
  bins <- vector("list", k)
  for (j in seq_len(k)) {
    bins[[j]] <- x[max(1, floor((j - 1) * n/k)):ceiling(j * n/k)]
  }
  if (is.null(s)) {
    ## Seed the sd draws with per-bin sample sds; replace degenerate
    ## zero sds by uniform draws on (0, sd(x)).
    sd.guess <- as.vector(sapply(bins, sd))
    zero <- sd.guess == 0
    if (any(zero)) 
      sd.guess[zero] <- runif(sum(zero), 0, sd(x))
    if (arbvar) {
      s <- 1/rexp(k, rate = sd.guess)
    }
    else {
      s <- 1/rexp(1, rate = mean(sd.guess))
    }
  }
  if (is.null(mu)) {
    mean.guess <- as.vector(sapply(bins, mean))
    if (arbmean) {
      mu <- rnorm(k, mean = mean.guess, sd = s)
    }
    else {
      mu <- rnorm(1, mean = mean(mean.guess), sd = mean(s))
    }
  }
  if (is.null(lambda)) {
    ## Reject simplex draws with any proportion under 0.05.
    repeat {
      lambda <- runif(k)
      lambda <- lambda/sum(lambda)
      if (min(lambda) >= 0.05) 
        break
    }
  }
  else {
    lambda <- rep(lambda, length.out = k)
    lambda <- lambda/sum(lambda)
  }
  list(lambda = lambda, mu = mu, s = s, k = k, arbvar = arbvar, 
       arbmean = arbmean)
}
lambda <- apply(z, 2, mean) lm.out = lapply(1:k, function(j) try(glm.fit(x, cbind(y, N - y), weights = z[, j], family = binomial()), silent = TRUE)) beta = sapply(lm.out, coef) xbeta <- x %*% beta comp <- t(t(dbinom(y, size = N, prob = inv.logit(xbeta))) * lambda) compsum <- apply(comp, 1, sum) newobsloglik <- sum(log(compsum)) if (abs(newobsloglik) == Inf || is.na(newobsloglik) || newobsloglik < obsloglik){# || sum(z) != n) { cat("Need new starting values due to singularity...", "\n") restarts <- restarts + 1 if(restarts>15) stop("Too many tries!") tmp <- logisregmix.init(y = y, x = x, N = N, k = k) lambda <- tmp$lambda beta <- tmp$beta k <- tmp$k diff <- 1 iter <- 0 xbeta <- x %*% beta comp <- t(t(dbinom(y, size = N, prob = inv.logit(xbeta))) * lambda) compsum <- apply(comp, 1, sum) obsloglik <- sum(log(compsum)) ll <- obsloglik } else { diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", obsloglik, "\n") } } } } if (iter == maxit) { cat("WARNING! NOT CONVERGENT!", "\n") } beta <- matrix(beta,ncol=k) rownames(beta) <- c(paste("beta", ".", 0:(p - 1), sep = "")) colnames(beta) <- c(paste("comp", ".", 1:k, sep = "")) colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) cat("number of iterations=", iter, "\n") a = list(x = x, y = y, lambda = lambda, beta = beta, loglik = obsloglik, posterior = z, all.loglik = ll, restarts=restarts, ft = "logisregmixEM") class(a) = "mixEM" a } mixtools/R/ise.npEM.R0000755000176200001440000000260011665556372014113 0ustar liggesusersise.npEM <- function(npEMout, component=1, block=1, truepdf, lower=-Inf, upper=Inf, plots = TRUE, ...){ # returns the Integrated Squared Error # between f_{comp,block} as estimated by npEM, # and truepdf #true2 <- function(u) truepdf(u, ...) 
coords <- npEMout$blockid == block bs <- sum(coords) # block size xx <- as.vector(npEMout$data[,coords]) # flatten data wts <- rep(npEMout$post[,component],bs) # duplicate weights if (is.matrix(npEMout$bandwidth)){ bw <- npEMout$bandwidth[block,component] } else bw <- npEMout$bandwidth integrand = function(u,...) { (wkde(xx,u,wts,bw) - truepdf(u,...))^2 } numint <- integrate(integrand,lower,upper, ...) if (plots) { # plot of estimated and truepdf ise <- paste(round(numint$value,4)) temp=paste(component, block, sep="") title = substitute(expression(paste("Integrated Squared Error for ", f[temp]," = ",ise,sep=""))) if (!is.finite(lower)) { lower <- min(xx) } if (!is.finite(upper)) { upper <- max(xx) } u <- seq(lower,upper, 0.01) fhat <- wkde(xx,u,wts,bw) ymax <- max(max(truepdf(u, ...)),max(fhat)) plot(u,fhat,type="l",ylim=c(0,ymax), main=eval(title),ylab="") legend("topleft",legend=c("true","fitted"),col=c(2,1),lty=c(1,1),lwd=c(2,1)) lines(u,truepdf(u, ...),lwd=2,col=2) } numint } mixtools/R/postbeta.R0000755000176200001440000000347611665556372014332 0ustar liggesuserspost.beta <- function(y, x, p.beta, p.z) { N = length(y) k = ncol(p.z) g = apply(p.z, 1, function(i) (1:length(i))[i == max(i)]) min.int=min(sapply(1:N, function(i) min(p.beta[[i]][1,]))) max.int=max(sapply(1:N, function(i) max(p.beta[[i]][1,]))) min.slp=min(sapply(1:N, function(i) min(p.beta[[i]][2,]))) max.slp=max(sapply(1:N, function(i) max(p.beta[[i]][2,]))) par(mfrow=c(2,2)) #All posterior regression lines. plot(c(min(sapply(x,min)),max(sapply(x,max))),c(min(sapply(y,min)),max(sapply(y,max))),type="n",xlab="x-values",ylab="y-values",main="Data and Posterior Regression Lines") sapply(1:length(x),function(i) points(x[[i]], y[[i]])) for(j in 1:k){ sapply(1:length(p.beta), function(i) abline(coef=p.beta[[i]][,j], col=(j+1))) } #Posterior regression lines chosen according to the membership probabilities. 
plot(c(min(sapply(x,min)),max(sapply(x,max))),c(min(sapply(y,min)),max(sapply(y,max))),type="n",xlab="x-values",ylab="y-values",main="Data and Most Probable Posterior Regression Lines") sapply(1:length(x),function(i) points(x[[i]], y[[i]])) sapply(1:length(p.beta), function(i) abline(coef=p.beta[[i]][,g[i]], col=(g[i]+1))) #All posterior beta values. plot(c(min.int,max.int),c(min.slp,max.slp),type="n",xlab="Posterior Intercepts",ylab="Posterior Slopes",main="All Posterior Regression Coefficients") for(j in 1:k){ sapply(1:length(p.beta), function(i) points(t(p.beta[[i]][,j]), col=(j+1))) } #Posterior beta values chosen according to the membership probabilities. min.max=c(min(sapply(p.beta,min)),max(sapply(p.beta,max))) plot(c(min.int,max.int),c(min.slp,max.slp),type="n",xlab="Posterior Intercepts",ylab="Posterior Slopes",main="Most Probable Posterior Regression Coefficients") sapply(1:length(p.beta), function(i) points(t(p.beta[[i]][,g[i]]), col=(g[i]+1))) } mixtools/R/regmix.init.R0000755000176200001440000000406711665556372014743 0ustar liggesusersregmix.init<-function (y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar = TRUE) { x <- as.matrix(x) n <- length(y) p <- ncol(x) if (addintercept) { x = x[, -1] } else x = x if (is.null(lambda)) { lambda = runif(k) lambda = lambda/sum(lambda) } else k = length(lambda) A = round(lambda * n) while(min(A)<4){ lambda = runif(k) lambda = lambda/sum(lambda) A = round(lambda * n) } A[k] = n - sum(A[1:(k - 1)]) B = c(0, cumsum(A)) w = cbind(y, x) w = w[order(w[, 1]), ] w.bin = list() for (j in 1:k) { w.bin[[j]] <- w[(B[j] + 1):B[j + 1], ] } if (addintercept) { lm.out <- lapply(1:k, function(i) lm(w.bin[[i]][, 1] ~ w.bin[[i]][, 2:p])) } else lm.out <- lapply(1:k, function(i) lm(w.bin[[i]][, 1] ~ w.bin[[i]][, 2:(p + 1)] - 1)) s.hyp = lapply(lm.out, anova) s.hyp = as.vector(sqrt(sapply(1:k, function(i) s.hyp[[i]]$Mean[length(s.hyp[[i]]$Mean)]))) s.hyp[(s.hyp<=0)|(is.na(s.hyp)==1)]=1 if 
(is.null(s)) { if (arbvar) { s = 1/rexp(k, rate = s.hyp) } else { s.hyp = mean(s.hyp) s = 1/rexp(1, rate = s.hyp) } } if (is.null(s) == FALSE && arbvar == TRUE) { k = length(s) } if (is.null(beta)) { x.x <- solve(t(x)%*%x) beta.hyp = matrix(sapply(lm.out, coef), ncol = k) beta = matrix(0, nrow = p, ncol = k) for (j in 1:k) { beta[, j] = rnorm(p, mean = as.vector(beta.hyp[, # j]), sd = (s[arbvar * j + (1 - arbvar)]*sqrt(diag(x.x))) ) j]), sd = (s.hyp[arbvar * j + (1 - arbvar)]*sqrt(diag(x.x))) ) } if (arbmean == FALSE) { beta = apply(beta, 1, mean) } } if (is.null(beta) == FALSE && arbmean == TRUE) { k = ncol(beta) } list(lambda = lambda, beta = beta, s = s, k = k) }mixtools/R/kernO.R0000755000176200001440000000010711665556372013553 0ustar liggesuserskern.O <- function (x, xi, h) { (pi/4)*cos(.5*pi*(xi-x)/h)/h } mixtools/R/ldc.R0000644000176200001440000000175413210334313013216 0ustar liggesusersldc <- function(data,class,score){ data = as.matrix(data) K = length(unique(class)) # The number of classes (expect the classes to be labeled 1, 2, 3, ..., K-1, K N = dim(data)[1] # The number of samples p = dim(data)[2] # The number of features # Compute the class dependent probabilities and class dependent centroids: Pi = c(table(class))/N M = aggregate(data,list(class),mean)[,-1] # Compute W: W = cov(data) # Compute M* = M W^{-1/2} using the eigen-decomposition of W : e = eigen(W) V = e$vectors W_Minus_One_Half = V %*% diag( 1/sqrt(e$values),nrow=p ) %*% t(V) MStar = as.matrix(M) %*% W_Minus_One_Half # Compute B* the covariance matrix of M* and its eigen-decomposition: if(p>1 & length(Pi)>1){ BStar = cov(MStar) e = eigen(BStar) VStar = e$vectors # 1st linear discriminant coordinate ldc1 = W_Minus_One_Half %*% VStar[,score] } else if(p==1){ ldc1 = c(W_Minus_One_Half) } else ldc1 = W_Minus_One_Half[,score] return(ldc1) } mixtools/R/augx.R0000755000176200001440000000110112112160772013413 0ustar liggesusersaug.x <- function(X,cp.locs,cp,delta = NULL){ X=as.matrix(X) 
if(is.null(delta)) delta=cp.locs if(length(cp.locs)!=length(delta) | length(cp.locs)!=sum(cp)){ stop(paste("You must specify a correct changepoint structure!", "\n")) } if(sum(cp)==0){ X } else{ new.x=NULL sum.cp=cumsum(cp) for(i in 1:length(cp)){ X.i=matrix(X[,i],ncol=max(1,cp[i]),nrow=length(X[,1])) if(cp[i]!=0){ CPs=cp.locs[(max(1,sum.cp[i-1]+1)):sum.cp[i]] SPs=delta[(max(1,sum.cp[i-1]+1)):sum.cp[i]] new.x=cbind(new.x,t(t(X.i)-SPs)*t(t(X.i)>CPs)) } } cbind(X,new.x) } } mixtools/R/repnormmixmodelsel.R0000755000176200001440000000274311737456006016423 0ustar liggesusersrepnormmixmodel.sel <- function (x, k = 2, ...) { aic <- NULL bic <- NULL caic <- NULL icl <- NULL AIC <- function(emout) { emout$loglik - (length(emout$mu) + length(emout$stdev) + length(emout$lambda) - 1) } BIC <- function(emout) { emout$loglik - log(nrow(x)) * (length(emout$mu) + length(emout$stdev) + length(emout$lambda) - 1)/2 } CAIC <- function(emout) { emout$loglik - (log(nrow(x)) + 1) * (length(emout$mu) + length(emout$stdev) + length(emout$lambda) - 1)/2 } ICL <- function(emout) { BIC(emout) - sum(emout$lambda * log(emout$lambda)) } for (i in 1:k) { if (i == 1) { avx <- as.vector(x) mu <- mean(avx) s <- sd(avx) loglik <- sum(dnorm(avx, mean=mu, sd=s, log=TRUE)) emout <- list(mu=mu, stdev=s, lambda=1, loglik=loglik) } else emout <- repnormmixEM(x, k = i, ...) 
aic[i] <- AIC(emout) bic[i] <- BIC(emout) caic[i] <- CAIC(emout) icl[i] <- ICL(emout) } out = rbind(aic, bic, caic, icl) Winner = apply(out, 1, function(x) (1:length(x))[x == max(x)]) rownames(out) = c("AIC", "BIC", "CAIC", "ICL") colnames(out) = 1:k cbind(out, Winner) } mixtools/R/regmixEM.R0000755000176200001440000001432711665556372014223 0ustar liggesusersregmixEM = function (y, x, lambda = NULL, beta = NULL, sigma = NULL, k = 2, addintercept = TRUE, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) { if(arbmean == FALSE && arbvar == FALSE){ stop(paste("Must change constraints on beta and/or sigma!","\n")) } s = sigma if (addintercept) { x = cbind(1, x) } n <- length(y) p <- ncol(x) tmp <- regmix.init(y = y, x = x, lambda = lambda, beta = beta, s = s, k = k, addintercept = addintercept, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda beta <- tmp$beta s <- tmp$s k <- tmp$k diff <- 1 iter <- 0 xbeta <- x %*% beta res <- (y - xbeta)^2 if(arbmean == FALSE){ res <- sapply(1:k,function(i) res) } # comp <- lapply(1:k, function(i) lambda[i] * dnorm(y, xbeta[,i * arbmean + (1 - arbmean)], # s[i * arbvar + (1 - arbvar)])) # comp <- sapply(comp, cbind) # compsum <- apply(comp, 1, sum) # obsloglik <- sum(log(compsum)) comp <- t((lambda/sqrt(2 * pi * s^2)) * t(exp(-t(t(res)/(2 * s^2))))) obsloglik <- sum(log(apply(comp, 1, sum))) ll <- obsloglik z = matrix(nrow = n, ncol = k) restarts <- 0 while (diff > epsilon && iter < maxit) { for (i in 1:n) { for (j in 1:k) { z.denom = c() for (h in 1:k) { z.denom = c(z.denom, (lambda[h]/lambda[j]) * (s[j * arbvar + (1 - arbvar)]/s[h * arbvar + (1 - arbvar)]) * exp(-0.5 * ((1/s[h * arbvar + (1 - arbvar)]^2) * res[i, h] - (1/s[j * arbvar + (1 - arbvar)]^2) * res[i, j]))) } z[i, j] = 1/sum(z.denom) } } # z[,k]=1-apply(as.matrix(z[,(1:(k-1))]),1,sum) z = z/apply(z,1,sum) lambda.new <- apply(z, 2, mean) if (sum(lambda.new < 1e-08)>0 || is.na(sum(lambda.new))) { sing <- 1 } else { if (arbmean == FALSE) { if 
(addintercept) { beta.new <- lm(y~x[,-1],weights=apply(t(t(z)/(s^2)),1,sum))$coef } else beta.new <- lm(y~x-1,weights=apply(t(t(z)/(s^2)),1,sum))$coef # beta.new <- sapply(lm.out, coef) # beta.new1 <- apply(t(apply(z,2,sum)*t(beta.new)),1,sum)/n # beta.new2 <- lm(y~x[,-1],weights=apply(t(t(z)/(s^2)),1,sum))$coef # beta.new<-as.vector(solve(t(x) %*% sweep(x, 1, t(t(z)/(s^2)), "*")) %*% apply(t(t(z)/(s^2))*y*x,2,sum) ) } else { if (addintercept) { lm.out <- lapply(1:k, function(i) lm(y ~ x[, -1], weights = z[, i])) } else lm.out <- lapply(1:k, function(i) lm(y ~ x - 1, weights = z[, i])) beta.new <- sapply(lm.out, coef) } xbeta.new <- x %*% beta.new res <- (y - xbeta.new)^2 if(arbmean == FALSE){ res <- sapply(1:k,function(i) res) } if (arbvar) { s.new <- sqrt(sapply(1:k, function(i) sum(z[, i] * (res[, i]))/sum(z[, i]))) } else s.new <- sqrt(sum(z * res)/n) lambda <- lambda.new beta <- beta.new xbeta <- x%*%beta s <- s.new sing <- sum(s < 1e-08) comp <- lapply(1:k, function(i) lambda[i] * dnorm(y, xbeta[,i * arbmean + (1 - arbmean)], s[i * arbvar + (1 - arbvar)])) comp <- sapply(comp, cbind) compsum <- apply(comp, 1, sum) newobsloglik <- sum(log(compsum)) # comp <- t((lambda/sqrt(2 * pi * s^2)) * t(exp(-t(t(res)/(2 * # s^2))))) # newobsloglik <- sum(log(apply(comp, 1, sum))) } if (sing > 0 || is.na(newobsloglik) || newobsloglik < obsloglik || abs(newobsloglik) == Inf){# || sum(z) != n) { cat("Need new starting values due to singularity...", "\n") restarts <- restarts + 1 if(restarts>15) stop("Too many tries!") tmp <- regmix.init(y = y, x = x, k = k, addintercept = addintercept, arbmean = arbmean, arbvar = arbvar) lambda <- tmp$lambda beta <- tmp$beta s <- tmp$s k <- tmp$k diff <- 1 iter <- 0 xbeta <- x %*% beta res <- (y - xbeta)^2 if(arbmean == FALSE){ res <- sapply(1:k,function(i) res) } # comp <- lapply(1:k, function(i) lambda[i] * dnorm(y, xbeta[,i * arbmean + (1 - arbmean)], # s[i * arbvar + (1 - arbvar)])) # comp <- sapply(comp, cbind) # compsum <- apply(comp, 
1, sum) # obsloglik <- sum(log(compsum)) comp <- t((lambda/sqrt(2 * pi * s^2)) * t(exp(-t(t(res)/(2 * s^2))))) obsloglik <- sum(log(apply(comp, 1, sum))) ll <- obsloglik } else { diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", obsloglik, "\n") } } } scale.order = order(s) sigma.min = min(s) if (iter == maxit) { cat("WARNING! NOT CONVERGENT!", "\n") } cat("number of iterations=", iter, "\n") if(arbmean == FALSE){ z=z[,scale.order] names(beta) <- c(paste("beta", ".", 0:(p-1), sep = "")) colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, y=y, lambda = lambda[scale.order], beta = beta, sigma = sigma.min, scale = s[scale.order]/sigma.min, loglik = obsloglik, posterior = z[,scale.order], all.loglik=ll, restarts = restarts, ft="regmixEM") class(a) = "mixEM" a } else { rownames(beta) <- c(paste("beta", ".", 0:(p-1), sep = "")) colnames(beta) <- c(paste("comp", ".", 1:k, sep = "")) colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, y=y, lambda = lambda, beta = beta, sigma = s, loglik = obsloglik, posterior = z, all.loglik=ll, restarts = restarts, ft="regmixEM") class(a) = "mixEM" a } }mixtools/R/flaremixEM.R0000755000176200001440000000101513616712264014515 0ustar liggesusersflaremixEM<-function(y, x, lambda = NULL, beta = NULL, sigma = NULL, alpha = NULL, nu=NULL, epsilon = 1e-04, maxit = 10000, verb = FALSE, restart=50){ j=1 while(j<=length(nu)){ temp=try(try.flare(y=y, x=x, lambda = lambda, beta = beta, sigma = sigma, alpha = alpha, nu=nu[j], epsilon = epsilon, maxit = maxit, verb = verb, restart=restart),silent=TRUE) if(any(class(temp)=="try-error")) j=j+1 else j=2^100 } if(j==(length(nu)+1)) stop(paste("Too many attempts. 
Select a different barrier constant.","\n")) temp } mixtools/R/segregmixinit.R0000755000176200001440000000606212112167614015342 0ustar liggesuserssegregmix.init <- function (y, x, lambda = NULL, beta = NULL, s = NULL, k = 2, seg.Z, psi, psi.locs = NULL) { n <- length(y) p <- ncol(x) psi.counts <- apply(psi>0,1,sum) if(is.null(lambda)|is.null(beta)|is.null(s)|is.null(psi.locs)){ if (is.null(psi.locs)) { psi.locs = vector("list",k) psi.locs = lapply(1:k, function(i) if(psi.counts[i]>0) vector("list",psi.counts[i]) else NULL) for(i in 1:k){ if(!is.null(psi.locs[[i]])){ temp.locs <- which(psi[i,]>0) temp.labs=NULL for(j in 1:length(temp.locs)){ psi.locs[[i]][[j]]=sort(runif(psi[i,temp.locs[j]],as.numeric(quantile(x[,temp.locs[j]],.05)),as.numeric(quantile(x[,temp.locs[j]],.95)))) temp.labs=c(temp.labs,colnames(x)[temp.locs[j]]) } names(psi.locs[[i]])=temp.labs } } } else k = length(psi.locs) xnam <- colnames(x) fmla <- as.formula(paste("y ~ ", paste(xnam, collapse= "+"))) TEMP.lm <- lm(fmla,data=x) EM.out <- regmixEM(TEMP.lm$res,TEMP.lm$fit,k=k,epsilon=1e-2) posts = apply(EM.out$post,1,which.max) if (is.null(lambda)) { lambda = EM.out$lambda if(length(unique(posts))!=k) posts=rep(1:k,n)[1:n] } else k = length(lambda) A = round(lambda * n) while (min(A) <= 4) { lambda = runif(k) lambda = lambda/sum(lambda) A = round(lambda * n) } w = cbind(y, x) w.bin = list() for (j in 1:k) { w.bin[[j]] <- w[posts==j, ] } all.w.bin=vector("list",gamma(k+1)) all.inds=perm(k,k) all.X.aug=all.w.bin all.lm.out=all.w.bin avg.res=NULL for(j in 1:length(all.w.bin)){ all.w.bin[[j]]=w.bin[all.inds[j,]] X.aug <- lapply(1:k, function(i) cbind(1,aug.x(w.bin[[all.inds[j,i]]][,-1],unlist(psi.locs[[i]]),psi[i,],delta=NULL))) sapply(X.aug,dim) lm.out <- lapply(1:k, function(i) lm(w.bin[[all.inds[j,i]]][, 1] ~ X.aug[[i]][,-1])) all.X.aug[[j]]=X.aug all.lm.out[[j]]=lm.out avg.res=c(avg.res,mean(as.vector(unlist(lapply(1:k,function(t) lm.out[[t]]$res)))^2)) } IND=which.min(avg.res) w.bin=all.w.bin[[IND]] 
X.aug=all.X.aug[[IND]] lm.out=all.lm.out[[IND]] s.hyp = lapply(lm.out, anova) s.hyp = as.vector(sqrt(sapply(1:k, function(i) tail(s.hyp[[i]]$Mean,1)))) s.hyp[(s.hyp <= 0) | (is.na(s.hyp) == 1)] = 1 if (is.null(s)) { s = 1/rexp(k, rate = s.hyp) } else k = length(s) if (is.null(beta)) { x.x <- lapply(1:k,function(i) try(solve(t(X.aug[[i]]) %*% X.aug[[i]]),silent=TRUE)) test <- sum(sapply(1:k, function(i) class(x.x[[i]])[1]=="try-error")) if(test>0) stop("Lapack Routine Error") beta.hyp = lapply(lm.out,coef) # matrix(sapply(lm.out, coef), ncol = k) beta = vector("list",k) for (j in 1:k) { beta[[j]] = rnorm(length(beta.hyp[[j]]),mean=as.vector(beta.hyp[[j]]), sd = (s.hyp[j] * sqrt(diag(x.x[[j]])))) } } else k = length(beta) } else{ for(i in 1:k){ if(!is.null(psi.locs[[i]])){ temp.locs <- which(psi[i,]>0) temp.labs=NULL for(j in 1:length(temp.locs)){ temp.labs=c(temp.labs,colnames(x)[temp.locs[j]]) } names(psi.locs[[i]])=temp.labs } } } list(lambda = lambda, beta = beta, s = s, k = k, psi.locs = psi.locs) } mixtools/R/spRMMSEM.R0000644000176200001440000003447613055603106014034 0ustar liggesusers########################################################### # semi-parametric Reliability Mixture Models with Censored data # for 2 components # D. Chauveau # ref: Bordes L. and Chauveau D. 
Computational Statistics (2016) ########################################################### # Simulate from a lognormal scale mixture # like model (3) in compstat paper # lambda = vector of component probabilities # xi = scaling parameter rlnormscalemix <- function(n, lambda=1, meanlog=1, sdlog=1, scale=0.1) { m <- length(lambda) # nb of components z <- sample(m, n, replace = TRUE, prob = lambda) # component indicator x <- rlnorm(n, meanlog, sdlog) x[z==2] <- x[z==2]/scale x } ############################################## # Kaplan-Meier survival estimate # vectorized version ONLY on ** already ordered data ** # cpd = (n,2) matrix where # cpd[,1] = t = ORDERED censored life data = min(x,c) # cpd[,2] = d = censoring indicator, 1=observed 0=censored # returned value = a step function object allowing to evaluate S() # at any point, needed by SEM for computing S(xi*t_i)'s KMod <- function(cpd, already.ordered=TRUE){ # order stat of t and d ordered accordingly if (already.ordered) { n <- dim(cpd)[1] s <- cumprod(1 - cpd[,2]/(n - (1:n) +1)) stepfun(cpd[,1], c(1,s))} # starts with 1 for t<=t(1) } ############################################## ## KM estimate integration # s = a stepfun object as returned by KMod() # returned value = int_0^max(t_i) S(u)du KMintegrate <- function(s) { ks <- knots(s) n <- length(ks) # number of knots=data points ks2 <- c(0,ks[1:(n-1)]) # shifted right hs <- c(1,s(ks)[1:(n-1)]) # heights of survival sum((ks-ks2)*hs) } ################################################ ### HAZARD RATE SMOOTH KERNEL ESTIMATES ### ################################################ # WKDE with Triangular Kernel and global bandwidth, vectorized # returns vector of K_h(t_i-u_j), j=1,...,length(u) # t = vector of data # u = vector of points at which K(u) is computed # w = weights defaults to 1/n, not forced to be normalized # bw=bandwidth, can be a n-vector or a scalar # nb: this is not specific for hazard rate, just a wkde # NB: triang_wkde is the default wkde called by 
HRkde() for hazard rate triang_wkde <- function(t, u=t, w=rep(1/length(t),length(t)), bw=rep(bw.nrd0(t),length(t))) { n <- length(t); p <- length(u) xx <- outer(t, u, function(a,b) a-b) # K[i,j] = (t_i-u_j) xx <- abs(xx) # not necessarily for all kernels, separated from outer h <- matrix(bw,n,p) # bw repeated over p columns # works also if a scalar bw is passed! xx <- xx/h # now xx is |t_i-u_j|/h_i K <- (xx <= 1)*(1-xx)/bw # K((t-u)/h)/h mw <- matrix(w, nrow=1) # weights for matrix product as.vector(mw %*% K) } ## Hazard Rate kernel density estimate based on *ordered* censored data # weighted kernel density estimate passed as function argument, # defaults to symmetric triangle kernel # cpd = (n,2) matrix where # cpd[,1] = t = ORDERED censored life data = min(x,c) # cpd[,2] = d = censoring indicator, 1=observed 0=censored # u = vector of points at which alpha() is evaluated, # defaults to t itself # bw = bandwidth *vector* for the kernel density estimate # kernelft = kernel definition # return value = alpha(.) 
evaluated at u_j's # NB: ordered data are necessary from weights definitions # (n-i+1) items at risk etc HRkde <- function(cpd, u = cpd[,1], kernelft = triang_wkde, bw = rep(bw.nrd0(as.vector(cpd[,1])), length(cpd[,1]))){ # gaussian case (not recommended) # if (kernelft==gaussian) HRkdeGauss(cpd,u,bw) else { n <- length(cpd[,1]) aw <- cpd[,2]/(n - (1:n) +1) # weights d(i)/(n-i+1) kernelft(cpd[,1], u, w=aw, bw=bw) # old, non vectorized version (deprecated) # nu <- length(u) # hr <- rep(0,nu) # for (k in 1:nu) { # K <- kernelft(cpd[,1], u[k], bw) # hr[k] <- sum(K*aw) # } # } # hr } ################################################################# ################################################################# ## Stochastic EM algorithm for semiparametric Scaling ## Reliability Mixture Models (RMM) with Censoring, 2 components # t = lifetime data, censored by random c if d is not NULL, in which case # t = min(x,c) and d = I(x <= c) # rate = scaling parameter # centers = initial centers for initial call to kmeans # averaged = TRUE if averaging performed at each iteration (cf Nielsen 2000) # NB: averaging can be done in (at least) 2 ways; # here the current theta for E & M steps is the average over the sequence, # but the theta^next not averaged is stored on the sequence # batchsize = number of last iterations to use for "averaging" unscaled samples # for computing "average" final KM and HR estimates # alpha() and S() # kernelft = kernel used in HRkde for hazard rate nonparametric estimate # # NB: since the sequence of parameters is a stoch. 
process, the # stopping criterion is usually not satisfied until maxit spRMM_SEM <- function (t, d = NULL, lambda = NULL, scaling = NULL, centers = 2, kernelft = triang_wkde, bw = rep(bw.nrd0(t),length(t)), averaged = TRUE, epsilon = 1e-08, maxit = 100, batchsize = 1, verb = FALSE) { k = 2 # fixed number of components for this model identifiability n <- length(t) if (is.null(d)) { d <- rep(1,n) # but better use specific algo for noncensored data cat("warning: undefined censoring indicator d replaced by 1's") cat(" i.e. all data are assumed observed") cat(" better use instead a specific St-EM algorithm for uncensored data") } ##### Initializations ##### # init function call to do later, in case lambda = scaling = NULL if (is.null(lambda)) lambda <- rep(1/k,k) # not USED for init!? if (is.null(scaling)) scaling <- 1/2 # ToDo better init using centers fro kmeans! # sequences for storing along iterations lambda_seq <- matrix(0, nrow = maxit, ncol = k) lambda_seq[1,] <- lambda scaling_seq <- rep(0,maxit) scaling_seq[1] <- scaling sumNaNs <- 0 # for total nb of NaN's in iterations while computing post # dll <- epsilon+1 # not applicable for SEM versions, always run maxit post <- z <- sumpost <- posthat <- matrix(0, nrow = n, ncol = k) qt=round(maxit/10); pc <- 0 # for printing of % of iteration done if (qt == 0) qt <- 1 # initialization for batch storing if (batchsize > maxit) batchsize <- maxit batch_t <- NULL # for storing batchsize last unscaled samples batch_d <- NULL # and associated event indicators # kmeans method for initial posterior and z matrix kmeans <- kmeans(t, centers=centers) for(j in 1:k) { z[kmeans$cluster==j, j] <- 1 } # init values for Survival s() and hazard rate a() # strategy: use a M-step with z from kmeans result zt <- z*t # recycling t m times => zt[i,] holds zt[,2] <- scaling*zt[,2] # t if comp 1 and xi*t if comp 2 newt <- rowSums(zt) # redressed "unscaled" sample newo <- order(newt) new.od <- cbind(newt,d)[newo,] # ordered data with associated 
d s <- KMod(new.od) # stepfun object, can be evaluated by s(t) # note: <=> to use KMsurvfit(newt,d) which calls survival package functions a1 <- HRkde(new.od, t, kernelft, bw=bw) # evaluated at the original t's a2 <- HRkde(new.od, scaling*t, kernelft, bw=bw) # and at scaling*t's ####### SEM iterations ####### iter <- 1 # while (dll > epsilon && iter < maxit) { # EM-only version while (iter < maxit) { ### E-step post[,1] <- ((lambda[1]*a1*s(t))^(d))*((lambda[1]*s(t))^(1-d)) post[,2] <- (lambda[2]*scaling*a2*s(scaling*t))^d # observed post[,2] <- post[,2]*(lambda[2]*s(scaling*t))^(1-d) # censored # print(post) rs <- rowSums(post) # post normalized per row post <- sweep(post, 1, rs, "/") # posteriors p_{ij}^t 's # check and solve NaN's: may cause theoretical pbs!? snans <- sum(is.na(post)) if (snans > 0) { post[is.na(post[,1]),] <- 1/k # NaN's replaced by uniform weights sumNaNs <- sumNaNs + snans if (verb) cat(snans, "NaN's in post: ") } sumpost <- sumpost + post ### S-step # ~ matrix of component indicators simu checked OK z <- t(apply(post, 1, function(prob) rmultinom(1, 1, prob))) # cbind(post,z) # checking simulation zt <- z*t # each row of z is 0,1 hence zt holds {0,t} nsets <- colSums(z) # subsets per component sizes ### M-step for scalar parameters newlambda <- nsets/n # or colMeans(post) if EM strategy preferred! lambda_seq[iter+1, ] <- newlambda # update of the scaling (xi) parameter needs building of KM estimates # from each subsample, and integration of both # ToDo: use parallel on 2 cores to perform each subsample task! 
tsub1 <- t[z[,1]==1]; tsub2 <- t[z[,2]==1] # t subsamples dsub1 <- d[z[,1]==1]; dsub2 <- d[z[,2]==1] # d subsamples o1 <- order(tsub1); od1 <- cbind(tsub1,dsub1)[o1,] o2 <- order(tsub2); od2 <- cbind(tsub2,dsub2)[o2,] km1 <- KMod(od1); km2 <- KMod(od2) newscaling <- KMintegrate(km1)/KMintegrate(km2) scaling_seq[iter+1] <- newscaling # stored for final plotting if (averaged) { scaling <- mean(scaling_seq[1:(iter+1)]) lambda <- colMeans(lambda_seq[1:(iter+1),]) } else { # averaged=FALSE case, just use last update scaling <- newscaling lambda <- newlambda } ### M-step for nonparametric alpha() and s(): # unscaling the sample and order it, # keeping the associated d censoring indicator zt[,2] <- scaling*zt[,2] # unscale t's from component 2 newt <- rowSums(zt) # "unscaled" sample (sum 0 and t_i or scale*t_i ) newo <- order(newt) new.od <- cbind(newt,d)[newo,] # with associated (unchanged) d s <- KMod(new.od) # stepfun object, can be evaluated by s(t) a1 <- HRkde(new.od, t, kernelft, bw=bw) # evaluated at the original t's a2 <- HRkde(new.od, scaling*t, kernelft, bw=bw) # and at scaling*t's ### batch storage: collecting unscaled samples if (iter >= (maxit - batchsize)) { batch_t <- c(batch_t, newt) batch_d <- c(batch_d, d) cat("-- adding unscaled sample at iteration",iter,"\n") } # change <- c(lambda_seq[iter,] - lambda_seq[iter-1,], # scaling_seq[iter]- scaling_seq[iter-1]) # dll <- max(abs(change)) if (verb) { cat("iteration", iter, ": sizes=", nsets," ") cat("lambda_1=",lambda[1], "scaling=",scaling, "\n") } # printing % done b=round(iter/qt); r=iter-b*qt if (r==0) {pc <- pc+10; cat(pc,"% done\n")} iter <- iter + 1 } ###### end of SEM loops over iterations ###### colnames(post) <- colnames(posthat) <- c(paste("comp", ".", 1:k, sep = "")) lambdahat <- colMeans(lambda_seq[1:iter,]) scalinghat <- mean(scaling_seq[1:iter]) posthat <- sumpost/(iter-1) # average posteriors hazard <- HRkde(new.od, kernelft=kernelft, bw=bw) # at final unscaled sample # Would it be better to 
return the final unscaled sample?? cat(iter, "iterations: lambda=",lambdahat,", scaling=",scalinghat,"\n") if (sumNaNs > 0) cat("warning: ", sumNaNs,"NaN's occured in posterior computations") ###### FINISHING ###### # ToDo: average over batch set of last unscaled samples # compute average estimates via a S+M-step using posthat etc loglik <- sum(log(rs)) # "sort of" loglik with nonparam densities estimated # mimmics the parametric case, just like npEM does z <- t(apply(posthat, 1, function(prob) rmultinom(1, 1, prob))) zt <- z*t zt[,2] <- scalinghat*zt[,2] avgt <- rowSums(zt) # unscaled sample avgo <- order(avgt) avg.od <- cbind(avgt,d)[avgo,] # with associated d shat <- KMod(avg.od) # stepfun object ahat <- HRkde(avg.od, kernelft=kernelft, bw=bw) # eval at the unscaled ordered avgt a=list(t=t, d=d, lambda=lambdahat, scaling=scalinghat, posterior=post, # final posterior probabilities all.lambda=lambda_seq[1:iter,], # sequence of lambda's all.scaling=scaling_seq[1:iter], # sequence of scale loglik=loglik, # analog to parametric loglik, like npEM meanpost=posthat, # posterior proba averaged over iterations survival=s, # Kaplan-Meier last iter estimate (stepfun object) hazard=hazard, # hazard rate final estimate evaluated at final.t final.t=new.od[,1], # last unscaled sample s.hat=shat, # Kaplan-Meier average estimate t.hat= avg.od[,1], # ordered unscaled sample based on meanpost USEFUL?? avg.od=avg.od, # t.hat with associated d (for computing ahat outside) hazard.hat=ahat, # hazard rate average estimate on t.hat batch.t=batch_t, # batch sample t (not ordered) batch.d=batch_d, # associated event indicators just rep(d,batchsize) sumNaNs = sumNaNs, # total nb of NaN's in iterations while computing post ft="spRMM_SEM") class(a) = "spRMM" return(a) } ############################################## ## plot function for spRMM object : remove lognormal true pdf! 
# sem = spRMM_SEM object # other are true parameters, if available, for plotting references # ToDo: plot true f and S only if true param availables # pass the true pdf (like dlnorm) as well plotspRMM <- function(sem, tmax = NULL){ t <- sem$t ym <- max(sem$all.scaling) par(mfrow=c(2,2)) plot(sem$all.scaling, type="l", ylim=c(0, ym), xlab="iterations", main="scaling", ylab="") plot(sem$all.lambda[,1], ylim=c(0,1), type="l", xlab="iterations", main="weight of component 1",ylab="") # plots of Kaplan-Meier estimates # finding max time for plots if (is.null(tmax)){tmax <- max(sem$scaling*t) + 2} u <- seq(0, tmax, len=200) plot(sem$survival, xlim=c(0,tmax), ylim=c(0,1), pch=20, verticals=F, do.points=F, xlab="time", main="Survival function estimate") # plot(sem$s.hat, pch=20, verticals=F, do.points=F, col=2, add = TRUE) # pdf estimates fhat <- sem$s.hat(sem$t.hat)*sem$hazard.hat ffinal <- sem$survival(sem$final.t)*sem$hazard plot(sem$final.t, ffinal, type="l", col=1, xlim=c(0,tmax), xlab="time", ylab="density", main="Density estimate") } ## S3 method of summary for class "spRMM" summary.spRMM <- function(object, digits = 6, ...) { sem <- object if (sem$ft != "spRMM_SEM") stop("Unknown object of type ", sem$ft) cat("summary of", sem$ft, "object:\n") o <- matrix(NA, nrow = 2, ncol = 2) o[1,] <- sem$lambda o[2,2] <- sem$scaling colnames(o) <- paste("comp",1:2) rownames(o) <- c("lambda", "scaling") print(o, digits = digits, ...) 
cat("(pseudo) loglik at estimate: ", sem$loglik, "\n") pcc <- round(100*(1-mean(sem$d)), 2) cat(pcc, "% of the data right censored\n") } mixtools/R/npMSL.R0000644000176200001440000005136113060307260013450 0ustar liggesusers# original version, with weighted Silverman bandwidth unique option # has been improved (mid 2014) by a k-fold CV option # the original version is kept as "npMSL_old" in this file ################################################################ ################################################################ ## nonparametric algorithm for Smoothed Likelihood Maximization ## implementing block structure ## and Silverman adaptive bandwidth ## 2014 additions: loglik stores all the sequence, ## post argument for passing an init matrix of posterior ## bwiter argument for the duration of the adaptive bw stage ## can be set to 0 for keeping an initial bw matrix when samebw=FALSE ################################################################ ################################################################ npMSL_old <- function(x, mu0, blockid = 1:ncol(x), bw=bw.nrd0(as.vector(as.matrix(x))), samebw = TRUE, h=bw, eps=1e-8, maxiter=500, bwiter = maxiter, ngrid=200, post=NULL, verb = TRUE){ bw <- h # h is alternative bandwidth argument, for backward compatibility x <- as.matrix(x) n <- nrow(x) # number of subjects r <- ncol(x) # number of measurements in each subject u <- match(blockid, unique(blockid)) B <- max(u) # nb of blocks BlS <- rep(0,B) # block sizes = C_ell in JCGS paper for (ell in 1:B) { BlS[ell] <- sum(u == ell)} if (is.matrix(mu0)) # mu0=centers m <- dim(mu0)[1] else m <- mu0 # mu0=number of clusters if(!samebw && !is.matrix(bw)) { # create initial bandwidth matrix cat("create initial bandwidth matrix\n") bw <- matrix(bw, nrow=max(u), ncol=m) } z.hat <- matrix(0, nrow = n, ncol = m) tt0 <- proc.time() # for total time ## Initial Values if(m == 1) z.hat <- matrix(1, nrow = n, ncol = m) else if(is.null(post)) { kmeans <- kmeans(x, mu0) for(j 
in 1:m) z.hat[kmeans$cluster==j, j] <- 1 } else { z.hat <- post ## Currently no error-checking is done here } iter <- 0 finished <- FALSE lambda <- matrix(0, nrow = maxiter, ncol = m) loglik <- NULL; loglikseq <- rep(NA,maxiter) total_udfl <- 0; total_nan <- 0 # eventual NaN and underflow in C code tmp <- 1:n # is this needed? xtra <- (max(x)-min(x))/10 grid <- seq(min(x)-xtra, max(x)+xtra, length=ngrid) # f stored on a ngrid by m by B array # f_{g,j,ell} = f_{j ell}(u_g) # f <- array(1/m/diff(grid[1:2]), c(ngrid, m, B)) # this f was not normalized for being uniform over grid Delta <- diff(grid[1:2]) f <- array(1/((ngrid-1)*Delta), c(ngrid, m, B)) oldloglik <- -Inf orderx <- xx <- list() # preparation for adaptive bandwidth for(k in 1:B) { xx[[k]] <- as.vector(x[, u==k]) if (!samebw) { orderx[[k]] = order(xx[[k]]) # only needed for IQR calculation for bw } } ## CftEstep <- ifelse(samebw, "npMSL_Estep", "npMSL_Estep_bw") #Called directly below # CftEstep <- "npMSL_Estep_bw" # temporary, for testing only the M-step ## CftMstep <- ifelse(samebw, "npMSL_Mstep", "npMSL_Mstep_bw") #Called directly below while (!finished) { # Algorithm main iteration loop iter <- iter + 1 bw.old <- bw # is this needed? 
t0 <- proc.time() nb_udfl=0; # nb underflows, K()*log(0) ~0 cancelled in nems_Estep.c nb_nan=0; # nb nonzero K()*log(0) cancelled in nems_Estep.c ## Note: Enter loop assuming E-step is done -- i.e., z.hat in place ## M-Step lambda[iter, ] <- colMeans(z.hat) ## density estimation in M-step: WKDE-step cs <- colSums(z.hat) z.tmp <- sweep(z.hat, 2, cs, "/") z.tmp[, cs==0] <- 1/NROW(z.tmp) # Just in case ## adaptive bandwidth update IF in adaptive bw stage if (!samebw && iter <= bwiter) { for (ell in 1:B) { r2 <- BlS[ell] # block size = nb of coordinates wts <- apply(z.tmp, 2, function(z) rep(z/r2, r2)) variances <- colSums(wts*outer(xx[[ell]], colSums(wts*xx[[ell]]),'-')^2) iqr <- apply(as.matrix(wts[orderx[[ell]],]), 2, wIQR, xx[[ell]][orderx[[ell]]], already.sorted=TRUE, already.normalized=TRUE) h <- bw[ell, ] <- 0.9 * pmin(sqrt(variances), iqr/1.34) * pmax(1,r2*n*lambda[iter, ])^(-1/5) # Note: Doesn't allow "sample size" < 1. # browser() } } # end of bw adaptive stage if(samebw){ z <- .C(C_npMSL_Mstep, as.integer(ngrid), as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(BlS), as.integer(u), as.double(bw), as.double(x), as.double(grid), new.f=as.double(f), as.double(lambda[iter,]), as.double(z.hat), PACKAGE = "mixtools") } else{ z <- .C(C_npMSL_Mstep_bw, as.integer(ngrid), as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(BlS), as.integer(u), as.double(bw), as.double(x), as.double(grid), new.f=as.double(f), as.double(lambda[iter,]), as.double(z.hat), PACKAGE = "mixtools") } # z=.C(CftMstep, as.integer(ngrid), as.integer(n), # as.integer(m), as.integer(r), # as.integer(B), as.integer(BlS), as.integer(u), # as.double(bw), as.double(x), as.double(grid), # new.f=as.double(f), # as.double(lambda[iter,]), # as.double(z.hat)) f <- array(z$new.f, c(ngrid, m, B)) # check sum(f == 0) # print(apply(f,2:3,sum) * Delta) # print(max(abs(f-f2))) # browser() ## E-step (for next iteration) if(samebw){ z <- .C(C_npMSL_Estep, as.integer(ngrid), 
as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(u), as.double(bw), as.double(x), as.double(grid), f=as.double(f), as.double(lambda[iter,]), post=as.double(z.hat), loglik = double(1), nb_udfl = as.integer(nb_udfl), nb_nan = as.integer(nb_nan), PACKAGE = "mixtools") } else{ z <- .C(C_npMSL_Estep_bw, as.integer(ngrid), as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(u), as.double(bw), as.double(x), as.double(grid), f=as.double(f), as.double(lambda[iter,]), post=as.double(z.hat), loglik = double(1), nb_udfl = as.integer(nb_udfl), nb_nan = as.integer(nb_nan), PACKAGE = "mixtools") } # z=.C(CftEstep, as.integer(ngrid), as.integer(n), # as.integer(m), as.integer(r), # as.integer(B), as.integer(u), # as.double(bw), # as.double(x), as.double(grid), f=as.double(f), # as.double(lambda[iter,]), post=as.double(z.hat), # loglik = double(1), # nb_udfl = as.integer(nb_udfl), nb_nan = as.integer(nb_nan)) nb_udfl = z$nb_udfl; nb_nan = z$nb_nan; total_udfl <- total_udfl + nb_udfl total_nan <- total_nan + nb_nan z.hat <- matrix(z$post, n, m) if (sum(is.nan(z.hat)) > 0) cat("Error!! NaN in z.hat") # obsolete ? 
loglik <- loglikseq[iter] <- z$loglik loglikchange <- loglik - oldloglik oldloglik <- loglik finished <- iter >= maxiter if (iter>1 && max(abs(lambda[iter, ] - lambda[iter-1, ])) < eps) finished <- TRUE if (verb) { t1 <- proc.time() cat("iteration", iter, ": lambda ", round(lambda[iter, ], 4)) cat(" obj function", round(loglik, 4)) cat(" (change", round(loglikchange,4), ")") cat(" time", (t1 - t0)[3]) if ((nb_udfl > 0) || (nb_nan >0)) cat("\n ") if (nb_udfl > 0) cat("average underflows=", round(nb_udfl/(n*m*r),3)," ") if (nb_nan >0) cat("average NaNs=", round(nb_nan/(n*m*r),3)) # Note: these average mean nb of nan over ngrid convolution cat("\n") } } # f <- array(z$f, c(ngrid, m, r)) # obsolete in block version if (!samebw) { rownames(bw) <- paste("block", 1:max(u)) colnames(bw) <- paste("component", 1:m) } if (verb) { tt1 <- proc.time() cat("lambda ", round(lambda[iter, ], 4)) cat(", total time", (tt1 - tt0)[3], "s\n") } return(structure(list(data = x, posteriors = z.hat, lambda = lambda[1:iter,], bandwidth = bw, blockid = u, lambdahat = lambda[iter,], f=f, grid = grid, loglik = loglikseq[1:iter], meanUdfl = total_udfl/(n*m*r*iter),# average underflow meanNaN = total_nan/(n*m*r*iter)), # average NaN's class="npEM")) # define a "npMSL" class ? 
} ## updated late 2014 version ##################################################################### ##################################################################### ## nonparametric algorithm for Smoothed Likelihood Maximization ## implementing block structure ## 2014 latests additions: ## option for Silverman & weighted k-fold CV adaptive bandwidth: ## parameter bwmethod = "S" for Silverman's rule (the default), ## "CV" for Cross-Validation ## loglik now stores all the sequence ## post argument for passing an init matrix of posterior ## bwiter argument for the duration of the adaptive bw stage ## can be set to 0 for keeping an initial bw matrix when samebw=FALSE ###################################################################### ###################################################################### ## preliminary function for computing the weighted version of ## k-fold Cross-Validation # Splitting 1:n in nbsets subsets and return indices # n = total sample size # setsize = size of sets # nseq = explicit 1st and last indices splitsample <- function(n, nbsets=2) { k <- floor(n/nbsets) klast <- n - (nbsets-1)*k setsize <- c(rep(k, nbsets-1), klast) # n per set if (sum(setsize) != n) {cat("warning")} ni <- c(1,cumsum(setsize)+1) # indices for splitting nseq <- matrix(NA, nrow=nbsets, ncol=2) for (j in 1:nbsets) { nseq[j,1] <- ni[j] # 1st index for jth set nseq[j,2] <- ni[j+1]-1 # last index for jth set } a = list(setsize=setsize, nseq=nseq) return(a) } ## computes CV(h) in k-fold CV case # h = bandwidth (first argument for optimizing) # x = sample of data, vector # nbsets = number of folds # w = weights # lower, upper = numerical integration limits, data-driven defaults kfoldCV <- function(h, x, nbsets=2, w = rep(1, length(x)), lower=mean(x)-5*sd(x), upper=mean(x)+5*sd(x)) { n <- length(x) fold <- splitsample(n, nbsets) sumf <- 0 for (k in 1:nbsets) { Sk <- fold$nseq[k,1]:fold$nseq[k,2] # indices of set k learnset <- x[-Sk] # obs not in set k, from which f_h 
is "learned" evalset <- x[Sk] fk <- wkde(x = learnset, u = evalset, w = w[-Sk], bw = h) sumf <- sumf + sum(fk) } integrand <- function(u,...) {wkde(x, u, bw=h)^2} # computing ||f_h||^2 fh2 <- integrate(integrand, lower=lower, upper=upper)$value return(fh2 - 2*sumf/n) } ## Weighted bw selection by k-fold CV #### ## min search done by call to optimize # x = vector of data # w = weights, defaults to 1, unnormalized wbw.kCV <- function(x, nbfold=5, w = rep(1, length(x)), hmin=0.1*hmax, hmax=NULL) { n <- length(x) if (is.null(hmax)) hmax <- 1.144 * sqrt(var(x))*n^(-1/5) # default hmax as in bw.ucv # computing lower and upper integration limits for ||fh||^2 wm <- weighted.mean(x, w) lowerf <- wm - 5*sd(x); upperf <- wm + 5*sd(x) # maybe use a weighted.sd version as well? fucv <- function(h) kfoldCV(h, x, nbsets=nbfold, w=w, lower=lowerf, upper=upperf) hopt <- optimize(fucv, lower = hmin, upper = hmax)$minimum return(hopt) } ##################################################################### ##################################################################### ## nonparametric algorithm for Smoothed Likelihood Maximization ## implementing block structure ## 2014 latests additions: ## option for Silverman & weighted k-fold CV adaptive bandwidth: ## parameter bwmethod = "S" for Silverman's rule (the default), ## "CV" for Cross-Validation ## loglik now stores all the sequence ## post argument for passing an init matrix of posterior ## bwiter argument for the duration of the adaptive bw stage ## nbfold parameter passed to wbw.kCV, for leave-[n/nbfold]-out CV ## can be set to 0 for keeping an initial bw matrix when samebw=FALSE ###################################################################### ###################################################################### # ToDo: add a nbfold parameter passed to wbw.kCV npMSL <- function(x, mu0, blockid = 1:ncol(x), bw=bw.nrd0(as.vector(as.matrix(x))), samebw = TRUE, bwmethod = "S", h=bw, eps=1e-8, maxiter=500, bwiter = maxiter, 
nbfold = NULL, ngrid=200, post=NULL, verb = TRUE){ bw <- h # h is alternative bandwidth argument, for backward compatibility x <- as.matrix(x) n <- nrow(x) # number of subjects r <- ncol(x) # number of measurements in each subject u <- match(blockid, unique(blockid)) B <- max(u) # nb of blocks BlS <- rep(0,B) # block sizes = C_ell in JCGS paper for (ell in 1:B) { BlS[ell] <- sum(u == ell)} if (is.matrix(mu0)) # mu0=centers m <- dim(mu0)[1] else m <- mu0 # mu0 = number of clusters if(!samebw && !is.matrix(bw)) { # create initial bandwidth matrix h_lj bw <- matrix(bw, nrow=max(u), ncol=m)} z.hat <- matrix(0, nrow = n, ncol = m) tt0 <- proc.time() # for total time ## Initial Values if(m == 1) z.hat <- matrix(1, nrow = n, ncol = m) else if(is.null(post)) { kmeans <- kmeans(x, mu0) for(j in 1:m) z.hat[kmeans$cluster==j, j] <- 1 } else { z.hat <- post ## Currently no error-checking is done here } if (is.null(nbfold) && bwmethod == "CV") {nbfold <- 5} # default value for nbfold-CV iter <- 0 finished <- FALSE lambda <- matrix(0, nrow = maxiter, ncol = m) loglik <- NULL loglikseq <- rep(NA,maxiter) total_udfl <- 0; total_nan <- 0 # eventual NaN and underflow in C code tmp <- 1:n xtra <- (max(x)-min(x))/10 grid <- seq(min(x)-xtra, max(x)+xtra, length=ngrid) # f stored on a ngrid by m by B array # f_{g,j,ell} = f_{j ell}(u_g) # f <- array(1/m/diff(grid[1:2]), c(ngrid, m, B)) # this f was not normalized for being uniform over grid Delta <- diff(grid[1:2]) f <- array(1/((ngrid-1)*Delta), c(ngrid, m, B)) oldloglik <- -Inf orderx <- xx <- list() # preparation for Silverman adaptive bandwidth for(k in 1:B) { xx[[k]] <- as.vector(x[, u==k]) # data pooled from kth block if (!samebw && bwmethod == "S") { orderx[[k]] = order(xx[[k]]) # only needed for IQR calculation for bw } } ## CftEstep <- ifelse(samebw, "npMSL_Estep", "npMSL_Estep_bw") #Called directly below. 
# CftEstep <- "npMSL_Estep_bw" # temporary, for testing only the M-step ## CftMstep <- ifelse(samebw, "npMSL_Mstep", "npMSL_Mstep_bw") #Called directly below. while (!finished) { iter <- iter + 1 bw.old <- bw # is this needed? t0 <- proc.time() nb_udfl=0; # nb of underflows log(0) cancelled in nems_Estep.c nb_nan=0; # nb of nonzero K()*log(0) cancelled in nems_Estep.c ## Note: Enter loop assuming E-step is done i.e., z.hat in place ## M-Step lambda[iter, ] <- colMeans(z.hat) ## density estimation in M-step: WKDE-step cs <- colSums(z.hat) z.tmp <- sweep(z.hat, 2, cs, "/") z.tmp[, cs==0] <- 1/NROW(z.tmp) # Just in case ## adaptive bandwidth update - depends on bwmethod and adptbw stage duration in this version if (!samebw && iter <= bwiter) { # adaptive Silverman's rule if (bwmethod == "S") { for (ell in 1:B) { # for each block r2 <- BlS[ell] # block size = nb of coordinates wts <- apply(z.tmp, 2, function(z) rep(z/r2, r2)) variances <- colSums(wts*outer(xx[[ell]], colSums(wts*xx[[ell]]),'-')^2) iqr <- apply(as.matrix(wts[orderx[[ell]],]), 2, wIQR, xx[[ell]][orderx[[ell]]], already.sorted=TRUE, already.normalized=TRUE) h <- bw[ell, ] <- 0.9 * pmin(sqrt(variances), iqr/1.34) * pmax(1,r2*n*lambda[iter, ])^(-1/5) # Note: Doesn't allow "sample size" < 1. 
} } # adaptive nbfold CV computation, nbfold=5 by default if (bwmethod == "CV") { # k-fold CV for (ell in 1:B) { # for each block r2 <- BlS[ell] # block size = nb of coordinates wts <- apply(z.tmp, 2, function(z) rep(z, r2)) # replicate weights for (j in 1:m) bw[ell,j] <- wbw.kCV(xx[[ell]], nbfold = nbfold, w = wts[,j]) } } # end of CV version } # end of bw adaptive stage if(samebw){ z <- .C(C_npMSL_Mstep, as.integer(ngrid), as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(BlS), as.integer(u), as.double(bw), as.double(x), as.double(grid), new.f=as.double(f), as.double(lambda[iter,]), as.double(z.hat), PACKAGE = "mixtools") } else{ z <- .C(C_npMSL_Mstep_bw, as.integer(ngrid), as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(BlS), as.integer(u), as.double(bw), as.double(x), as.double(grid), new.f=as.double(f), as.double(lambda[iter,]), as.double(z.hat), PACKAGE = "mixtools") } # z=.C(CftMstep, as.integer(ngrid), as.integer(n), # as.integer(m), as.integer(r), # as.integer(B), as.integer(BlS), as.integer(u), # as.double(bw), as.double(x), as.double(grid), # new.f=as.double(f), # as.double(lambda[iter,]), # as.double(z.hat)) f <- array(z$new.f, c(ngrid, m, B)) # check sum(f == 0) # print(apply(f,2:3,sum) * Delta) # print(max(abs(f-f2))) # browser() ## E-step (for next iteration) if(samebw){ z <- .C(C_npMSL_Estep, as.integer(ngrid), as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(u), as.double(bw), as.double(x), as.double(grid), f=as.double(f), as.double(lambda[iter,]), post=as.double(z.hat), loglik = double(1), nb_udfl = as.integer(nb_udfl), nb_nan = as.integer(nb_nan), PACKAGE = "mixtools") } else{ z <- .C(C_npMSL_Estep_bw, as.integer(ngrid), as.integer(n), as.integer(m), as.integer(r), as.integer(B), as.integer(u), as.double(bw), as.double(x), as.double(grid), f=as.double(f), as.double(lambda[iter,]), post=as.double(z.hat), loglik = double(1), nb_udfl = as.integer(nb_udfl), nb_nan = as.integer(nb_nan), 
PACKAGE = "mixtools") } # z=.C(CftEstep, as.integer(ngrid), as.integer(n), # as.integer(m), as.integer(r), # as.integer(B), as.integer(u), # as.double(bw), # as.double(x), as.double(grid), f=as.double(f), # as.double(lambda[iter,]), post=as.double(z.hat), # loglik = double(1), # nb_udfl = as.integer(nb_udfl), nb_nan = as.integer(nb_nan)) nb_udfl = z$nb_udfl; nb_nan = z$nb_nan; total_udfl <- total_udfl + nb_udfl total_nan <- total_nan + nb_nan z.hat <- matrix(z$post, n, m) if (sum(is.nan(z.hat)) > 0) cat("Error!! NaN in z.hat") # obsolete ? loglik <- loglikseq[iter] <- z$loglik loglikchange <- loglik - oldloglik oldloglik <- loglik finished <- iter >= maxiter if (iter>1 && max(abs(lambda[iter, ] - lambda[iter-1, ])) < eps) finished <- TRUE if (verb) { t1 <- proc.time() cat("iteration", iter, ": lambda ", round(lambda[iter, ], 4)) cat(" obj change", round(loglikchange,4)) cat(" time", (t1 - t0)[3]) if ((nb_udfl > 0) || (nb_nan >0)) cat("\n ") if (nb_udfl > 0) cat("average underflows=", round(nb_udfl/(n*m*r),3)," ") if (nb_nan >0) cat("average NaNs=", round(nb_nan/(n*m*r),3)) # Note: average nb of nan over ngrid convolution cat("\n") } } # f <- array(z$f, c(ngrid, m, r)) # obsolete in block version if (!samebw) { rownames(bw) <- paste("block", 1:max(u)) colnames(bw) <- paste("component", 1:m) } if (verb) { tt1 <- proc.time() cat("lambda ", round(lambda[iter, ], 4)) cat(", total time", (tt1 - tt0)[3], "s\n") } return(structure(list(data = x, posteriors = z.hat, lambda = lambda[1:iter,], bandwidth = bw, blockid = u, lambdahat = lambda[iter,], f=f, grid = grid, loglik = loglikseq[1:iter], meanUdfl = total_udfl/(n*m*r*iter),# average underflow meanNaN = total_nan/(n*m*r*iter)), # average NaN's class="npEM")) # should we define a "npMSL" class ? 
} mixtools/R/lambda.R0000755000176200001440000000146513616445565013724 0ustar liggesuserslambda=function (z, x, xi, h = NULL, kernel = c("Gaussian", "Beta", "Triangle", "Cosinus", "Optcosinus"), g = 0) { if (is.null(h)) { cat("WARNING! BANDWIDTH MUST BE SPECIFIED!", "\n") } X.mat=cbind(1,(xi-x)) kernel <- match.arg(kernel) inwindow <- (abs((xi - x)/h) <= 1) if (kernel == "Gaussian") { W=(kern.G(x, xi, h) * inwindow) } else if (kernel == "Beta") { W=(kern.B(x, xi, h, g) * inwindow) } else if (kernel == "Triangle") { W=(kern.T(x, xi, h) * inwindow) } else if (kernel == "Cosinus") { W=(kern.C(x, xi, h) * inwindow) } else if (kernel == "Optcosinus") { W=(kern.O(x, xi, h) * inwindow) } A=try(solve(t(X.mat)%*%(W*X.mat)), silent=TRUE) if(class(A)[1]=="try-error") { A=ginv(t(X.mat)%*%(W*X.mat)) } beta.x=A%*%t(X.mat)%*%(W*z) beta.x }mixtools/R/logisregmixinit.R0000755000176200001440000000207113210323761015672 0ustar liggesuserslogisregmix.init <- function(y, x, N, lambda = NULL, beta = NULL, k = 2){ x <- as.matrix(x) n <- length(y) if (is.null(lambda)) { cond = TRUE while(cond){ lambda = runif(k) lambda = lambda/sum(lambda) if(min(lambda)<0.05) cond=TRUE else cond=FALSE } } else k = length(lambda) p <- ncol(x) w = cbind(y, N, x) w = w[order(w[, 1]), ] w.bin = list() for (j in 1:k) { w.bin[[j]] <- w[max(1, floor((j - 1) * n/k)):ceiling(j * n/k), ] } if (is.null(beta)) { beta.hyp = matrix(sapply(1:k, function(j) glm.fit(w.bin[[j]][, 3:(p + 2)], w.bin[[j]][, 1:2], family = binomial())$coef), ncol = k) sd.hyp = apply(beta.hyp, 1, sd) beta = matrix(0, p, k) for (j in 1:k) { beta[, j] = rnorm(p, mean = as.vector(beta.hyp[, j]), sd = sd.hyp) } } else k = ncol(beta) list(lambda = lambda, beta = beta, k = k) } mixtools/R/repnormmixinit.R0000755000176200001440000000212011665556372015556 0ustar liggesusersrepnormmix.init = function (x, lambda = NULL, mu = NULL, s = NULL, k = 2, arbmean = TRUE, arbvar = TRUE) { n <- ncol(x) m <- nrow(x) y <- apply(x, 2, mean) x <- x[, order(y)] x.bin = 
list() for (j in 1:k) { x.bin[[j]] <- x[, max(1, floor((j - 1) * n/k)):ceiling(j * n/k)] } if (is.null(s)) { s.hyp = sapply(lapply(x.bin, as.vector), sd) if (arbvar) { s = 1/rexp(k, rate = s.hyp) } else { s.hyp = mean(s.hyp) s = 1/rexp(1, rate = s.hyp) } } if (is.null(s) == FALSE && arbvar == TRUE) { k = length(s) } if (is.null(mu)) { mu.hyp <- sapply(lapply(x.bin, as.vector), mean) mu = rnorm(k, mean = mu.hyp, sd = s) if(arbmean==FALSE){ mu = mean(mu) } } if (is.null(mu)==FALSE && arbmean==TRUE){ k = length(mu) } if (is.null(lambda)) { lambda = runif(k) lambda = lambda/sum(lambda) } else k = length(lambda) list(lambda = lambda, mu = mu, s = s, k = k) }mixtools/R/summary.mixEM.R0000644000176200001440000000513213055603106015167 0ustar liggesuserssummary.mixEM <- function(object, digits=6, ...){ x <- object o <- switch(x$ft, "multmixEM" = rbind(x$lambda, t(x$theta)), "normalmixEM" = rbind(x$lambda, x$mu, x$sigma), "repnormmixEM" = rbind(x$lambda, x$mu, x$sigma), "regmixEM" = rbind(x$lambda, x$sigma, x$beta), "regmixEM.lambda" = rbind(x$lambda, x$sigma, x$beta), "regmixEM.mixed" = rbind(x$lambda, x$sigma, x$beta), "regmixEM.loc" = rbind(x$sigma, x$beta), "regmixEM.chgpt" = rbind(x$lambda, x$sigma), "logisregmixEM" = rbind(x$lambda, x$beta), "poisregmixEM" = rbind(x$lambda, x$beta), "expRMM_EM" = rbind(x$lambda, x$rate), # Reliability Mixt Model censored exp mixture "weibullRMM_SEM" = rbind(x$lambda, x$shape, x$scale), # RMM censored weibull mixture "mvnormalmixEM" = rbind(x$lambda, matrix(unlist(x$mu), byrow=TRUE, nrow=length(x$lambda))), "normalmixMMlc" = rbind(x$lambda, x$mu, x$sigma), stop("Unknown mixEM object of type ", x$ft)) colnames(o) <- paste("comp",1:ncol(o)) rownames(o) <- switch(x$ft, "multmixEM" = c("lambda", paste("theta", 1:ncol(x$theta), sep="")), "normalmixEM" = c("lambda", "mu", "sigma"), "repnormmixEM" = c("lambda", "mu", "sigma"), "regmixEM" = c("lambda", "sigma", paste("beta", 1:nrow(x$beta), sep="")), "regmixEM.lambda" = c("lambda", "sigma", 
paste("beta", 1:nrow(x$beta), sep="")), "regmixEM.mixed" = c("lambda", "sigma", paste("beta", 1:nrow(x$beta), sep="")), "regmixEM.loc" = c("sigma", paste("beta", 1:nrow(x$beta), sep="")), "regmixEM.chgpt" = c("lambda", "sigma"), "logisregmixEM" = c("lambda", paste("beta", 1:nrow(x$beta), sep="")), "poisregmixEM" = c("lambda", paste("beta", 1:nrow(x$beta), sep="")), "expRMM_EM" = c("lambda", "rate"), "weibullRMM_SEM" = c("lambda", "shape", "scale"), "mvnormalmixEM" = c("lambda", paste("mu", 1:length(x$mu), sep="")), "normalmixMMlc" = c("lambda", "mu", "sigma")) cat("summary of", x$ft, "object:\n") print(o, digits=digits) cat("loglik at estimate: ", x$loglik, "\n") if (x$ft == "expRMM_EM") cat(100*(1-mean(x$d)), "% of the data right censored\n") if (x$ft == "weibullRMM_SEM") cat(100*(1-mean(x$d)), "% of the data right censored\n") } mixtools/R/bootse.R0000755000176200001440000004441212777447507014003 0ustar liggesusersboot.se <- function (em.fit, B = 100, arbmean = TRUE, arbvar = TRUE, N=NULL, ...) { mix.type <- em.fit$ft if (mix.type == "regmixEM") { #Start Here... 
# --- regmixEM: parametric bootstrap SEs for a mixture-of-regressions fit ---
k <- length(em.fit$lambda)
y <- em.fit$y
n <- length(y)
# Strip an intercept column if present; regmixEM re-adds it itself.
if (sum(em.fit$x[, 1] == 1) == n) {
  x <- em.fit$x[, -1]
} else x <- em.fit$x
if (arbmean == FALSE) {
  scale <- em.fit$scale
  beta <- matrix(rep(em.fit$beta, k), ncol = k)
} else {
  scale <- 1
  beta <- em.fit$beta
}
xbeta.new <- em.fit$x %*% beta
lambda <- em.fit$lambda
if (arbvar == FALSE) {
  sigma <- rep(em.fit$sigma, k)
} else {
  sigma <- scale * em.fit$sigma
}
j <- 0
lambda.bs <- NULL
beta.bs <- NULL
sigma.bs <- NULL
scale.bs <- NULL
while (j < B) {
  j <- j + 1
  # Simulate component labels, then responses, from the fitted mixture.
  w <- rmultinom(n, size = 1, prob = lambda)
  y.sim <- sapply(seq_len(n), function(i)
    rnorm(1, mean = xbeta.new[i, (w[, i] == 1)], sd = sigma[w[, i] == 1]))
  em.bs <- try(regmixEM(y = y.sim, x = x, k = k, arbmean = arbmean,
                        arbvar = arbvar, lambda = em.fit$lambda,
                        beta = em.fit$beta, sigma = (scale * em.fit$sigma),
                        ...), silent = TRUE)
  # inherits() is the safe test for try() failures; redo failed/restarted fits.
  if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
    j <- j - 1
  } else {
    lambda.bs <- cbind(lambda.bs, em.bs$lambda)
    beta.bs <- cbind(beta.bs, as.vector(em.bs$beta))
    sigma.bs <- cbind(sigma.bs, em.bs$sigma)
    if (arbmean == FALSE) {
      scale.bs <- cbind(scale.bs, em.bs$scale)
    }
  }
}
# Bootstrap SE = row-wise SD over the B successful replicates.
if (arbmean == FALSE) {
  lambda.se <- apply(lambda.bs, 1, sd)
  beta.se <- apply(beta.bs, 1, sd)
  sigma.se <- apply(sigma.bs, 1, sd)
  scale.se <- apply(scale.bs, 1, sd)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  beta = beta.bs, beta.se = beta.se,
                  sigma = sigma.bs, sigma.se = sigma.se,
                  scale = scale.bs, scale.se = scale.se)
} else {
  lambda.se <- apply(lambda.bs, 1, sd)
  beta.se <- matrix(apply(beta.bs, 1, sd), ncol = k)
  sigma.se <- apply(sigma.bs, 1, sd)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  beta = beta.bs, beta.se = beta.se,
                  sigma = sigma.bs, sigma.se = sigma.se)
}
}
#####
if (mix.type == "regmixEM.mixed"){ #Start Here...
# --- regmixEM.mixed: parametric bootstrap SEs for mixed-effects mixtures ---
k <- length(em.fit$lambda)
y <- em.fit$y
x <- em.fit$x
if (length(y) != length(x))
  stop("Number of elements in lists for x and y must match!")
w <- em.fit$w
p.z <- em.fit$posterior.z
p.b <- em.fit$posterior.beta
mu <- em.fit$mu
R <- em.fit$R
sigma <- em.fit$sigma
lambda <- em.fit$lambda
n <- length(y)
p <- nrow(mu)
n.i <- sapply(y, length)
alpha <- em.fit$alpha
# If the fixed-effects design is all zero, refit without fixed effects.
w.test <- NULL
for (i in seq_along(w)) {
  w.test <- c(w.test, as.vector(w[[i]]))
}
if (sum(w.test) == 0) {
  w1 <- NULL
} else w1 <- w
arb.R <- (length(R) == k)         # component-specific random-effect covariances?
arb.sigma <- (length(sigma) == k) # component-specific error variances?
j <- 0
lambda.bs <- NULL
mu.bs <- NULL
sigma.bs <- NULL
R.bs <- NULL
alpha.bs <- NULL
while (j < B) {
  j <- j + 1
  k.bs <- sapply(seq_len(n), function(i) rmultinom(1, size = 1, prob = lambda))
  # (An unused "bs.i" resampling vector in the original was dropped.)
  if (arb.R == FALSE) {
    if (arb.sigma == FALSE) {
      y.sim <- lapply(seq_len(n), function(i) w[[i]] %*% alpha +
        as.vector(rmvnorm(1, mu = x[[i]] %*% mu[, (k.bs[, i] == 1)],
                          sigma = (x[[i]] %*% R %*% t(x[[i]]) +
                                     diag(sigma, n.i[i])))))
    } else y.sim <- lapply(seq_len(n), function(i) w[[i]] %*% alpha +
        as.vector(rmvnorm(1, mu = x[[i]] %*% mu[, (k.bs[, i] == 1)],
                          sigma = (x[[i]] %*% R %*% t(x[[i]]) +
                                     diag(sigma[(k.bs[, i] == 1)], n.i[i])))))
  } else {
    if (arb.sigma == FALSE) {
      y.sim <- lapply(seq_len(n), function(i) w[[i]] %*% alpha +
        as.vector(rmvnorm(1, mu = x[[i]] %*% mu[, (k.bs[, i] == 1)],
                          sigma = (x[[i]] %*% R[(k.bs[, i] == 1)][[1]] %*% t(x[[i]]) +
                                     diag(sigma, n.i[i])))))
    } else y.sim <- lapply(seq_len(n), function(i) w[[i]] %*% alpha +
        as.vector(rmvnorm(1, mu = x[[i]] %*% mu[, (k.bs[, i] == 1)],
                          sigma = (x[[i]] %*% R[(k.bs[, i] == 1)][[1]] %*% t(x[[i]]) +
                                     diag(sigma[(k.bs[, i] == 1)], n.i[i])))))
  }
  em.bs <- try(regmixEM.mixed(y = y.sim, x = x, w = w1, sigma = sigma, mu = mu,
                              alpha = alpha, R = R, lambda = lambda, k = k,
                              arb.R = arb.R, arb.sigma = arb.sigma,
                              addintercept.fixed = FALSE,
                              addintercept.random = FALSE, ...), silent = TRUE)
  if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
    j <- j - 1
  } else {
    lambda.bs <- cbind(lambda.bs, em.bs$lambda)
    alpha.bs <- cbind(alpha.bs, em.bs$alpha)
    sigma.bs <- cbind(sigma.bs, em.bs$sigma)
    mu.bs <- cbind(mu.bs, as.vector(em.bs$mu))
    R.bs <- cbind(R.bs, as.vector(sapply(em.bs$R, c)))
  }
}
# Bug fix: the original computed the SEs inside the while loop, which fails
# (apply on NULL) whenever the very first bootstrap fit errors out; compute
# them once after all B replicates have been collected.
lambda.se <- apply(lambda.bs, 1, sd)
alpha.se <- apply(alpha.bs, 1, sd)
sigma.se <- apply(sigma.bs, 1, sd)
mu.se <- matrix(apply(mu.bs, 1, sd), ncol = k)
R.se1 <- apply(R.bs, 1, sd)
if (arb.R == TRUE) {
  R.se2 <- matrix(R.se1, ncol = k)
  R.se <- lapply(seq_len(k), function(i) matrix(R.se2[, i], ncol = p))
} else R.se <- matrix(R.se1, ncol = p)
bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                alpha = alpha.bs, alpha.se = alpha.se,
                mu = mu.bs, mu.se = mu.se,
                sigma = sigma.bs, sigma.se = sigma.se,
                R = R.bs, R.se = R.se)
}
#####
# Commented out by DRH on 8-29-2008 due to absence of gammamixEM function
# if (mix.type == "gammamixEM") {
#   x <- em.fit$x
#   n <- length(x)
#   k <- length(em.fit$lambda)
#   alpha <- em.fit$gamma.pars[1,]
#   beta <- em.fit$gamma.pars[2,]
#   lambda <- em.fit$lambda
#   j = 0
#   lambda.bs = NULL
#   alpha.bs = NULL
#   beta.bs = NULL
#   while (j < B) {
#     j = j + 1
#     comp = sample(1:k,size=n,replace=T,prob=lambda)
#     x.sim = sapply(1:n,function(i) rgamma(1,shape=alpha[comp[i]],scale=beta[comp[i]]))
#     em.bs = try(gammamixEM(x = x.sim, k = k, lambda = lambda,
#       alpha = alpha, beta = beta, ...), silent = TRUE)
#     if (class(em.bs) == "try-error") {
#       j = j - 1
#     }
#     else {
#       lambda.bs = cbind(lambda.bs, em.bs$lambda)
#       alpha.bs = cbind(alpha.bs, as.vector(em.bs$gamma.pars[1,]))
#       beta.bs = cbind(beta.bs, as.vector(em.bs$gamma.pars[2,]))
#     }
#   }
#   lambda.se = apply(lambda.bs, 1, sd)
#   alpha.se = apply(alpha.bs, 1, sd)
#   beta.se = apply(beta.bs, 1, sd)
#   bs.list = list(lambda = lambda.bs, lambda.se = lambda.se,
#     alpha = alpha.bs, alpha.se = alpha.se, beta = beta.bs, beta.se = beta.se)
# }
#####
if (mix.type == "repnormmixEM") { #Start Here...
# --- repnormmixEM: parametric bootstrap SEs for repeated-measures normal mixtures ---
k <- length(em.fit$lambda)
y <- em.fit$y
m <- nrow(y)  # repeated measures per subject
n <- ncol(y)  # number of subjects
if (arbmean == FALSE) {
  scale <- em.fit$scale
  mu <- rep(em.fit$mu, k)
} else {
  scale <- 1
  mu <- em.fit$mu
}
lambda <- em.fit$lambda
if (arbvar == FALSE) {
  sigma <- rep(em.fit$sigma, k)
} else {
  sigma <- scale * em.fit$sigma
}
j <- 0
lambda.bs <- NULL
mu.bs <- NULL
sigma.bs <- NULL
scale.bs <- NULL
while (j < B) {
  j <- j + 1
  # Simulate one component per subject, then m repeated measures from it.
  w <- rmultinom(n, size = 1, prob = lambda)
  y.sim <- sapply(seq_len(n), function(i)
    rnorm(m, mean = mu[w[, i] == 1], sd = sigma[w[, i] == 1]))
  em.bs <- try(repnormmixEM(x = y.sim, k = k, arbmean = arbmean,
                            arbvar = arbvar, lambda = em.fit$lambda,
                            mu = em.fit$mu, sigma = (scale * em.fit$sigma),
                            ...), silent = TRUE)
  if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
    j <- j - 1
  } else {
    if (arbmean == FALSE) {
      lambda.bs <- cbind(lambda.bs, em.bs$lambda)
      mu.bs <- cbind(mu.bs, as.vector(em.bs$mu))
      sigma.bs <- cbind(sigma.bs, em.bs$sigma)
      scale.bs <- cbind(scale.bs, em.bs$scale)
    } else {
      lambda.bs <- cbind(lambda.bs, em.bs$lambda)
      # Bug fix: the original accumulated as.vector(mu.bs$beta) -- a
      # self-referencing typo that left mu.bs NULL, so no mean estimates
      # were ever stored and mu.se failed for arbmean = TRUE.
      mu.bs <- cbind(mu.bs, as.vector(em.bs$mu))
      sigma.bs <- cbind(sigma.bs, em.bs$sigma)
    }
  }
}
if (arbmean == FALSE) {
  lambda.se <- apply(lambda.bs, 1, sd)
  mu.se <- apply(mu.bs, 1, sd)
  sigma.se <- apply(sigma.bs, 1, sd)
  scale.se <- apply(scale.bs, 1, sd)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  mu = mu.bs, mu.se = mu.se,
                  sigma = sigma.bs, sigma.se = sigma.se,
                  scale = scale.bs, scale.se = scale.se)
} else {
  lambda.se <- apply(lambda.bs, 1, sd)
  mu.se <- apply(mu.bs, 1, sd)
  sigma.se <- apply(sigma.bs, 1, sd)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  mu = mu.bs, mu.se = mu.se,
                  sigma = sigma.bs, sigma.se = sigma.se)
}
}
####
if (mix.type == "mvnormalmixEM") { #Start Here...
# --- mvnormalmixEM: parametric bootstrap SEs for multivariate normal mixtures ---
k <- length(em.fit$lambda)
y <- em.fit$x
n <- nrow(y)
p <- ncol(y)
# Replicate a common mean/covariance across components when constrained.
if (arbmean == FALSE) {
  mu <- lapply(1:k, function(i) em.fit$mu)
} else {
  mu <- em.fit$mu
}
lambda <- em.fit$lambda
if (arbvar == FALSE) {
  sigma <- lapply(1:k, function(i) em.fit$sigma)
} else {
  sigma <- em.fit$sigma
}
j <- 0
lambda.bs <- NULL
mu.bs <- NULL
sigma.bs <- NULL
while (j < B) {
  j <- j + 1
  w <- rmultinom(n, size = 1, prob = lambda)
  y.sim <- t(sapply(seq_len(n), function(i)
    rmvnorm(1, mu = mu[w[, i] == 1][[1]], sigma = sigma[w[, i] == 1][[1]])))
  em.bs <- try(mvnormalmixEM(x = y.sim, k = k, arbmean = arbmean,
                             arbvar = arbvar, lambda = em.fit$lambda,
                             mu = em.fit$mu, sigma = em.fit$sigma, ...),
               silent = TRUE)
  if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
    j <- j - 1
  } else {
    lambda.bs <- cbind(lambda.bs, em.bs$lambda)
    mu.bs1 <- as.vector(sapply(em.bs$mu, c))
    mu.bs <- cbind(mu.bs, mu.bs1)
    if (arbvar == FALSE) {
      sigma.bs <- cbind(sigma.bs, as.vector(em.bs$sigma))
    } else {
      sigma.bs1 <- lapply(1:k, function(i) as.vector(em.bs$sigma[[i]]))
      sigma.bs2 <- as.vector(sapply(sigma.bs1, c))
      sigma.bs <- cbind(sigma.bs, sigma.bs2)
    }
  }
}
lambda.se <- apply(lambda.bs, 1, sd)
mu.se1 <- apply(mu.bs, 1, sd)
# Re-assemble the flattened SEs into per-component vectors/matrices.
if (arbmean == TRUE) {
  mu.se <- lapply(1:k, function(i) mu.se1[((i - 1) * p + 1):(i * p)])
} else mu.se <- mu.se1
sigma.se1 <- apply(sigma.bs, 1, sd)
if (arbvar == TRUE) {
  sigma.se <- lapply(1:k, function(i)
    matrix(sigma.se1[((i - 1) * (p^2) + 1):(i * (p^2))], nrow = p, ncol = p))
} else sigma.se <- matrix(sigma.se1, nrow = p)
bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                mu = mu.bs, mu.se = mu.se,
                sigma = sigma.bs, sigma.se = sigma.se)
}
####
if (mix.type == "normalmixEM") { #Start Here...
# --- normalmixEM: parametric bootstrap SEs for univariate normal mixtures ---
k <- length(em.fit$lambda)
y <- em.fit$x
n <- length(y)
x <- em.fit$x
if (arbmean == FALSE) {
  scale <- em.fit$scale
  mu <- rep(em.fit$mu, k)
} else {
  scale <- 1
  mu <- em.fit$mu
}
lambda <- em.fit$lambda
if (arbvar == FALSE) {
  sigma <- rep(em.fit$sigma, k)
} else {
  sigma <- scale * em.fit$sigma
}
j <- 0
lambda.bs <- NULL
mu.bs <- NULL
sigma.bs <- NULL
scale.bs <- NULL
while (j < B) {
  j <- j + 1
  w <- rmultinom(n, size = 1, prob = lambda)
  y.sim <- sapply(seq_len(n), function(i)
    rnorm(1, mean = mu[(w[, i] == 1)], sd = sigma[w[, i] == 1]))
  em.bs <- try(normalmixEM(x = y.sim, k = k, arbmean = arbmean,
                           arbvar = arbvar, lambda = em.fit$lambda,
                           mu = em.fit$mu, sigma = (scale * em.fit$sigma),
                           ...), silent = TRUE)
  # inherits() is the safe test for try() failures; redo failed/restarted fits.
  if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
    j <- j - 1
  } else {
    lambda.bs <- cbind(lambda.bs, em.bs$lambda)
    mu.bs <- cbind(mu.bs, as.vector(em.bs$mu))
    sigma.bs <- cbind(sigma.bs, em.bs$sigma)
    if (arbmean == FALSE) {
      scale.bs <- cbind(scale.bs, em.bs$scale)
    }
  }
}
if (arbmean == FALSE) {
  lambda.se <- apply(lambda.bs, 1, sd)
  mu.se <- apply(mu.bs, 1, sd)
  sigma.se <- apply(sigma.bs, 1, sd)
  scale.se <- apply(scale.bs, 1, sd)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  mu = mu.bs, mu.se = mu.se,
                  sigma = sigma.bs, sigma.se = sigma.se,
                  scale = scale.bs, scale.se = scale.se)
} else {
  lambda.se <- apply(lambda.bs, 1, sd)
  mu.se <- matrix(apply(mu.bs, 1, sd), ncol = k)
  sigma.se <- apply(sigma.bs, 1, sd)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  mu = mu.bs, mu.se = mu.se,
                  sigma = sigma.bs, sigma.se = sigma.se)
}
}
###
# --- multmixEM: parametric bootstrap SEs for multinomial mixtures ---
if (mix.type == "multmixEM") {
  y <- em.fit$y
  n <- nrow(y)
  n.i <- apply(y, 1, sum)  # per-row multinomial totals
  p <- ncol(y)
  k <- length(em.fit$lambda)
  theta <- em.fit$theta
  lambda <- em.fit$lambda
  j <- 0
  lambda.bs <- NULL
  theta.bs <- NULL
  while (j < B) {
    j <- j + 1
    w <- rmultinom(n, size = 1, prob = lambda)
    y.sim <- t(sapply(seq_len(n), function(i)
      rmultinom(1, size = n.i[i], prob = theta[(w[, i] == 1), ])))
    em.bs <- try(multmixEM(y = y.sim, k = k, lambda = lambda,
                           theta = theta, ...), silent = TRUE)
    if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
      j <- j - 1
    } else {
      lambda.bs <- cbind(lambda.bs, em.bs$lambda)
      theta.bs <- cbind(theta.bs, as.vector(em.bs$theta))
    }
  }
  lambda.se <- apply(lambda.bs, 1, sd)
  theta.se <- matrix(apply(theta.bs, 1, sd), nrow = k)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  theta = theta.bs, theta.se = theta.se)
}
####
# --- logisregmixEM: parametric bootstrap SEs for logistic regression mixtures ---
if (mix.type == "logisregmixEM") {
  y <- em.fit$y
  n <- length(y)
  if (is.null(N)) N <- rep(1, n)  # default: Bernoulli trials
  k <- length(em.fit$lambda)
  if (sum(em.fit$x[, 1] == 1) == n) {
    x <- em.fit$x[, -1]
  } else x <- em.fit$x
  lambda <- em.fit$lambda
  beta <- em.fit$beta
  xbeta.new <- em.fit$x %*% beta
  prob <- inv.logit(xbeta.new)
  j <- 0
  lambda.bs <- NULL
  beta.bs <- NULL
  while (j < B) {
    j <- j + 1
    w <- rmultinom(n, size = 1, prob = lambda)
    y.sim <- sapply(seq_len(n), function(i)
      rbinom(1, size = N[i], prob = prob[, (w[, i] == 1)]))
    em.bs <- try(logisregmixEM(y = y.sim, x = x, N = N, k = k,
                               lambda = lambda, beta = beta, ...),
                 silent = TRUE)
    if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
      j <- j - 1
    } else {
      lambda.bs <- cbind(lambda.bs, em.bs$lambda)
      beta.bs <- cbind(beta.bs, as.vector(em.bs$beta))
    }
  }
  lambda.se <- apply(lambda.bs, 1, sd)
  beta.se <- matrix(apply(beta.bs, 1, sd), nrow = k)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  beta = beta.bs, beta.se = beta.se)
}
####
# --- poisregmixEM: parametric bootstrap SEs for Poisson regression mixtures ---
if (mix.type == "poisregmixEM") {
  k <- length(em.fit$lambda)
  y <- em.fit$y
  n <- length(y)
  if (sum(em.fit$x[, 1] == 1) == n) {
    x <- em.fit$x[, -1]
  } else x <- em.fit$x
  lambda <- em.fit$lambda
  beta <- em.fit$beta
  xbeta.new <- em.fit$x %*% beta
  prob <- exp(xbeta.new)  # Poisson means under the log link
  j <- 0
  lambda.bs <- NULL
  beta.bs <- NULL
  while (j < B) {
    j <- j + 1
    w <- rmultinom(n, size = 1, prob = lambda)
    y.sim <- sapply(seq_len(n), function(i)
      rpois(1, lambda = prob[, (w[, i] == 1)]))
    em.bs <- try(poisregmixEM(y = y.sim, x = x, k = k, lambda = lambda,
                              beta = beta, ...), silent = TRUE)
    if (inherits(em.bs, "try-error") || em.bs$restarts != 0) {
      j <- j - 1
    } else {
      lambda.bs <- cbind(lambda.bs, em.bs$lambda)
      beta.bs <- cbind(beta.bs, as.vector(em.bs$beta))
    }
  }
  lambda.se <- apply(lambda.bs, 1, sd)
  beta.se <- matrix(apply(beta.bs, 1, sd), nrow = k)
  bs.list <- list(lambda = lambda.bs, lambda.se = lambda.se,
                  beta = beta.bs, beta.se = beta.se)
}
bs.list
} mixtools/R/mvnormalmixinit.R0000755000176200001440000000322011665556372015731 0ustar liggesusersmvnormalmix.init = function (x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, arbmean = TRUE, arbvar = TRUE) { n <- nrow(x) p <- ncol(x) y <- apply(x, 1, mean) x <- x[order(y), ] x.bin <- list() for (j in 1:k) { x.bin[[j]] <- x[max(1, floor((j - 1) * n/k)):ceiling(j * n/k), ] } if (is.null(sigma)) { if (arbvar) { sigma.hyp = lapply(1:k, function(i) (apply(x.bin[[i]], 2, var))^-1) sigma = lapply(1:k, function(i) diag(1/rexp(p, rate = sigma.hyp[[i]]))) } else { sigma.hyp = apply(sapply(1:k, function(i) (apply(x.bin[[i]], 2, var))^-1), 2, mean) sigma = diag(1/rexp(p, rate = sigma.hyp)) } } if (is.null(sigma) == FALSE && arbvar == TRUE) { k = length(sigma) } if (is.null(mu)) { mu.hyp <- lapply(1:k, function(i) apply(x.bin[[i]], 2, mean)) if (arbvar) { mu <- lapply(1:k, function(i) as.vector(rmvnorm(1, mu = as.vector(mu.hyp[[i]]), sigma = as.matrix(sigma[[i]])))) } else mu <- lapply(1:k, function(i) as.vector(rmvnorm(1, mu = as.vector(mu.hyp[[i]]), sigma = as.matrix(sigma)))) if (arbmean==FALSE) { mu <- apply(sapply(mu,as.vector),1,mean) # mu <- lapply(1:k, function(i) mu) } } if (is.null(mu) == FALSE && arbmean == TRUE){ k=length(mu) } if (is.null(lambda)) { lambda <- runif(k) lambda <- lambda/sum(lambda) } else k <- length(lambda) list(lambda = lambda, mu = mu, sigma = sigma, k = k) }mixtools/R/mvnormalmixEM.R0000755000176200001440000001555611665556372015306 0ustar liggesusersmvnormalmixEM = function (x, lambda = NULL, mu = NULL, sigma = NULL, k = 2, arbmean = TRUE, arbvar = TRUE, epsilon = 1e-08, maxit = 10000, verb = FALSE) { if(arbmean == FALSE && arbvar == FALSE){ stop(paste("Must change constraints on mu and/or sigma!","\n")) } x <- as.matrix(x) n <- nrow(x) p <- ncol(x) tmp <- mvnormalmix.init(x = x, lambda = lambda, mu = mu, sigma = sigma, k = k, arbmean=arbmean, arbvar = arbvar) lambda <- tmp$lambda mu<-tmp$mu sigma <- tmp$sigma k = tmp$k diff <- 1 iter <- 0 if 
(arbmean==FALSE){ comp <- lapply(1:k, function(i) lambda[i] * dmvnorm(x, mu, sigma[[i]])) } else{ if (arbvar==FALSE) { comp <- lapply(1:k, function(i) lambda[i] * dmvnorm(x, mu[[i]], sigma)) } else comp <- lapply(1:k, function(i) lambda[i] * dmvnorm(x, mu[[i]], sigma[[i]])) } comp <- sapply(comp, cbind) compsum <- apply(comp, 1, sum) obsloglik <- sum(log(compsum)) ll <- obsloglik restarts <- 0 while (diff > epsilon & iter < maxit) { if (arbvar) { z = matrix(nrow = n, ncol = k) for (i in 1:n) { for (j in 1:k) { z.denom = c() for (m in 1:k) { z.denom = c(z.denom, lambda[m]/lambda[j] * (det(sigma[[j]])/det(sigma[[m]]))^(0.5) * exp(-0.5 * ((x[i, ] - mu[[m]]) %*% solve(sigma[[m]]) %*% t(t(x[i, ] - mu[[m]])) - (x[i, ] - mu[[j]]) %*% solve(sigma[[j]]) %*% t(t(x[i, ] - mu[[j]]))))) } z[i, j] = 1/sum(z.denom) } } z = z/apply(z,1,sum) # z[,k]=1-apply(as.matrix(z[,(1:(k-1))]),1,sum) sing <- sum(is.nan(z)) lambda.new <- apply(z, 2, mean) if (sum(lambda.new < 1e-08)>0 || is.na(sum(lambda.new))) { sing <- 1 } else { if(arbmean==FALSE) { mu.new <- lapply(1:k, function(j) sapply(1:p, function(i) apply(z * x[, i], 2, sum))[j, ]) mu.new <- apply(sapply(mu.new,as.vector),1,sum)/n mu.new <- lapply(1:k, function(j) mu.new) } else{ mu.new <- lapply(1:k, function(j) sapply(1:p, function(i) apply(z * x[, i], 2, sum))[j, ]/sum(z[, j])) } sigma.new <- lapply(1:k, function(j) matrix(apply(sapply(1:n, function(i) z[i, j] * (x[i, ] - mu.new[[j]]) %*% t(x[i, ] - mu.new[[j]])), 1, sum), p, p)/sum(z[, j])) lambda <- lambda.new mu <- mu.new sigma <- sigma.new comp <- lapply(1:k, function(i) lambda[i] * dmvnorm(x, mu[[i]], sigma[[i]])) comp <- sapply(comp, cbind) compsum <- apply(comp, 1, sum) newobsloglik <- sum(log(compsum)) } } else { z = matrix(nrow = n, ncol = k) sigma.inv = solve(sigma) for (i in 1:n) { for (j in 1:k) { z.denom = c() for (m in 1:k) { z.denom = c(z.denom, lambda[m]/lambda[j] * (det(sigma.inv)/det(sigma.inv))^(0.5) * exp(-0.5 * ((x[i, ] - mu[[m]]) %*% sigma.inv %*% t(t(x[i, ] - 
mu[[m]])) - (x[i, ] - mu[[j]]) %*% sigma.inv %*% t(t(x[i, ] - mu[[j]]))))) } z[i, j] = 1/sum(z.denom) } } # z[,k]=1-apply(as.matrix(z[,(1:(k-1))]),1,sum) z = z/apply(z,1,sum) sing <- sum(is.nan(z)) lambda.new <- apply(z, 2, mean) if (sum(lambda.new < 1e-08)>0 || is.na(sum(lambda.new))) { sing <- 1 } else { if(arbmean==FALSE) { mu.new <- lapply(1:k, function(j) sapply(1:p, function(i) apply(z * x[, i], 2, sum))[j, ]) mu.new <- apply(sapply(mu.new,as.vector),1,sum)/n mu.new <- lapply(1:k, function(j) mu.new) } else{ mu.new <- lapply(1:k, function(j) sapply(1:p, function(i) apply(z * x[, i], 2, sum))[j, ]/sum(z[, j])) } temp.sig <- lapply(1:k, function(j) matrix(apply(sapply(1:n, function(i) z[i, j] * (x[i, ] - mu.new[[j]]) %*% t(x[i, ] - mu.new[[j]])), 1, sum), p, p)) sigma.new <- matrix(apply(sapply(temp.sig, as.vector), 1, sum), p, p)/n lambda <- lambda.new mu <- mu.new sigma <- sigma.new comp <- lapply(1:k, function(i) lambda[i] * dmvnorm(x, mu[[i]], sigma)) comp <- sapply(comp, cbind) compsum <- apply(comp, 1, sum) newobsloglik <- sum(log(compsum)) } } if (sing > 0 || is.na(newobsloglik) || abs(newobsloglik) == Inf){# || sum(z) != n) { cat("Need new starting values due to singularity...", "\n") restarts <- restarts + 1 if(restarts>15) stop("Too many tries!") tmp <- mvnormalmix.init(x = x, k = k, arbmean=arbmean, arbvar = arbvar) lambda <- tmp$lambda mu <- tmp$mu sigma <- tmp$sigma k = tmp$k diff <- 1 iter <- 0 if (arbvar) { comp <- lapply(1:k, function(i) lambda[i] * dmvnorm(x, mu[[i]], sigma[[i]])) } else comp <- lapply(1:k, function(i) lambda[i] * dmvnorm(x, mu[[i]], sigma)) comp <- sapply(comp, cbind) compsum <- apply(comp, 1, sum) obsloglik <- sum(log(compsum)) ll <- obsloglik } else { diff <- newobsloglik - obsloglik obsloglik <- newobsloglik ll <- c(ll, obsloglik) iter <- iter + 1 if (verb) { cat("iteration=", iter, "diff=", diff, "log-likelihood", obsloglik, "\n") } } } if(arbmean==FALSE) { mu = mu[[1]] } if (iter == maxit) { cat("WARNING! 
NOT CONVERGENT!", "\n") } colnames(z) <- c(paste("comp", ".", 1:k, sep = "")) cat("number of iterations=", iter, "\n") a=list(x=x, lambda = lambda, mu = mu, sigma = sigma, loglik = obsloglik, posterior = z, all.loglik=ll, restarts=restarts, ft="mvnormalmixEM") class(a) = "mixEM" a } mixtools/R/WeibullRMMSEM.R0000644000176200001440000002035013055272033015000 0ustar liggesusers# code for Reliability Mixture Models (RMM) with Censored data # D. Chauveau # ref: Bordes L. and Chauveau D. Computational Statistics (2016) # Simulate from a Weibull mixture # lambda = vector of component probabilities # shape, scale = vector of component rates rweibullmix <- function(n,lambda=1, shape=1, scale=1) { m <- length(lambda) # nb of components z <- sample(m, n, replace=TRUE, prob=lambda) # component indicator rweibull(n, shape = shape[z], scale=scale[z]) } ######################################################################## ## Stochastic EM algorithm for Reliability Mixture Models (RMM) ## with Censoring; Parametric model, for ## univariate finite mixture of Weibull with right censoring # x = lifetime data, censored by random c if d is not NULL, in which case # x= min(x,c) and d = I(x <= c) # uses parametric MLE for censored weibull data from the survival package # caution: when fitted by a survreg object, the weibull parametrization is # shape=1/fit$scale and scale=exp(fit$coeff) # averaged = TRUE if averaging of the parameters is computed at each iteration # (cf Nielsen 2000) # NB: averaging can be done in several ways; # DEPRECATED: the current theta for E & M steps is the average over the sequence, # but the theta^next not averaged is stored on the sequence # CURRENT VERSION: the average is stored and used for next step # # About calls of survreg() from the survival package: # maxit.survreg is # passed to survreg.control() for weibull MLE using survreg() weibullRMM_SEM <- function (x, d=NULL, lambda = NULL, shape = NULL, scale = NULL, # dweibull() parameters k = 2, # 
default nb of components maxit = 200, # maxrestarts = 20, maxit.survreg = 200, epsilon = 1e-03, averaged = TRUE, verb = FALSE) { # warn <- options(warn=-1) # Turn off warnings # x <- as.vector(x) # require(survival) n <- length(x) if (!is.null(lambda)) k=length(lambda) if (!is.null(scale)) k=length(scale) # at least one if (!is.null(shape)) k=length(shape) # should be define !!! if (is.null(d)) d <- rep(1,n) # noncensored case forced xx <- matrix(x, nrow=n, ncol=k) # x repeated k times, used in E-steps dd <- matrix(d, nrow=n, ncol=k) # idem for d # # note: d can be used instead of dd since automatic recycling gives # dd*xx = d*xx, but we keep dd to avoid eventual futur change # init function call to do later, in case lambda = rate = NULL if (is.null(lambda)) lambda <- rep(1/k,k) if (is.null(scale)) scale <- rep(1,k) # to do : init.functions(k) if (is.null(shape)) shape <- rep(1,k) # # sequences for storing along iterations lambda_seq <- scale_seq <- shape_seq <- matrix(0, nrow = maxit, ncol = k) lambda_seq[1,] <- lambda scale_seq[1,] <- scale; shape_seq[1,] <- shape loglik_seq <- NULL oldloglik <- -Inf # notdone <- TRUE # for handling restarts etc, ToDo later! # while(notdone) { # Initialize everything notdone <- FALSE # dll <- epsilon+1 iter <- 1 post <- z <- sumpost <- matrix(0, nrow = n, ncol = k) new_scale <- new_shape <- rep(0,k) ##### SEM iterations ##### while (iter < maxit) { # SEM version ### E-step ### scn <- matrix(scale, n, k, byrow=T) # for vectorized post comput. shn <- matrix(shape, n, k, byrow=T) ll <- matrix(lambda, n, k, byrow=T) # handling censored & non-censored cases post <- ((ll*dweibull(xx,shn,scn))^dd)*((ll*(1-pweibull(xx,shn,scn)))^(1-dd)) rs <- rowSums(post) loglik <- sum(log(rs)) # loglik without the constant term related to h pdf # post normalized per row post <- sweep(post, 1, rs, "/") # posteriors p_{ij}^t 's # check and solve NaN's: may cause theoretical pbs!? 
snans <- sum(is.na(post)) if (snans > 0) { post[is.na(post[,1]),] <- 1/k cat("warning:",snans, "NaN's in post\n") } ### S-step ### # ~ matrix of component indicators simu checked ? z <- t(apply(post, 1, function(prob) rmultinom(1, 1, prob))) nsets <- colSums(z) # subsets per component sizes # cat("it",iter,": sets=",nsets,"\n") ### M-step ### # new_parameter = SEM(lambda,shape,scale) new_lambda <- nsets/n # or colMeans(post) if EM version preferred for (j in 1:k) { # for each component; vectorize later? tj <- x[z[,j]==1] # subsample from component j dj <- d[z[,j]==1] # associated event indicator # passing maxit and epsilon parameters to survreg # and current shape & scale as init parameters fit=survreg(Surv(tj,dj)~1, dist = 'weibull', control = survreg.control(maxiter = maxit.survreg, rel.tolerance=epsilon), init.beta=log(scale), init.scale=1/shape) new_scale[j] <- exp(fit$coeff) new_shape[j] <- 1/fit$scale } # Next parameter value, depending on average strategy if (averaged) { scale <- (new_scale + iter*scale_seq[iter,])/(iter+1) shape <- (new_shape + iter*shape_seq[iter,])/(iter+1) lambda <- (new_lambda + iter*lambda_seq[iter,])/(iter+1) } else { # averaged=FALSE case, just use last update scale <- new_scale shape <- new_shape lambda <- new_lambda } # new strategy = storing sequence of averages lambda_seq[iter+1, ] <- lambda scale_seq[iter+1, ] <- scale shape_seq[iter+1, ] <- shape dll <- loglik - oldloglik # = Inf for iter=0 1st time oldloglik <- loglik loglik_seq <- c(loglik_seq, loglik) if (verb) { cat("iteration", iter, " log-lik diff =", dll, " log-lik =", loglik, "\n") # print(rbind(lambda, rate)) } iter <- iter + 1 } # end of SEM loops over iterations # } # while notdone, if restarts control implemented # final estimates depending on average strategy if (averaged) { final.lambda <- lambda final.scale <- scale final.shape <- shape } else { final.scale <- colMeans(scale_seq[1:iter,]) final.shape <- colMeans(shape_seq[1:iter,]) final.lambda <- 
colMeans(lambda_seq[1:iter,]) } cat("number of iterations =", iter, "\n") colnames(post) <- c(paste("comp", ".", 1:k, sep = "")) a=list(x=x, d=d, lambda = final.lambda, scale = final.scale, shape = final.shape, loglik = loglik, posterior = post, all.loglik=loglik_seq, all.lambda = lambda_seq[1:iter,], all.scale = scale_seq[1:iter,], all.shape = shape_seq[1:iter,], ft="weibullRMM_SEM") class(a) = "mixEM" a } ################################################## # plot SEM sequences from weibullRMM_SEM: # color by components, one plot per parameter type plotweibullRMM <- function(a, title=NULL, rowstyle=TRUE, subtitle=NULL,...) { n <- length(a$x); m <- dim(a$all.lambda)[2] pcc <- round(100*(1-mean(a$d)),2) if (is.null(subtitle)) { subtitle <- paste("n=",n,", ", pcc, "% censored", sep="")} if (is.null(title)) { tt1 <- "Shape parameters"; tt2 <- "Scale parameters" tt3 <- "Weight parameters" } else tt1 <- tt2 <- tt3 <- title lwdset <- 2 if (rowstyle) par(mfrow=c(3,1)) else par(mfrow=c(1,3)) plot(a$all.shape[,1], type="l", ylim=c(0,max(a$all.shape)), xlab="iterations", ylab="estimates", main=tt1, ...) # if (truevalues) abline(shape[1],0,lty=3) title(sub=subtitle, cex.sub = 0.75) lgd <- expression(sh[1]); lcol <- 1 for (j in 2:m) { lines(a$all.shape[,j], col=j, ...) # if (truevalues) abline(shape[j],0,col=j,lty=3) lgd <- c(lgd,substitute(sh[j])); lcol <- c(lcol,j) } legend("topright", lgd, col=lcol, lty=1,...) plot(a$all.scale[,1], type="l", ylim=c(0,max(a$all.scale)), xlab="iterations", ylab="estimates", main=tt2, ...) # if (truevalues) abline(scale[1],0,lty=3) title(sub=subtitle, cex.sub = 0.75) lgd <- expression(sc[1]); lcol <- 1 for (j in 2:m) { lines(a$all.scale[,j], col=j, ...) # if (truevalues) abline(scale[j],0,col=j,lty=3) lgd <- c(lgd,substitute(sc[j])); lcol <- c(lcol,j) } legend("topright", lgd, col=lcol, lty=1, ...) plot(a$all.lambda[,1], type="l", ylim=c(0,1), xlab="iterations", ylab="estimates", main=tt3, ...) 
# if (truevalues) abline(lambda[1],0,lty=3) title(sub=subtitle,cex.sub = 0.75) lgd <- expression(lambda[1]); lcol <- 1 for (j in 2:m) { lines(a$all.lambda[,j], col=j, ...) # if (truevalues) abline(lambda[j],0,col=j,lty=3) lgd <- c(lgd,substitute(lambda[j])) lcol <- c(lcol,j) } legend("topright", lgd, col=lcol, lty=1, ...) } mixtools/R/logitfns.R0000755000176200001440000000007312777447100014315 0ustar liggesuserslogit <- binomial()$linkfun inv.logit <- binomial()$linkinvmixtools/R/makemultdata.R0000755000176200001440000000227411665556372015155 0ustar liggesusersmakemultdata = function (..., cuts) { temp = sapply(list(...), length) m = max(temp) g <- length(cuts) + 1 cuts <- sort(cuts) if (sum(temp != m) > 0) { full.data <- function(x, maxm) { if (!missing(x)) { if (length(x) < maxm) { x <- c(x, NA * rep(1, maxm - length(x))) } } else { x <- numeric(0) } x } x = sapply(list(...), full.data, maxm = m) } else { if (sapply(list(...), is.matrix) == 1 | sapply(list(...), is.data.frame) == 1) { x = t(...) } else x = cbind(...) 
} cutfunc <- function(x, lcut, ucut) { x <- na.omit(x) sum((x <= ucut) * (x > lcut)) } n <- ncol(x) y <- matrix(0, g, n) y[1, ] <- apply(x, 2, cutfunc, ucut = cuts[1], lcut = -Inf) y[g, ] <- apply(x, 2, cutfunc, ucut = Inf, lcut = cuts[g - 1]) if (g > 2) { for (i in 2:(g - 1)) { y[i, ] <- apply(x, 2, cutfunc, ucut = cuts[i], lcut = cuts[i - 1]) } } list(x = t(x), y = t(y)) }mixtools/R/kernG.R0000755000176200001440000000011411665556372013541 0ustar liggesuserskern.G <- function (x, xi, h) { exp(-((xi-x)/h)^2/2)/sqrt(2*pi)/h } mixtools/R/regmixmixedinit.R0000755000176200001440000000720611665556372015712 0ustar liggesusersregmix.mixed.init <- function(y,x,w=NULL,sigma=NULL,arb.sigma=TRUE, alpha=NULL,lambda=NULL,mu=NULL,R=NULL,arb.R=TRUE,k=2,mixed=FALSE, addintercept.fixed=FALSE,addintercept.random=TRUE){ N <- length(y) n <- sapply(y, length) p <- ncol(x[[1]]) if (addintercept.random) { x = lapply(1:N, function(i) as.matrix(x[[i]][,-1])) } else x=x if(is.null(w)==FALSE && sum(sapply(w,sum))!=0){ q <- ncol(w[[1]]) } if (mixed == TRUE && is.null(alpha)==TRUE) { if (addintercept.fixed) { w.1 = list() w.1 = lapply(1:N,function(i) w[[i]][,-1]) lm.out = lapply(1:N,function(i) lm(y[[i]]~w.1[[i]])) alpha.hyp = apply(matrix(sapply(lm.out,coef),ncol=N),1,mean) sd.hyp = lapply(lm.out,anova) sd.hyp = mean(as.vector(sqrt(sapply(1:N,function(i) sd.hyp[[i]]$Mean[length(sd.hyp[[i]]$Mean)])))) alpha = rnorm(q, mean=alpha.hyp, sd=sd.hyp) } else { w.1 = w lm.out = lapply(1:N,function(i) lm(y[[i]]~w.1[[i]]-1)) alpha.hyp = apply(matrix(sapply(lm.out,coef),ncol=N),1,mean) sd.hyp = lapply(lm.out,anova) sd.hyp = mean(as.vector(sqrt(sapply(1:N,function(i) sd.hyp[[i]]$Mean[length(sd.hyp[[i]]$Mean)])))) alpha = rnorm(q, mean=alpha.hyp, sd=sd.hyp) } } if(mixed==FALSE) { alpha = 0 } y.x = lapply(1:N, function(i) cbind(y[[i]],x[[i]])) a = order(sapply(1:N, function(i) mean(y[[i]]))) y.x = lapply(1:N,function(i) y.x[[a[i]]]) y.x.bin.list = list() y.x.bin = list() for(j in 1:k){ y.x.bin.list[[j]] <- 
y.x[max(1,floor((j-1)*N/k)):ceiling(j*N/k)] y.x.2 <- NULL for(i in 1:length(y.x.bin.list[[j]])){ y.x.2 <- rbind(y.x.2,y.x.bin.list[[j]][[i]]) } y.x.bin[[j]] <- y.x.2 } if(addintercept.random){ lm.out <- lapply(1:k, function(i) lm(y.x.bin[[i]][,1]~y.x.bin[[i]][,2:p])) lm.out.beta <- lapply(1:k, function(j) lapply(1:length(y.x.bin.list[[j]]), function(i) lm(y.x.bin.list[[j]][[i]][,1]~y.x.bin.list[[j]][[i]][,2:p]))) beta <- lapply(1:k,function(j) matrix(sapply(lm.out.beta[[j]],coef),nrow=p)) } else { lm.out <- lapply(1:k, function(i) lm(y.x.bin[[i]][,1]~y.x.bin[[i]][,2:(p+1)]-1)) lm.out.beta <- lapply(1:k, function(j) lapply(1:length(y.x.bin.list[[j]]), function(i) lm(y.x.bin.list[[j]][[i]][,1]~y.x.bin.list[[j]][[i]][,2:(p+1)]-1))) beta <- lapply(1:k,function(j) matrix(sapply(lm.out.beta[[j]],coef),nrow=p)) } if(is.null(sigma)) { sigma.hyp = lapply(lm.out,anova) sigma.hyp = as.vector(sqrt(sapply(1:k,function(i) sigma.hyp[[i]]$Mean[length(sigma.hyp[[i]]$Mean)]))) if(arb.sigma) { sigma=1/rexp(k,rate=sigma.hyp) } else { sigma.hyp=mean(sigma.hyp) sigma=1/rexp(1,rate=sigma.hyp) } } if(is.null(sigma)==FALSE && arb.sigma==TRUE) { k=length(sigma) } if(is.null(R)) { if(arb.R) { R.hyp = lapply(1:k,function(i) (apply(beta[[i]],1,var))^-1) R=lapply(1:k,function(i) diag(1/rexp(p,rate=R.hyp[[i]]),p)) } else { R.hyp = apply(matrix(sapply(1:k,function(i) (apply(beta[[i]],1,var))^-1),ncol=k),2,mean) R = diag(1/rexp(p,rate=sigma.hyp),p) } } if(is.null(R)==FALSE && arb.R==TRUE) { k=length(R) } if (is.null(mu)) { mu.hyp = lapply(1:k,function(i) apply(beta[[i]],1,mean)) mu = matrix(ncol=k,nrow=p) if(arb.R==TRUE){ for(j in 1:k){ mu[,j] = rmvnorm(1,mu=as.vector(mu.hyp[[j]]),sigma=R[[j]]) } } else { for(j in 1:k){ mu[,j] = rmvnorm(1,mu=as.vector(mu.hyp[[j]]),sigma=R) } } } else k = ncol(mu) if (is.null(lambda)) { lambda = runif(k) lambda = lambda/sum(lambda) } else k = length(lambda) list(sigma=sigma,alpha=alpha,lambda=lambda,mu=mu,R=R,k=k) 
}mixtools/R/spregmix.R0000755000176200001440000001230413060302403014304 0ustar liggesusersspregmix = function (lmformula, bw = NULL, constbw = FALSE, bwmult = 0.9, # gives Silverman's rule; or use 1.06 for Scott's z.hat = NULL, symm = TRUE, betamethod="LS", m = ifelse(is.null(z.hat), 2, ncol(z.hat)), epsilon = 1e-04, maxit = 1000, verbose = FALSE, ...) { # m, not k, is the number of components here y <- model.frame(lmformula, ...)[,1] x <- model.matrix(lmformula, ...) n <- length(y) p <- ncol(x) tt0 <- proc.time() # for total time # Now initialize z.hat, which is the only thing needed to start the iterations # as long as we use a method such as least squares to find beta if(is.null(z.hat)) { z.hat <- matrix(runif(n*m), n, m) z.hat <- sweep(z.hat, 1, rowSums(z.hat), "/") } L1norm <- function(beta, y, x, p) sum(p*abs(y - x %*% beta)) nploglik <- function(beta, y, x, p, bw, res, wts, symm) { sum(p*log(wkde(x=res, u=as.vector(y-x%*%beta), w=wts, bw=bw, sym=symm))) } lambda <- matrix(0, maxit, m) beta <- matrix(0, p, m) for (j in 1:m) { # Initialize beta to weighted LS solution wx <- sweep(x, 1, z.hat[,j], "*") beta[,j] <- solve(t(wx) %*% x, t(wx) %*% y) } iter <- 0 finished <- FALSE # j <- 0 #plot(tonedata) SPweight <- ifelse(betamethod=="transition", 0, 1) while (!finished) { # j <- j + 1; if (j>m) { #browser(); #plot(x[,2],y); # j <- 1} iter <- iter + 1 t0 <- proc.time() ## M-step lambda[iter, ] <- colMeans(z.hat) oldbeta <- beta for (j in 1:m) { #abline(beta[1,j],beta[2,j], col=1+j, lwd=3, lty=2) if (betamethod=="LS") { wx <- sweep(x, 1, z.hat[,j], "*") beta[,j] <- solve(t(wx) %*% x, t(wx) %*% y) # Weighted least squares solution } else if (betamethod=="L1") { # Weighted L1 solution beta[,j] <- optim(par=beta[,j], fn=L1norm, y=y, x=x, p=z.hat[,j])$par } else if (betamethod=="transition") { # transition from LS to NP wx <- sweep(x, 1, z.hat[,j], "*") beta[,j] <- SPweight * optim(par=beta[,j], fn=nploglik, y=y, x=x, p=z.hat[,j], bw=bw, res=as.vector(y-x%*%beta), 
wts=as.vector(z.hat), symm=symm, control=list(fnscale=-1))$par + # NP loglik (1-SPweight) * solve(t(wx) %*% x, t(wx) %*% y) # Weighted least squares SPweight <- min(1, SPweight + (1e-4)*2^(iter-1)) } else { # Nonparametric loglikelihood beta[,j] <- optim(par=beta[,j], fn=nploglik, y=y, x=x, p=z.hat[,j], bw=bw, res=as.vector(y-x%*%beta), wts=as.vector(z.hat), symm=symm, control=list(fnscale=-1))$par #res=as.vector(y-x%*%beta) #plot(res,wkde(x=res, u=res, w=as.vector(z.hat), bw=bw, sym=symm)) #browser() } ## Here, we might try other methods of estimating beta. } ## update the bandwidth, if necssary xbetavec <- as.double(x %*% beta) zhatvec <- as.double(z.hat) if (is.null(bw) || !constbw) { res <- y-xbetavec np.sigma <- sqrt(sum(res^2 * zhatvec)/(n-1)) bw <- bwmult / n^(1/5) * min(np.sigma, wiqr<-wIQR(wt=zhatvec, x=res)/1.34) #print(c(np.sigma, wiqr)) } ## density estimation step if (symm) { ans <- .C(C_KDEsymloc2, n=as.integer(n), m=as.integer(m), mu = xbetavec, y=as.double(y), bw=as.double(bw), z=as.double(z.hat), f=double(n*m)) } else { ans <- .C(C_KDEloc2, n=as.integer(n), m=as.integer(m), mu = xbetavec, y=as.double(y), bw=as.double(bw), z=as.double(z.hat), f=double(n*m)) } fkernel <- matrix(ans$f, ncol=m) lambda.f <- sweep(fkernel, 2, lambda[iter,], "*") ## E-step (for next iteration) z.hat <- lambda.f/rowSums(lambda.f) ## Test convergence criteria finished <- (iter > 1 && SPweight == 1 && (iter >= maxit || max(max(abs(beta-oldbeta)), max(abs(lambda[iter,]-lambda[iter-1,]))) < epsilon )) ## Print message for each iteration if (verbose) { t1 <- proc.time() cat("iteration ", iter, " lambda ", round(lambda[iter,], 4), " bandwidth ", round(bw, 3)) cat(" time", (t1 - t0)[3], "\n") } } ## Print final message after convergence if (verbose) { tt1 <- proc.time() cat("lambda ", round(lambda[iter,], 4)) cat(", total time", (tt1 - tt0)[3], "s\n") } res <- ans$y - ans$mu xx <- res yy <- ans$f if (symm) { xx <- c(xx, -res) # exploiting symmetry yy <- c(yy, yy) } ox <- 
order(xx) np.sigma <- sqrt(sum(res^2 * ans$z)/(n-1)) loglik <- rep(0, n) for(j in 1:m) { loglik <- loglik + z.hat[,j]*wkde(as.vector(y-x%*%beta[,j]), w=as.vector(z.hat[,j]), bw=bw, sym=symm) } loglik <- sum(log(loglik)) a <- list(x=x, y=y, lambda = lambda[1:iter,], beta = beta, posterior = z.hat, np.stdev = np.sigma, bandwidth=bw, density.x = xx[ox], density.y = yy[ox], symmetric=symm, loglik = loglik, ft="regmixEM") class(a) = "npEM" a } mixtools/R/ldmult.R0000755000176200001440000000107111665556372013777 0ustar liggesusers########################################################################### # log density function of multinomial count vector y with parameter theta # ########################################################################### ldmult<-function(y,theta){ if (any(is.nan(theta)) || any(theta==0)) { # stop ("The theta parameter cannot have a zero component.") out = -Inf } else { if (length(y)==length(theta)+1) theta=c(theta,1-sum(theta)) out=lgamma(1+sum(y)) - sum(lgamma(1+y)) + sum(y*log(theta)) } out } mixtools/R/FDR_spEM.R0000644000176200001440000001270113060302020013776 0ustar liggesusers################################################################# ## FUNCTIONS FOR FALSE DISCOVERY RATE (FDR) ESTIMATION ## ## USING SEMI-PARAMETRIC EM-LIKE ALGORITHMS ## ## D. 
CHAUVEAU ## ## mixtools 1.0 addition ## ################################################################# ################################################################# ## EM-like algorithm for a nonparametric univariate mixture model ## - Component 1 known = N(0,1) ## = the pdf of a probit transform of pvalue under H0 ## - component 2 symmetric shifted from a location parameter mu ## NB: stochastic=TRUE not implemented, parameter removed here spEMsymlocN01 <- function(x, mu0=2, bw = bw.nrd0(x), h=bw, eps = 1e-8, maxiter=100, verbose = FALSE, plotf=FALSE){ bw <- h # h is alternative bandwidth argument, for backward compatibility n <- length(x) # if (length(mu0)>1) m <- length(mu0) else m <- mu0 m <- 2 # fixed number of components in this model z.hat <- matrix(0, nrow=n, ncol=m) fkernel <- matrix(0, nrow=n, ncol=m) tt0 <- proc.time() o <- order(x) # for plotting fhat's if requested kmeans <- kmeans(x, mu0) # is this a good init for probit data? for(j in 1:m) { z.hat[kmeans$cluster==j, j] <- 1 } iter <- 0 finished <- FALSE lambda <- matrix(0,maxiter,m) mu <- rep(0,maxiter) # only component 2 mean used in this case while (!finished) { #while (max(abs(change)) > eps & iter < maxiter) { iter <- iter + 1 t0 <- proc.time() ## M-Step lambda[iter,] <- colMeans(z.hat) # mu[iter,] <- apply(sweep(z.hat, 1, x, "*"), 2, mean)/lambda[iter,] mu[iter] <- sum(x*z.hat[,2])/(n*lambda[iter,2]) # ## second component density estimation step evaluated at x_i-mu's ans <- .C(C_KDEsymloc1comp, n=as.integer(n), mu=as.double(mu[iter]), lbd2=as.double(lambda[iter,2]), x=as.double(x), bw=as.double(bw), z=as.double(z.hat), f = double(n)) #add PACKAGE="mixtools" option when integrated # successive plots of fhat's (for debugging mostly) if (plotf) { if (iter==1) plotfunc <- plot else plotfunc <- lines plotfunc(x[o],ans$f[o],type="l", col=iter) } # version lambda_j f_j(xi) specific for component one = N(0,1) # NB: this is the only place where the known component pdf is used lambda.f <- 
cbind(lambda[iter,1]*dnorm(x), lambda[iter,2]*ans$f) ## E-step (for next iteration) z.hat <- lambda.f/rowSums(lambda.f) finished <- iter >= maxiter if (iter>1) { # This convergence criterion is too simplistic: change <- c(lambda[iter,] - lambda[iter-1,], mu[iter]-mu[iter-1]) finished <- finished | (max(abs(change)) < eps) } if (verbose) { t1 <- proc.time() cat("iteration ", iter, " lambda ", round(lambda[iter,], 4), " mu ", round(mu[iter], 4)) cat(" time", (t1 - t0)[3], "\n") } } if (verbose) { tt1 <- proc.time() cat("lambda ", round(lambda[iter,], 4)) cat(", total time", (tt1 - tt0)[3], "s\n") } return(structure(list(data=x, posteriors=z.hat, lambda=lambda[1:iter,], bandwidth=bw, lambdahat=lambda[iter,], mu = mu[1:iter], muhat = mu[iter], symmetric=TRUE), class="spEMN01")) } ####################################################### # plot mixture pdf for the semiparametric mixture model # with component 1 pdf passed in the knownpdf parameter # uses a weighted kernel density estimate of the nonparametric component 2 # a = object of class "spEMN01" as returned by spEMsymlocNorm plot.spEMN01 <- function(x, bw=x$bandwidth, knownpdf=dnorm, add.plot=FALSE, ...) { t <- seq(min(x$data), max(x$data), len=200) f1 <- x$lambdahat[1]*knownpdf(t) f2 <- x$lambdahat[2]*wkde(x$data-x$muhat, u=t-x$muhat, w=x$post[,2], bw=bw, sym=TRUE) f <- f1+f2 if (!add.plot) plot(t,f1+f2, type="l", ...) else lines(t,f1+f2, ...) 
lines(t,f1, col=2); lines(t,f2, col=3) } #################################################### # plot and compare FDR for 1 or 2 EM-like strategies # for mixtools 1.0 # post NEEDS to be sorted by p plotFDR <- function(post1, post2=NULL, lg1="FDR 1", lg2=NULL, title=NULL, compH0=1, alpha=0.1, complete.data =NULL, pctfdr=0.3) { n <- dim(post1)[1] cs1 <- cumsum(post1[,compH0]) # local FDR(p_i)'s fdr1 <- cs1/(1:n) # FDR(p_i)'s if (is.null(title)) title <- paste("FDR estimate(s), n=",n) if (!is.null(post2)) { cs2 <- cumsum(post2[,compH0]) # local FDR(p_i)'s fdr2 <- cs2/(1:n) if (is.null(lg2)) lg2 <- "FDR 2" } i1 <- sum(fdr1 pctfdr # cat("index",i1) plot(fdr1[1:i1], type="l", main=title, col=1, ylim=c(0,fdr1[i1]), xlab="index", ylab="probability") if (!is.null(post2)) lines(fdr2[1:i1], col=2) abline(alpha, 0, lty=3) if (!is.null(complete.data)) { # true complete data available V <- cumsum(complete.data[,1]==1) # cumulative nb of items under H0 trueFDR <- V/(1:n) lines(trueFDR[1:i1], lty=2, col=3) if (!is.null(post2)) legend("topleft", c(lg1,lg2,"True FDR"),col=1:3, lty=c(1,1,2)) if (is.null(post2)) legend("topleft", c(lg1,"True FDR"),col=c(1,3), lty=c(1,2)) } else { if (!is.null(post2)) legend("topleft", c(lg1,lg2), col=1:2, lty=c(1,1)) if (is.null(post2)) legend("topleft", lg1, col=1, lty=1) } } mixtools/MD50000644000176200001440000002471413617252762012463 0ustar liggesusersc70899afe45bfe1e497346d67a8ce4ab *DESCRIPTION 9e46cf1b6617d11f723fb4f834ff7e08 *NAMESPACE eb21a6baa12fcb6aa442898c71c93777 *NEWS b5e2b87ba9803aa4f1f8770641aa301c *R/FDR_spEM.R 96eb008edd1ebc2806b5434abec323b8 *R/WeibullRMMSEM.R 13dd98fd3d2a18d733dc7c1bb00cbd4a *R/augx.R d9b95227c94da60371c9e0729b1ba9ca *R/bootcomp.R 61470e3bebc02d2e166e9c8c5b863867 *R/bootse.R 99dbcce1ed200436d59d035818a844da *R/compcdf.R e2442e00a19de3f3c7d1b2938e61bc2f *R/ddirichlet.R 059b14e5defb7a753aa97dad8c53606d *R/density.npEM.R dda8c96267a045f0ee6517786c06aaa2 *R/density.spEM.R c5ee44461e73caf8c367228400d7b102 *R/depth.R 
7b34ff227996abf53f08d4d56935e340 *R/dmvnorm.R 5af58056044f75475f0e3f863457e04b *R/ellipse.R c6208248803f53a4da829615db3d66b4 *R/expReliabilityMixEM.R c5f3c7e90d2a3179dfbecdd4b6478e25 *R/flaremixEM.R 1699b871b012d5d18651764f7bba238b *R/flaremixinit.R 82e6900562074b991234256f1cd017ef *R/gammamixEM.R a056a87a053c20e07e96f00163294a98 *R/gammamixinit.R 6e2e3de7fab7712a39e4a8b119c5e3d7 *R/hmeEM.R 7d443f685764995488abbb46a0ea0b98 *R/ise.npEM.R ae96a525e363c5cf8bb2050f3804f02e *R/kernB.R 17f792c11412089cec2137163f3ad16b *R/kernC.R faadc75b1c05a357cf1a5521a552721b *R/kernG.R 80ee6842adac9ecdada0a80616145082 *R/kernO.R ed42f2f65c534b3934113cf175ed2cae *R/kernT.R d0e8502733acc798d82d53e075a36191 *R/lambda.R e45fb22e39852793e902b2aaec245f49 *R/lambdapert.R 363ca040ab258621547b87f9fd483c52 *R/ldc.R 032531819ccbcbded68c198e157d0464 *R/ldmult.R 3595e4fe4208d8ab0f1ba337a2df7dd0 *R/logisregmixEM.R 928f44fefe43811ebcd2ec09e8f2ac16 *R/logisregmixinit.R 61e764f3c76493bee0965415c85356bc *R/logitfns.R 56a2d4a1563e033194716ed2ead44308 *R/makemultdata.R 25c1dcebc2d339b6721c90f1223d4ad0 *R/matsqrt.R d93b3c1d434ff057cc6ed8198732c777 *R/mixturegram.R b00067bef145c26a1e80ef7a4a5d1042 *R/multmixEM.R d1ab4a3d41015ace82f2874b08d58e6d *R/multmixinit.R 6fe03186e33348035370558fb73975ad *R/multmixmodelsel.R 6cb9ac591955760d8cc031a1d6bacf0a *R/mvnormalmixEM.R 937dd2fa6ddb32592fe9f296bc450d58 *R/mvnormalmixinit.R d313ae47d8d9ff8c642c6dd3b7da5076 *R/mvnpEM.R 50a070a1b9af8bf763c158747b3fd346 *R/normalmixEM.R 087fc04146099dce86123509dea45fb1 *R/normalmixEM2comp.R f1e37c4001808412a10e1513bfe6666d *R/normalmixMMlc.R b3adb02fdb94f3d915bbe1699ef1813e *R/normalmixinit.R 000879d20a17f9129a56faf28a891cee *R/npEM.R 8f642b28048d551bb24509e0d4e90f79 *R/npMSL.R 972d8b4ade9a0fdd4ce7ad4da9579e7c *R/parse.constraints.R 45082d45819fb4d9d14f870baca9579f *R/perm.R 00ec0a98fc4eb484aaf623de0a431223 *R/plot.mixEM.R 1e424fc76c439f80a975b9095797054e *R/plot.npEM.R 4e085b2843070c797f907a5f088a6b51 *R/plotMCMC.R 
7fbf03a9406b6ced9e35c9e5bf8bb02d *R/plotseq.npEM.R 02faa99327eace5b409b3481032ea014 *R/poisregmixEM.R e49fc7b49dbec159926f01b6514fe4d0 *R/poisregmixinit.R 7e016ef4d058c1e94e4ddd6f881d4c1e *R/postbeta.R b2174e4aa4a36657d81e50beffb90d64 *R/print.npEM.R 462a5c8f309b2ba25707f3f66eb4a3af *R/regcr.R 8d72c639d2daf98ec0e91e2f7a7199fc *R/regmix.init.R c4d7c8180c138d466589b2addf1c9ecb *R/regmixEM.R ce39ec2a845d99c195d8b7d70176f4e9 *R/regmixEMlambda.R 7b44dde5886cebc9084378e89a256ca5 *R/regmixEMlambdainit.R 2d2112503595576da88c153608d0afcb *R/regmixEMloc.R 3b25992254faebdb30ef0f0b04e7181a *R/regmixEMmixed.R f991619a3c3286205cd7969efb3cd1e3 *R/regmixMH.R 9d88ad51b155591c16a371cc5b107597 *R/regmixmixedinit.R c6d490d169fe3b56c5edc36f3936525a *R/regmixmodelsel.R 6f785fed55c31fe9b268ed7d77f34315 *R/repnormmixEM.R 8af2d7c7f46753fafe07fe4520e18a17 *R/repnormmixinit.R 1995eae3b993f508a9807631aaa21c92 *R/repnormmixmodelsel.R dfc1778aa1914d929c52d63f1cbb6a5e *R/rmvnorm.R 80d0ffe6d790c8a7234126271d5ecdf7 *R/rmvnormmix.R fb34d52c691679b6d56d25b1057e79e0 *R/rnormmix.R 2f4177ab69ac8112f327ff4e8f3c4afa *R/segregmixEM.R 355032dae4f99792fb201ed841021458 *R/segregmixinit.R 2f0b7f0a574080f57c84a4d59e65f0e5 *R/spEM.R 718d2662e5e77f92b2c3f6f8617276ab *R/spEMsymloc.R 9337850b826fb5d15f89aee4de794252 *R/spRMMSEM.R 1c6a0b23bfa6b2dd4eaacf71b6c639c1 *R/spregmix.R a5415d1ac9cf8fce3c7778643a3b7207 *R/summary.mixEM.R ad53a43af0b057d55b7c9a72dbf432d9 *R/summary.npEM.R a875916a07049e1ff3c4c46748d7ce3b *R/tauequivnormalmixEM.R e55cc3fc2dbcd3077af3e40b5e8222ba *R/testequality.R c1846ff0b297a74949447e5345dfadcc *R/testmixedequality.R dcbc0eec3af7fa19cc0924d46912dbdc *R/tryflare.R a445dc664ab054482ecf95121a835d10 *R/wkde.R 129b83daade32c300c7465773b82c8be *R/wquantile.R 27b413fb0c6c42a9c4a993059a3616ad *R/zzz.R f83e7d11bd8e4ccf7052c7cbf4c6a969 *build/vignette.rds d32a6f9223d0455635e212e4df50bf69 *data/CO2data.RData 8cafacfe19c67d59d429a3569970e992 *data/Habituationdata.RData f4d36947145afcfdf9e30a29e3e1a9eb 
*data/NOdata.RData 2bd5741057a02ef36a5e334bf723553b *data/RTdata.RData 2a329c7bcddcdb6930eebb0600c61008 *data/RTdata2.RData 3cb04c901dfa71aef9c9f09b87a57aba *data/RanEffdata.RData 11d247e4ff8bf69d9b7a7ff18ee13943 *data/RodFramedata.RData debde669810513397d5821349847b79b *data/Waterdata.RData 3d480b157c35174ca0321ee3e989987d *data/WaterdataFull.RData 6cd786ef84b29976ad2c0aafea83129e *data/tonedata.RData 81ed8cd286158732e9ca35685af628a9 *inst/CITATION b58bf32f450e3fdcc9aa29338d7f6998 *inst/doc/mixtools.R 721406b832d61b794a5a2d3439fb0e63 *inst/doc/mixtools.Rnw b0a069030d781e8f64a9f7a08ab5cdf8 *inst/doc/mixtools.pdf ce15e705998acbfc5e8f1529da8c9def *man/CO2data.Rd a07ffa6092f579070260a585852c0667 *man/Habituationdata.Rd f69789f43c3d19c313ec7a6246f08776 *man/NOdata.Rd c2b5d7d2b58a312d15cca3263f4bd5c4 *man/RTdata.Rd 38a6560330c0bba78e1e97bafe8cfc92 *man/RTdata2.Rd df8335c4cb858ea55bd452b10e0fae1b *man/RanEffdata.Rd b7a2fe3dcbde5ffc71c2b5df4bc84213 *man/RodFramedata.Rd 2ccee00d91e3de60c2f062b90db012ed *man/Waterdata.Rd c16678ff4ff5c955ee6f6f68b6371747 *man/aug.x.Rd 16285a31f2a4407c3543fa90e908addb *man/boot.comp.Rd a2eaf73e3a84662ac65231c38457b170 *man/boot.se.Rd d6e746a5ddc415cab5b3bb89a972c364 *man/compCDF.Rd 5423f1bb26fc39b61d7fb52afe5aa421 *man/ddirichlet.Rd 1dff44003e07659287e1a04ecbf7f6c5 *man/density.npEM.Rd 2860ecbcf63169201eeae7f5181e1d12 *man/density.spEM.Rd d13316f4df7c11794c4da96f820068cd *man/depth.Rd 859cc432c2c0014974ff2f43c0d78bea *man/dmvnorm.Rd 896bb2e222f9c7a8f651f001e008a692 *man/ellipse.Rd c976c9278210ab895481cb109b35a4b8 *man/expRMM_EM.Rd b45a859f647026e0834449afbd42295c *man/flaremixEM.Rd d417e710fa3d920d8636ab881381e798 *man/gammamixEM.Rd 8def2dab5420d98f81cbc8d00ee352dc *man/hmeEM.Rd a8d92274b4c4134c57d276d48392b77c *man/initializations.Rd 75eef340021a87cb95c03bc8190f15dd *man/ise.npEM.Rd 5915b4e20695e52e1b37c9b237da30cf *man/lambda.Rd fd08fe01bc6e4156ab9f37fcd163f745 *man/lambda.pert.Rd 806f084784aaa77ba3de702a195fc1ab *man/ldmult.Rd 
e4fb164fbb53dacfcb340283579df48d *man/logisregmixEM.Rd 6f3a941d9400dc69d0b7b750f792bc77 *man/makemultdata.Rd 1a1a5b5c6f7c8c3a243d42a8b276a9dd *man/matsqrt.Rd 1d4976e8a49388a25d48f7b52c5f4073 *man/mixtools-internal.Rd 532f9457de1a130009a124c137025c0f *man/mixturegram.Rd 2db6e593a73e608d83671bfe00ee0fc6 *man/multmixEM.Rd 94954f75aa38ceb4e494b271fec94692 *man/multmixmodel.sel.Rd 84e378703fa29ff9fa84cde50e1175b0 *man/mvnormalmixEM.Rd 6091b8400e8741e40c594d138bd12b92 *man/mvnpEM.Rd 7ed0f5650ce0a1811085080321cf1c2e *man/normalmixEM.Rd 554c0f83afb47f41077ede6f0478ca38 *man/normalmixEM2comp.Rd ddb06723389f6c3d2a0ce74be7434296 *man/normalmixMMlc.Rd 496b6701ec27721345bc76b79e2e542b *man/npEM.Rd 72e434f64ee18058b710cf74108320e0 *man/npMSL.Rd ae258c402a2b46b3579e7777d846d91b *man/parseconstraint.Rd caeb3e81a3967f6f24546ad91f9c0dc8 *man/perm.Rd 7fb42e765af12a7796b818e6b03fca49 *man/plot.MCMC.Rd 8a05e74457672324e42dc2fc4920b928 *man/plot.mixEM.Rd 661ee1767e1e59f633fd51dd692a6bfe *man/plot.mvnpEM.Rd a468309483c4ae6d9bc4101dfa7c3167 *man/plot.npEM.Rd b2209409d6529e0676e462c94d4bff11 *man/plot.spEMN01.Rd e87708441e3e8c61f828208e47ef5bec *man/plotFDR.Rd d8b1df51aee17a56accdd65eb49371fa *man/plotexpRMM.Rd 53be9f3a2b96df2eab8a22500e9df420 *man/plotseq.npEM.Rd 8f4c53fcf045a3bf9154f0f4917b57ab *man/plotspRMM.Rd ea5419d7416a067e64a1dac170556ecf *man/plotweibullRMM.Rd 11d7b4aabc4c0aa688900f07f8c524dd *man/poisregmixEM.Rd 3f2c65d25f9ed8b0ed9ba988691e5621 *man/post.beta.Rd 547221c68209edc9b6a94998f5d3ed94 *man/print.mvnpEM.Rd 002a8ea6ad9a392548889c4b40603408 *man/print.npEM.Rd 48f62a2a5edbc51a548ffd152c3ceb68 *man/regcr.Rd 66520d118c35bc54dc68cedee7963175 *man/regmixEM.Rd 66f380bcbc7b3acce2295bc2cc958fac *man/regmixEM.lambda.Rd 34be77565bbcea7d45aa1a12afb6466a *man/regmixEM.loc.Rd 5912a91ec4c8eae883c131f7b0c45a24 *man/regmixEM.mixed.Rd 59c53259489056358fff325dc0d5625a *man/regmixMH.Rd cac402f4d34ba2a2e72c42e63a4b2b04 *man/regmixmodel.sel.Rd e5c39657c9e6fb25a70ac513f9a94327 
*man/repnormmixEM.Rd 1b28aa913a7e3f2bec0129da9f534090 *man/repnormmixmodel.sel.Rd 4ecea92b3ab4ec6fda9c61aaa83d2d9e *man/rexpmix.Rd 283ccd2e22e6bc187d3b412eb3aa1859 *man/rmvnorm.Rd 91e761f99f003c6298cec7885060a56c *man/rmvnormmix.Rd 6c10958f200a83428bb5648c3454e2ef *man/rnormmix.Rd 759629421e7b9dafeb9c4af939b27d2d *man/rweibullmix.Rd c13dd8b7b8df7f76f61ed4bd76a96f22 *man/segregmixEM.Rd 61f993cbb217ef0ba2f36c258852e83d *man/spEM.Rd 2f1f7c0458163ea6070384373ee38576 *man/spEMsymloc.Rd f53a191d7396316355de08958bf1bb87 *man/spEMsymlocN01.Rd 1e714ea420d69ecd0c55f3345e989415 *man/spRMM_SEM.Rd 8cdfd07205c82a7a7ff15dbdfa16f830 *man/spregmix.Rd d2f4f5d0a553b8dd5bb3e251a1cf91de *man/summary.mixEM.Rd 33b88c122cc1df7b4391da510315dc03 *man/summary.mvnpEM.Rd fb2df20e6f0077e279fee305e1859040 *man/summary.npEM.Rd 110288fe4f5adc938b85007eade15885 *man/summary.spRMM.Rd dc7fae07ee408ba28c53960a7ee54ff0 *man/tauequivnormalmixEM.Rd ac7659fe36cde61769fb5c8fb1a2c102 *man/test.equality.Rd bc5650d17b0df897af8568a8adbcdc08 *man/test.equality.mixed.Rd 99018278282ba70a64ac736aa1e8e5e7 *man/tonedata.Rd ec184a30d24e76b13c021e9ddbf47b9c *man/try.flare.Rd 5673c3b9009b16347fb3a3dee97d0bb5 *man/weibullRMM_SEM.Rd fb40e0bf60b0bbd717f1408dcb5791e8 *man/wkde.Rd 6c35d1136da65dbd2f2583e19a7ece69 *man/wquantile.Rd 1512fde5c77cc87a513bf0bc0c7ec7d8 *src/KDEloc2.c e9ddda60cf37bb1b91a564c31bb26b9d *src/KDElocscale.c 6141ac8718537ecc30d5d768e55c74d2 *src/KDErepeated.c 418488e632cfbf9ec0383c8e2a6b4d68 *src/KDErepeatedbw.c d998563ef2b3b533ae43bcc756bc6953 *src/KDEsymloc.c 0b31b6a360443624d117d3d0c4e8edef *src/KDEsymloc1comp.c 3031711e3958323aa555081e7819333d *src/KDEsymloc2.c 4e866de60a0ecf51dd1962ef3d6f3c00 *src/init.c f2a2b6580952c87546e89ba91d8144dd *src/multinompost.c 92c56f68808db44df1269a4d18a23ade *src/mvwkde.c 949319e5b60fd36c2ed398ee94b8bc34 *src/new_svalues.c c0841c25090c2cc258b4974369238512 *src/normpost.c b6a84b03c05c76bcc76da25eff787f11 *src/npMSL.c 98faec8e3b8a3f838f510800b2166bb0 *src/sd.c 
9f56ca76cc30332bda95b27f8ece61bd *src/z.c 721406b832d61b794a5a2d3439fb0e63 *vignettes/mixtools.Rnw b509b00eda098a844800f8ade63048eb *vignettes/mixtools.bib mixtools/inst/0000755000176200001440000000000013617013707013112 5ustar liggesusersmixtools/inst/doc/0000755000176200001440000000000013617013707013657 5ustar liggesusersmixtools/inst/doc/mixtools.pdf0000644000176200001440000204506613617013707016245 0ustar liggesusers%PDF-1.5 % 87 0 obj << /Length 3263 /Filter /FlateDecode >> stream xZKwWts fHO4quENDB+P Ҋ{ v6]q8mfW~}}>wY:Sf~V$e:^~﷋t;n7sgRqbdVW;oYn^Z’.t^>/6,Y_8~/ nxͷmqe4%\%d>?C߂pA{.d(eɒ)]/*m#_#㭊k&TIYkMhTHGQ YfY6Qέ$r$q@c:Ô\`Sӿ -j}w(=ZYNSoHf9BHLl &UğNZs ̓"I5KΰyC[JKj?R&,CkYE ;h3-7% Y|Q^__ v2KgHLald7dU9{U#(~W2U)*b]i r`YWGb)g L=\~P)ns}GҲQX 9`[[{opvXcPƀH!b(RMϧ7d1zn7ߞVfNۏxc h qt(tAG6zhn A\τΕ=ۉNU /<ėp0pxGF!6|;9 (HG 'aw0WD FXk+C¦[* H~V*LۆKcd5? ]$DdpaaK޼D!-j%nEae:Z ~>؄FnA[V yPFBE15U?n UmHĹׁyyGHG+1 @7@%p}G ,B]'ec/~9ntԋ>! 4aݡ&K1>8wlF:\I}`adsMW,;dzIV+gW0lٖRûY›T)th1AWրN.8yr 8P)ŏpn'N-eقIph)_)jaM$Vlt>H%0Eπ{`h^9$+^5<²#ݬpZtulQC|E wUCP56loJ' GbAe8݇C ġ_ 2@)#9y_-֣F&#-FǪT=twW-5guKvW&t3 l I*L&.nNv$C 1b JѬyV y;[\gEq$xf>Pt{. UX4rs6}BהeE\j%{ϥlrQjG[ZrTޝ"/)X7J 2}3BxI H 8? c*2EMXF TrD+|>mPYZ_兓~kl2`S!zΡDfe| R8K#?BR!Cr#D(rqYܣu&9 <k<ɮE܀wOW㫑e h3I)~yR[T W!=9bIv*0!f~RD(~"\@U֞('; Ca+Ц!;!I/2glR}P'p![XδWZ?w!fө0ܡqj;'nB8?Cssqo#ٻe>XWhxA?==592sbrR6&W6 <+u4 ƏBbr QnicJ|l;^ݰ>n A#PܓZ 7٣zQxc㑋u G e6tTC)yQj*/91*=D};FI﷪܃RZs_qIDU{}OH,1$C ྙdQ:lAIDV0-4>PE-o$<> stream x<ێFv>fXd0Yx/2yHl<-Wj=K]ɒZݶnbթS~NgO/ 3u՝bf:efW/&$G#l77b3=f!|ǛRj~wN{o60ځ;uX(SM𻕛'św6 =qش-A5TGcuhi5^1x}>b[;6nw[Z<=׼[ p  !FkxM{+4й2c==JCO d,T MÛv#8AG* ގT2k\t6??& 砕<$+%|@z/-Nzs!~W䑰[s6qCn`*ӢC(‘vb[·fpn2>#Z- V;X@&Wk_Sv-)6逵نy=x(}DTgK(9z2>ў D`G@q6k4aГO\426OKn+ǣnȓ\.#eنL<01%'ujz|s(!"dԏ |qpOQTaⱾǩzO@(E ~Ħ)o6%"4~w>'Gy"P("tsaÞgLn1$R,Aty@ŤX7#'! 
6|f{Ś'8d ]{O- * d%arwzq s&[ԄcjNO$0ْ[,IDR%*kO;~c)6m?y {}:yL#cAx:RU 9{_5"5͔DM :rN몱Ui,ؽ1 dxǴqMuCSe]vb3/p5 Cz_<"qR,VD6`ɶ9ۗw:Pӱ--s7Cn]:A<>" $ܰBba[qc-|1ɳ#m2Fx`VoUKaU@Z"Dы&!-gn M*F[/\4ALDȃPXuL4ؑzK$;/~F7OYLij陒̫= җT}\$ѣQ!n_u #\,El.:R 7:p~׳,|ד- =~Yl,G6j N[Kk$)xs&(<n X~SUeyE, n/bk fökl#w}phg B+ -hq^ڝӨI_ɿ]8X81@4b5^Hvon@ d`K[G6J|KtA#}\|JV0W.8VY?ϻ(BT"Bq't6gp,vӆ`HVg[ ,tL4Z@D'eYu+ uysH^28 pҙߟΫ20T=;g Tvttw#'Zt۽ 1n?.E*! V#oWYu ;TG;rLSB"C \MXCK@/zދ'PaSSq6GMP˺ tm2ٓ #:^-!>AQ0>gYzg֕6z_F?0Qgn];x⎰2MH@^n>s{ k;^ğhppxI*Rk}H4e[XLO"]F`kFF;Ѳ:@k1vѭPŒ(dQ^G=k7,nPͬ#[ct#.`4XQ@MacIvUaT f9 uܱN[jiAu GUF(C"A\XĨ8) 6 X>7V12 Ec`@` X3rln,.̠t{]Tx G#£ rzxL 5p)B8w"78U.hG[VwqOc9imM _ $@#SCz{0n)eum[2at!a(斝[IQJg"Ucvx:ގpj Wn}S?J=ꐀ(M T-fSFx8mh`?9E'US4IjWs ~GPM# YW1²VJS(ac/7@Σ+5:hCC|ocJA]S1,Ge]Ҥ*v\e‡X@YeX7rn 0{f)1@W'2q p?u&I> NLI"jvp%.i}.9|Xr>iVa" %O!s_PDmAOqÂS>xjX2Wi9.7o M;*7&ߕQH-X: )psDJo|d5w> KPnۃWQX!VqߠJX 4`IV;eL5]3TI{5IeJRt~ѪD5)wW{n m}pOW^yH~ AĹ\?>zEþjțUYu;qA9Ih>f]|"g9]ͽZ9e|d38  U Otf)Djj>b)k^nYiG5b S]":9p$J%Wz$}1Ӡi~S}I@u=x:ֱKu(w8yIcuE@Ixӽw]]8+r$=#{1іZ+5Di݌JBM;C+Fhp9 hiT`.ذ*%(ܩ('D`s > =|]$P ,}853*Wx̏DZSAҕƤ>i_U[kr\\!TZMe%="|Z|U=)#eXwl#!hV)s̸ ϙt+X0ݙ15ZYXnQ<%x^9;hy &d 1ɪ%@d|lT ddVհ.PB}0uDkNǑSuϺ_j*KqzD)V|URvOd'9>c_[Yݼ$BOCM1cUU 0"* s#@] A=KFn Y@aWe ܄T1텤a±T\p.uA0{iFs(:I^lOqJŒ?zG:WM?QG Q`\܄+v|-17LʭW7s~ 9&#[@ bc!$\5ղn WJEOw^^dj2R4Y9k+e\]b H "L ) , ,TƣT&,m>9GhWoϦS$BjİsMbcZ&?-al}nŽ*U(@&QAp6:j1Sr2]]`W%S&3uLQ8l7@`g$ -oqfi3I͎JaMQXmԩR;=JB9Y(  He +ο1m:`A, X J WyzY pKp@P5be+5h qjkOv"|sZD0 Ne7~:h .;<2k>F>Cw`޻ts֧qlp~$NmRz6\l3w}8 ȗ S }XoK5X-^Xr¶Qy 5Gܹr1TyU/7 #,ᓘC=FҦA j^oOzV|<\hˎ+)޻TeCJPW]BV! 
F_?9k7ɷnON>_iJDJo6|,SGGA endstream endobj 154 0 obj << /Length 2174 /Filter /FlateDecode >> stream xڝXY~ׯD!!>xegry7đ󘈒_lj1x_o֋wYTqTąZQedQazW JG/[YƘME~;|Vs>tT+G|wiwNS>Py}_3H)(N:\Z Mi 6PAuQ8 M z2Ly'N?(wQRT/čTŸIRY=ȂD6kK\Z> 39* )V`P(ۇJ@:yMv+3eZ~>G5$Jr”d3gDVG)UBmf}$TZ8s않x8%/J'%~}⚿_s]ݡ6MEa%$JQtL"{^"q\LR;`@/!ɝO4<ɧnEX*xQ.%tjYCB̘HD3T tt7ߝ5^qUX8֒+ #i0 +5O0цBY7T Y{|~ Ȓϲ]'nSgd0=Sdkyqt\+h2; z8AOHAfFZm"JN ^i[TxSB>ӂn(;}Fg?)@tcӶKO¦橱4zЄ5qܳ x4"?G҅]3oF񋎯)3is & Hj&@k_vC圖`7椫ݷ/|zib_rr-O{rM}F[HNdJ utʤ]uws*.أdL-$ISrS;<Բ ($'#4SO޸z8(km݉ۇPe5RDl@?ԃ@Ƚ%T_=6,m@]Fw #cBt"LpxWy)^R&e^{$z.?(DX E2RZsZ@v9IJ@Do PY1njrpa>O/*cM/« I%h\2W+swnٞܝ:][LB>=[?Wy,IpIi]m8 hL8=3Q%3S|w&߯`<wC go$3 & x**LK]S0*3dxR7LWL\Dl8sr@&د*r9$E#Y@ S?9q!TAfâ{ݓcLۭ梡XH]]BzX, endstream endobj 142 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./mixtools-geyser.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 158 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 159 0 R/F3 160 0 R>> /ExtGState << >>/ColorSpace << /sRGB 161 0 R >>>> /Length 603 /Filter /FlateDecode >> stream xMo1+N5QV)+& RUڞ+aVmv?\({6W3oAz R *X'rnu[~HrsM73 ˻.SaߕbJ)ř|7Wg൹O9?F6NSj P>p9\ZD%it9shd),@-< )•(}9} +^s̮66%$4b JiPJlƈ-vH`0؍s5~XTaiKqT(nt8X7˘A JcGÊ{IsXqsWao:}]2i\[Ҝ-E$/gx5bPƫB^AxwQ!!Jޡ4E#tw r_J^{ѰPl̞Ι0_ 4!Q*N}M rf,=dllX9)GѲN=UNKpZhL8@md'51}=k~~֡p endstream endobj 163 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 181 0 obj << /Length 4336 /Filter /FlateDecode >> stream xYo= 
Xi=bضZcIX!9~9C(Y&]fw;Iqq{!.^|Z )VEswwy' =t0Y-/TwSn-<罟g19~`"k FsWbmc` k.Vɫ072{q[<$Y/"o3Mk-Cъ&]`Dʠ(oq.b]wl_Yy7bd#l4TarÅlnXb/P>j "f(Dio f76 6 j.kl<ϒ=5qaNm-ۂB8{$5Atp<XUN$,iHɎ$ehu l;NIa8v]>|s.B֭QvU 5IjCTM(-~U#m'loF!MF;%\Ia!&Q 3d7^.Y=squnb${qǗG~Ao6w`I?Mk'&@kh";q#]bj Mt!b$WdHQG*qqːP.T8N*Ym62+IjY_d'>D(u ' f5/DZlbBAk@̟]ѧ1GeC-7&aIJQ']U^iwFzKCrql潏z!_s 9Ji o]sQi;󨇖 [có~w]^ݦY?not=* x: ѡO mZ/kR@h<5C|zy _~^@QQFA1#PdJڳ^ၬ,,6dnM{R=c(``!5އRu1j.b-,Aԋx-6M~@n] !t"~CQe:J9-xCg202×Th!c)һFHo#W1ʹK֘ 9oJC ɽC79"ifD/˗B_VL#svCI^zMɀ8\+ZI躶M9YPL >֌C[ 1c8ްL[BJrhUBUgsgdLU_~LrO!+P.<6v.{S}jqRMFA 8,qM9)jf=Gw9Nq|PVlUcGnU) jKgrs@26C=ZԶYO)bI;B'X^x5vkRT."[혪#< TUSZn4K!㟇^z|<6ywYj#ڤ=pf2;0F6[Jb[%8<78Lf]G"?=AG3p\?qȏ&'GRƌ[D6uIbFK"ax JEyGӍ TYš<h*ʍSQ7h▾cJWvp@tƏyY}*m  !hT hD1P848eh)J~Lg9czV*Tp̈:!ۉ`IAA۪[$FbJ">^ro('>ZUzJZ^iqQІV>ѧtpc~@Qfwptţ?y4;^%Z|Ѩ CI-s͌K*W_s½~nr(uÙA֑K3$KɩP¯_~W)lz{.:a@lt/6k-\l˺> 0y 5]^ 6ѕ c`6H3ƪSDxgKɿz5 Ybr*u}Um!5N>rML:IJV,~18!{2 p`eL#wOWY{/M^IxQbd֛>,HU=ؙ)Q/ntVOB ~1n?q@HYSj\1LR7rPaYc`1$S捚KGlO S lopn0 %Y̺WC:沢)]fGn*ɞxw?U_8T,2*WBT 0V1v<_)1ZF`F0JAR&Z7Ю ױު&7RD|eއ;}}lFo]w5rU.xQw@ix4E}~:@W7@m>a{!CmeVPR@W|ۂ}.89^W#`Az_9!AQ!=VGB`Ɏu<!sT^<bËż93Juepe jCXPz4dAL޾yg曖-YJ4|˧}qU(\ L;fHR#0̸'ɠaDZi2P s0&lyl%xr^.W:ej_՜h)FPY339#6gq͹e\5_q4A&j̷JJm,YRX|=[EӝIѩ,}tM endstream endobj 2 0 obj << /Type /ObjStm /N 100 /First 824 /Length 2929 /Filter /FlateDecode >> stream xZr6}U^@ʛ*YUqⒽz@s0r‹csFR≼U!FdYKԁcR0e5^0--ijtLzfDf̸tB5024S2-X<20 uJ,dH&rP ԩ A:!23(V71J$<0*iO0  ^sL) *¨ x7+wF0TB=l7aO[ 0 e >Eb Uc(! jcIappBH`*UKFs#I@)k] `)A6_SPz"%` 6OTy'``r%D8K r)Q49kD‡.  
K ۓf&'M*aA*ĞN$ˇBE ` ?E& H6,`x{|)w!t BPQC<~̒7,yQX=xJ.˲i0(6Ӣvul9+㫪7\￟<ڊ:'C7699Egz%˪.ګErV?{6=﫿Fc*XU4EyI§YQĶ[r03d}-"(j-R?'~Ob9Ӓm fCaS+~ֵ˪(ޢkUlxX?U23|+B(v+td 762p޳WŇx0p÷߮l1/fENcm|K܏u4W3%UӕcV*ϋxAUQGU1kK;mDٓ{'h`JHrj(0UӲw;ĥ+ `}WY/.&;{AIUOc ?@.l޲wZ;תЈP+x,B,BfcQ~HN?5$7ɿ_Wm.I"mY˫29 rY/0T #T0l@7PO~8`eI,y=<ʺ1s_N!q^ JVr)8vr*5XN5TJ ?#eМ>&FZ܄ u# +=w0}'riSʱQzba Hd\ ح˯tr|~q,:4VӖ4{ۛ9vA )QfAi}L;X;>M:<# uDx)sTP2[ h{Uy$:K5e:n(PIGRAALjQQ:p:K0sZ:"cÂT#v"2F㪲z`?͏gԟUxLvIGB!q#7%1qTa1*%Lq#* Q4&"D戈LV!)"P6;uxzQw $췆!J-cnr"y͵uBj %>R=&xHix',fZKE{-{ 62ha9p BUK*{b u}!+5{ }ZDj%DuBƭ3EIzTIiLuׯⲫ.:s VX]-3eܦte\rn.$#]A*rPKwQ㑂[QtG(r*rjԪ$r r f vxoririj@V@V%guS:R} Q'A>E;G9{Qg˫"o]ړގ.5bdUѫ]R(mMz)'OقI>ܒcl<$ʼ9].R_+ѓj>^um-]cuEyZ6Ŧi1ExB36Ye ߻8kɳɴ]H.cLkcu-f)i4&,Am%sS2-)#qН<[ˬk*$H^l8D.րauyCrA7#Gw0uEk}f D`\hM V>`jypT xnG<{L>ȸJ`Mv/6eJ6Pp@$$-@-}Qz,MDӂ9DKf MS䙧UD[70G 0k F\˹=fLFfT;QfHA5 KCOy]j*{:9\`F}_q=t٤&@#@@@@젌Wr_k=ݐקqlepn1/1"/ "[9a(6'0E>yVb⸧IΧH53l+x'3}vW8o=$;*B"z}A"o." $uH endstream endobj 200 0 obj << /Length 3720 /Filter /FlateDecode >> stream x[[5~_z ,ZV *&lQaY=0{& {.ZjfCxq.~>I;󳓏n"[[/'gp8vr|ߜݞ fkw:Z7/5>}7U@%/.gwũ%]f/Nk^kt[ 8r[Cߗ?pu}qW?6ߙPӞ,Ҝ1r2V]// <\h;GU\KD0M0p76ЙNX`2YYa䰉l:dtj%юiȨ-.n8]3CiYxC%$]جbuDoh$M$փb-xbAzus:U]e `_ Y8@&$<*%d*Hwa- 3@W`=I`?1 8W4tXUF%*'5B% x^-Um5\`Y6GK*wcUU*/V}+iQN[^h7]g~h+3fv_ofdenUWYFwܴK~H a~?Jv©|jt+*!l|7FYtAZi0-cDgչfC_Pf)_6h x/2Xq آ/Oy-PN7rLi42\+)}gD?ԅdx@;,"PMHue,!3jPDIFu_aq Asa]*MS΂Wr)]b K &K^*HFޣ'21¯|y/%lh+ 8T?˜Is3,SBg]!jU\1F%}s}j6fa/O)'zL3搦]ww0{QG41跈3+k lpղKsXl8wr-80v|¬fy¡F`(b⢒⦓ȼ7Ka[(g+k)e'\SKt^Q/.y. .UcD@[fXn#$WsI&"/ r  [^d:'% MyP3TFXKNXaSle+1AsNi"\,goڽ$ f*K~%fO6tX#lcd lj*SxZ{u;N fЎ1e<ۆg/?s> "Xbɩ伋\~8Hz߬VS/!p»#,b5, }]RD-PgMo^>I@RʎbmPwU@6+Jn9&E5bn3"Bv./I. ޭ..7v 븏VRٍn?,rVvXg?|MгԁrF Ѿi/R =+&M K5>'wd=? 
A?s:m|sˎНG"p/VKDMxwsI .sNf?&0Y-2 f \Z«1.6O?$6_*ROh3Rs˂H-Wf^(78&/9I~뻣Bq'd*ʣ=:BP/6%ݣ@תK4_ 8y,D  endstream endobj 216 0 obj << /Length 2796 /Filter /FlateDecode >> stream xڭZYs~ׯ`DVDxpͱ!ڔQM՘Q!mߧ/ P&[[2g0 mfٟ.Ξ~ ]\j=|gy_UeJ%A4O ==KSw/%[xNwO5 Y!y]>֫,/dggzey&%o7myrl_1Ï7,"c3ZՕ"Pq3oUU|g t5KZmL}]rމ`ۛItҪJ7#kJgϬj|tfe,T|{ /֤KrIwp&?}v<`# i67~ٞ-MQ+ h*#z2D۷{~ Go $ƿe_˙urMvxJPm?modaNZ-LMV 톅$Ι?.=Ҵש\tە pYmGբvTdAϿ#W.ߵg.~jUi9%Y~n:34k<ݹU8-<o/~<;9%xD v{~/m[J9@LuM·<țB(}&2#N(2RV9ek3K^,NʁG< ] E|̅'vuquV) ?/xp;鎕2\u=|6zF7b0 l/vP}d L]F3&^]=1 W \ЂsW#Eٕ&k4$Wu7Z-ne %q kOSb4iElG=W 9C"I;rmS6YB2R !Vwz->a8`h G1mر:kvSWPnV,t$w9՚zQ2Bѯ<)a{BA4`v12= 8Ë!$Wn.ۃ,'w;8Lwܛmݎ8@#Қ!m{zù7h!x"maְ9̂_/~.d^A}BWM3Jw hJpʃ6C<:vh eU>;}P5>NR">:f8HD#@%uUv8LD;$7S>:YHpLWuо8U[?fm9SB@:]!*Hd5@aM ҔC:8v)[8` 2T\nX78KA)c[H$ql!jF+<Wy5ч1wt}jEl>CAKh倔H\Y( vͣ9vt"XMJתqÙDd?z*N=˜}d{`z1?XV>$dᇖ͟=3>I ڏ|ZܭRvF.a)dCEAjރg!,s]t $ 4gߢ(IxoU>>}69hZMӒ@-=52!VE +3*z}5X'9DBAa*&ݭؕS==;CUy Gǯ3_r T8; 'ɹ.]]wФ`udQPukFMmp'q}NS7ՠA)G硻QnT7- @e^hbq51RҙZY*HAa2gB%M][C-*4nFYcaP!vt:B:>MܽnN#5-|6wnG=QgхqX:M:ȭa&;VӆIE5$V+[Cap%;~]٧ GG( gr(~6yX$j])\O&)õɝdi 7RUw,;KXU:Y?qǂtb=NBϟ,M1;(/f0Rbvt݇hb~]A*0@˄O;#‘32^2r7K^$dЏz~$$QQ#{&࢓&-鴫>lhOn0UxOELgnH"MGWC_&S endstream endobj 211 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./geyserEM1.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 219 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 220 0 R/F3 221 0 R>> /ExtGState << >>/ColorSpace << /sRGB 222 0 R >>>> /Length 529 /Filter /FlateDecode >> stream xTMoA ϯc^~(mRQ4 A " X<` OU-ւ z 'J"t{0Sc-5'hX[UFjR"'!G'I􄄛FԦ W=6ѹ; lot5{`\!L G| |3l`:}=UgrYƥ!00i964#SaC]ݦ~%6VE=?AX~! 
.bBOE,}pZ!,NX>>e ;X2&|=6|= ~ip4*ZW!qiO5$JV 8[S`/W`Xw ;< d7m n6?G7f3C=>s.+T)]>-6's[j;A5mk,ݪpWO endstream endobj 224 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 212 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./geyserEM2.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 225 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 226 0 R/F3 227 0 R>> /ExtGState << >>/ColorSpace << /sRGB 228 0 R >>>> /Length 1366 /Filter /FlateDecode >> stream xKo7 +tUD}MZ0!ȡM6 {6vGkoуKH(cP ԙ{G?gݫ~~>]vFcTyiѽ:@>*I.ԇ\16/ԯѡB~?G5;~8B 1:s$QtqoQP砀8C5;!xUdIޜlX7շGe1uPEGV;.MYX\ QN\+6^.J^'1r^nmАltVCNKԘiC84^5FُQI(ʈFg(1ʴ5i+1ʑvAn=Y֩x{_(!kΦxB\6{9^D\+6$+`/7O7 8yAs.S狛VNutgԇ됖8Zi!ojѭCݾ{"HÐ=8'ˉΚP"1qMP X!TDV9 -''?3 gnλ`5uS0H-=Kby'8h#EpvTD6fSB6˹9Kiv)O/lrnL9N:P|${nPA9mr̀ȱ2PE{Fmg(Ŷ8=jdE^\:N,SSn*)^Ը>2)D A \QrB1q/&n1t40kXt/q\=9l i6D9wsU &ؔӢ{ \HP bwI뗮 %go]5z#S;q(z=bn 5%J`wRKeKϘG4'q4JOwE)SXS!X]ݱX+j|ֲ߰?me d_F(F?"1aKq.[q9ZTIK=ϗ29y椭ws)93ϒֶ\x;ITKn}XxJvyi\k&zz\?3vJc{rf6d.XACĥ.X`m?Vp K8M̥a> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y 
^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 242 0 obj << /Length 3162 /Filter /FlateDecode >> stream xZ[o#~_>H@^t 4iiEIƒl+kIFf{npƔ&ibՈ~]WRm+m[*wZ[_]];s=vٚ~=\ pkw'45=f/tsf?k4/O;|w_{r];](z`x HI~׸x᜾ZZ53o`:ms# vVծeo`B6vVn i+[΅9程Ǩe˸Ύ䘤|mNrHj٤L,%$ CJfK獋k~v$"r 홦RP{|/Y]_ݮE{:%~mGnSA2z8=Q%dx#i%&Z::m[~De6R>3n#|WÛߓl)g#\Z9& rZD4޸(-TƉ\8d)gG8v a=2~~NY =畩Z@VY#iE]UQGл壂+[Fɢت/y"m`qdQ@9SD}4GvЍD=UJv0G=چ3}']Ot!b;f?3Jz> G&epf90.3v|[Y/d6TDVފ/$*""BDŽو?)jg[OmwpG>`_7)/of%;0@3GM18} 1ru;7ilA͢{p/y9Ŷ3%r0ku?)7+V>l&A oUV<5>?aM͌0=bxgD%+m8hZYdGw{J冢/=q#c*F=5Rۭ"$H wQ_zx;DUzRמvd I^LH/ u7,N%3mMV1Ra]![4\_% \}:yI îv 3o\ϫHt>P9Ýez.Pl=' e*"} 40g\dKSu,2D*$r[b9w9b@v{AM1a}{Xˡ!8fʣ{d30qWgTW_ǚ" }`X0!nSF],;.5C̡~>RTNqrO(p[`㍑}3a"mцb;{ƏYAVT>'O@0sL2?$Cyˋd$jʷ(sOX0=ML년Cb?D>NeUZ`DCjk^ϱ6 !Iz*[@!8:xx/(ϑn]<KެN͒/PvsIэȺ3ǦvgB :uXAt"^6.1/`CҖ1~>(v 3B!v gmQ`-Ac6nI4c2!VCϼv?eVr!F) ;,%S!zO ԯG7byyk&kMw H'b%2-i&Y.w--zzRns\<`[A̶_D5-hT7Djd/N͗mĥ^x?0ïFCcd% Qj-*Rl2)$MhgיRBjƮ`j|a; ww-wymfݒ-WS+74iQ+#ORC{5H䜭ݮ}Pp,rQՅ1^,:7`.T>Ҙ- Ch=e'#䊰|NFOpoZ 2O@QEjVn~Vi@8.g0lLM+/Vʅ0>PQ}A9LSQMAqFTgce3qMsl *ky%uXHmI/:@Pwy"Hsbʚ [:N欃mIeN5YebaUU4ɠwQҫî7D XE9$z|a;|_I lt"**yAy`D0g7Bh˳u60ЊϦM;Y#9pZ<&Ű㝀LҜ LPRunvYsHݟV|)?p ;MgJs|C;zw;R Z 0u:ξ?qJC,X0vcj! 
ԇdKy[_|bd+Rehտ9S| endstream endobj 265 0 obj << /Length 3698 /Filter /FlateDecode >> stream xڵioŠ_:.Z()h@P5L}'EIS,ҐI>닗oL91y䍙\/'Te5\'o߼|S7 L?W7]^9K=Cߢs\zx\2glf-G9 FkG}{jvwik\,m[䴐 sS G~ rr@_^LޚkXIe|˺66] $4LvyU;j G?xpZYMw tFăa+㲲hk&Taf¯D;lF ׽O!wcQ@\e%bAֵn+bG/9K=uB4z<$F΢^Rҩ4cO‚jw$IM ]p*eAfWH1N/}qE;p{p'B~Fc n /]Z@zTP^Dq<r'QO|wq!~y,z@f-8ʃ\$-ͬ7{ɮUA;m 5IF,aFɖ*ƇDƟ3f9eBB*/ó6x өɃG8my:S$+fk&_QretS̯ԾI0<{!nogU<箅sٜUJ#>K)WP-aYD:fl`UE G.M#S9=ui][2kl^KeHB|Bܩ{ mw!JMwd=lϣvECYy2^[YP2딼.lsM'H,5N<2k-}-NPeܫ-?HBӆh{lh|\ 1ȝEZ4K {Fc(>gB(0`%82DxcǠqh<~ ''F4:r S#%S1W"ls‚3GUJx1LshdgKl4 x4.-j' &@'kEI nDYnلNnh[whfEq.5y;&"N܈ٴ\vqdz "67/l\}TjܟBM$kNNc$LuU(.#d_S.IB?(=h o9 t ٽp^Y 7A*DZ93@QBO9<ҟՕshݨroc>#oBւhzjӚ}3075}wH~s<2X~l*j һj$s6iu>h&1.>AtjCse6&8gIy+#w.6h_DNK(FP`?"TߔX#^`.T tn^ﶇǗW(rQBoV K %NʗII"(q 0㉰Y4&/éf4pS@2l`ĉ?ʤ,5d 򜰺=}W}cyaZ+ѧ6ܮ;TEDtM=xo#@Z;SpƝ4eR&PG#PMTݑjqk^ދުm},Wkеa0B͏z<TkqHVQCB]^]} ?kčAIz4:-3[K890ú7=*9a&9JI BA)bHK径?\~<8~T@g:q x=>/@1owUm_U^vD`FDׯ+/؎%O`M!s@xt}.h6i*mCew =\;|hQ3 "j/!Yb3HJlDgi-9& ^+a8y˦BfW.YB$Vڈ=`{9/*U,j'S~<qEv:R!C&/go>*L)>7Jǥ:iЭRzeĻ76c֔GO|KyBz@6DP ^sJ~eo`1.(zf*}' endstream endobj 280 0 obj << /Length 1811 /Filter /FlateDecode >> stream xX[o6~ϯ=ؤK-ama؊.;;e~FR'> b<<<eWGNO2ӪhTLۦ*_"{;9v2 MskYK/K}fB¿?]ۇ'-5 &xu. 
N]rohn?N:>>֦0M O )IR9uY<7(4Ϲ_Q߱P',c,7؀Qa<w*:<w2sbv?͝i n{W]"/(؇@Y^H/ cVg+چα7tmYK2ZߕWo+|ôs쭤 \9Quk[x3&\ M` \xOT)uRHC<>Rh_lN^@']|e\Nheє3{݋0-ln2Ew\ pJٯ+BԌ+.Y۝j[m%{lS̱?7jWa gd^mT~ R&n̖jBԤ΄bP{F)w'M+zEa /T)(K\ :rw̻R=5)oƵe_ʐ+Ѽ uשe'LnbcIY|=FUObr߲AIOOi>Z *w*=55od2({Ĵ4/F?x"KJ1їG*jJXW:n@Ҥ0M:\ CF6>WP%h!qR ȱjbjbJD&Ikb{˿Ƒ22o%L(-3*;C5`_,Ȥ$׀0]sʑkؾvLPr ݈]K5UH6az.tػ`EWP\Eox4 aV>8}XAEwoW4pRu5H ~I\l4AZ 2's ZW0S2\$_~U# `7zf .jʊ|)'+ QCAsODy/u+ RXA7[UiCP^apg4BD]CY_&쪛>h'_Bg[."f݊/xjdY_ֺ4wo8e{|MEHM㋦r{vo##ځ~ } ]~ qY eqN5P7cCpf!> /ExtGState << >>/ColorSpace << /sRGB 287 0 R >>>> /Length 4916 /Filter /FlateDecode >> stream xM%G+j _UK0p%{%X Vf`s'̌cBF9Y'*"#s-m_no~??>tql}׫m|cS~m}]eu{ڑwzYt{֫^{?ˎ~ﭯׅ\|1~_uzco|q^:_w _z]^9;OL7_||鯲]ҵ ?6/ۏ?l|{RV5ﺧ;KG׌ b~R~05\zk{)akXוn}?v޶nǗ~Z+Cˎbz5{]w\{}0Y z"rf?99:ǞϯqiV뫇\:?_='E _?K[w/]O6N-)U|w2?d+2 }H)Ç>o_}ۻ'2t|~~vodޚu͹`YıV`WH6[zZ 2ﶏW󞿣$_vO=>~Mʨw^ҢsOyU沨\DlMlmQ˹k+׊%YΟG@ʅu9%V5þSBv-,{>5]3W &,#X9NY4srg-- 9L\4\5N]2$NMqj}:8uV`uT5DesQ!* wY!f[:3*²Є.*:^BTWB. Q]d:3DeRCT.9Der!gR9DeIsPCTW򦅨,qʊ*[0,X]#vT80w X3<!*K#D݅e?CT85DeCT8)DU 3Ӆs/!*K*KDV%N QY%2U,[g%VTÜ'VcNyyLChEo I x2ճ>K C h*[|ܰNJp!7NQn-{t lur!dċu X\>.5v&~*B?؉J_R#b*a*%_i1,V![Zxc 0rW 6Qq+R6PRo |b>k˺,V6d9ggX]@+dz SZ1U랱%ڹ jgDvN'8s-,&Zevva_;b3ˡ*p| \Jؒ-_3jib:LȸPNw6DΨ+Jj"0oŖo鎜x jBe ̽[ jQ\WO,o{)pPdwd+h=g^,J- `a Pn_;XHb|1>˷|1>˷c|Y[Y_µ+g bYvb|oGϚ;g]㓍nXH_(oT0[b|~,eY1>V w0 uD>泡a>F.R{Ol[b|+nJvb!psOw|3h3i3i3i3i3 Ngggggggggώß;gώ~t,ӟҟ'ʟ6y!y!؅gÂ[ٰg` e3zM6W ][ß{V&S'^H\?/?Cx(Bv φ:Aӟ O)^HFh,]Q}$Ll^"?tVnMv;ϝ6;y!CџѳI ktF޲uN ܠKa\HtKt`Z42pH}f4dTH'gF.25Ӧ.HJu`u:yӭ['Ni׋d%ƶK˰yMvn؁iٮOzѸܰӰqe+0T^6.{˰˰0wa;acx>ðp~ݰ!g04bߍ 24l0+7l45bv =ǫY?ܰӰ`ؓݰӰnӰӰCsi؋e؁i؁i؁iMs}qî44444l0;574444lX42l ;0 ɰӰ)j_]OL/LL^,LLLLp^,睋i؁i؁idΏRpӰܱuLLL^,̅70 ;9Iv|:1On.c>2Ӱi5agx4li?ذx-Gκ:{`k6$XQIUm%ؽj`wH'Q}r 6켞P/d&;vN-Kv`Zv`j6ȳ6.Q.ڋeh: Wtm&-|AClܷSEuF,)wJ>܋%|ýY7diMNhyxRtd+}SlݾS= +8*;0<0,.i3GML̵.U/8 }\<0e~8-8N-x` yʑmv rZxHwdG{CrN[x 3jb~sJ%w+s(x]ГU6=>uEg(U2]2kdw~~nf<ҝ2?/۟E_m/ z eKg~={QhVp8?;&.c8X=jo+sylH@<1TxB`spכs< x9W]sa s0tzT֣ni`.NіXUCUGOa*v-lcsƖP [R֫MVqѶVlu¶m 8CV bs áY޴}SX9% nA|L@18S w 
_I̒\bx^,l4s+kh;[_Ǹ/~<coRэ endstream endobj 289 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 272 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./WDcutpoint4comp.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 290 0 R /BBox [0 0 576 576] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 291 0 R/F2 292 0 R/F3 293 0 R>> /ExtGState << >>/ColorSpace << /sRGB 294 0 R >>>> /Length 6057 /Filter /FlateDecode >> stream xM-q+&@PeD $s/E}XJ$}Y40lQazHǯ㿞~mwO||i}ؿ=?ۿ?W߯ >Z}m8ˢ1=et¸^ն;]g\v۱qe+}]wL׏W\WͱZ'7o~U'rn~A#v5Q_ŵO¥xKoʥJַ-"JleۏT-_;~onoi~WF'}u2ұ[Wnꄯ/WY%c\=춮;֬Oz/qݫ;IEj%{ "+;طr{ֻ\վY}Uz{NUlU>t=`#k`xN|??T?xKB?㏟o?}oKm-ZFE+*?|dMӯ/!Ou?2#|O51|-ds s!O6$GPq u 'TطZmo 4HX +mu$<#W=X57Y(NmV3ZwnJn7Lͩrņtzt lhM@MgHZ\SV6)\VU2-ۖj5*&ݩV@h`ոjuD`eiL:6R,ZzKMxS]VױT+ jePS,hT+LVwT+#EN v c;΄_ oL -p=aݮo&[OV8{U-w6R,pzNM)VmG X1v$ckV8%ժbvLjeR,pjΞjebqV9;f*muQmRֻʲ8/54ӳZ1Vl m~FZڬs *v0 R `Q'[-6D$y҃,a A~ }MQ>0^jcįojhf?FVnncWi&g l/0?WL XbV{)494I41q4OqqLVof3i[=3W̥:!iii0:~\1&؉D j[2L&'f\YcvM</qȆ7ḺOLo 6ݹ~ ~ ~o-5f ?qt|@$ϲ‘gs,j[ gvY\?gܝgg gr78$1[8rdc $O‰/LqN<0$>1&1òrY~s=a[+s}՞&=x՞f_۫<]#x՞+-{Ar`_~D?Bn=?t3i3(ppppp31YY~g8ّ/u`vmS+N,\!`[ܥ]y~jKz/NL^Im\)صq vb 6F Q; vS??lz O`7/NLnVh 6Hqۘ`K=f.6Pevw`Sm8;X @K+;Xm(\\|`Kpd}\ZwL悀g`'`co̅X$ؖ;11l~ v)F^ 6`{ISs|p& vb 6`'`#y v;1;1;1۽d 68`~.؉)؉)؉)؉)F`'`'`'@M>K%[LNL3upN} x];1;1;X I%؉LNLNL`9`^ĜSS%؉)؉)؉lOk';qg{:ٞg`sWB E[]Rx)z-/vEÔ{*z<K{fRtnO̊L{Rt:=1&3RCKagRTtSa.EOLEǒ=X):LQg艥ikO[,EG>sE}`*0=X^<_n5=X^}^pEpIR`)2PŮMtf]fvSуolWKSуC 
.EoUK SћTŮ-9LEd=Rt붷K9ƛ=X,Eト:=X}s*M3R~i i*TŮ8c.喢Egl?LE4h*P`)ؽ]чSG/vEGy%RtKm2}\?\уR`)zxdE?L{Z^WtZWŮRtL|%ݩRSуR`)U4gNEcI񨏥XR튞xhcx7?wR]KOsɆ]$$-6ݾb3|QM]mx6]7;y3ϱ/'/͝oBlW:N,''''OL'';r.9y?HhqlN^ 1]N 'W%C9mt9yfNsɃ`me:y;.|牷hAʫevpׇ_Kʹx/{p.O;WydKUyK,LGy**?fA"V$~s87Y,J^#9?/~r~~2j"87e^s›4·o:O=`:7S͍kco:l_{X*sa iw1'c|.<%_tfL`~w*}=9yqܣ{H[!{""cOB/s~Oy+y'QH߽erX< XD8^!uIl;āt#EH{za=7\Y Ń#~gQ) cEXₙ'kjb,&ܿpUz`c9V61?/T9[vWtf9qZ0YWyS|׹bY_hb"EVm9aRRֻXVW$'Ε,h48Y`n0pO\TfY+oHOhւ5Yܲ Buչ rvY&!=0cfndij_1ON'Ot?{41Y?U"v{p\. deG,# 떧_g^>|V?*<>ճu%Ui5uϔ|E%9W+~AJ|eyJ*BzRzXPVru=0jˊ'Xkt^Gn2> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 308 0 obj << /Length 3443 /Filter /FlateDecode >> stream xZ[o#~_G ]1s&E^6HH hXmeuq4v﹐3䈲 Y#!y.߹v&g߼yw泯))Z٪ͬQh]g?̕\ϟ}|N2D.x=X0?_,1uNx^㗼FFv?98xw)-57p-%wR [:m,{xl)4mqFj!^Kz-vq;`LMHgˢ(Z8DTvuɯxɛA3:qGM_nWJt.nn]|/V3K$mb)" 6L`2wh*eDKFk26Q4L_Es%k'T%-3g  Xx̶U^i4rd10GΈńRyD'>ŝN$H6Je mm>T/d6l^M$IW3GOT.^1IdTy{$%a5ULMW-_.cSfX8jf\^Ax;u^X;.7Tb<,!;)t0k^6B{lYXڶ,,:DN'4+'}7\M XHrpϏ)'?;/*">7ѐ{ ܵ缵r(Qq=N =ާ@Li>5C>05d^|_JB++!d*C%TM !L@InjCko9~_`4Qx2{t l'|¾^xW#:UψO#:GB;Cigd=$1\CD` R%uN?ΨUx9+ö-;ιeJF:%4̧uMN՚&hB +51NC2 /xn&inWԠ@ Ih=F.l shp AKב唪,H(b#mҢs,MgngMGZߠU&HK/d1x*@1aD0y҇G诖^4c; APߐ=Ļf?Z(Qz#Ǻ2KMI$&[u}$i0t%.c/]_5jlU"TpXiA19z[<`KY/M`pXԥEbnK:M~Fm<>pO]DePmi=|I|h#npYGJ?|Cv:pC 4X}f5)XF4F[s̭\tvgU`l<+,xWS)(Q{IAJAOJ%F_8O|Jݘʱ]#!> x=@BJ}nֱ&OHRU҇1Eoka4. K [uYT4Ensמ X$ Qz%b_zbprc[Q K Qc*g5O촳m h &?KȞf AQ73.C20f_4 1R$w1c52t(qcsIJ"Ca#tҳO n($8mVh[$d2uBpeT?;?. 
$cnPy!:m_/~$Q7c q}{N/π8.pM t%CZVw;7V䷟~p/ +P倢 s$>VgB3 #1LCem}P[[7TE\ h \@ f㧺 Un .f7~:Oicx-9h!&y~:0L~FGX%ǧ&cjy>G> stream xZk6_ KyIA$Iv;EL,=W=g8D$M"10] Avƈh'zIvDjΗ `+$1n7"vrB^C"Z&], ޤ~َ(1:b\U\.cњG}VFϟ_.Ky3-?6'կT٫3ތeш3kIÙfsVؐ$|^ENdEv")[d^I-3w#ɉ8ӑz+ dLZNv+ˡQLJ*6rimx Wj+' N{U-ÜM)9\#p>xgI3E[}_~ 㲹\6ZEYs.DkΏme P33 |QΔPN/IU|<.Z=ָ&r>Y+2}jtg?Mbr}29?/1!H2i>سJ{x[/v=5rGGku O+8 Dz/bye{LhC2:,Fct`5ه'9:$:#%iHPx(< 5i[1yHYY JPV,&,s 4kR- endstream endobj 333 0 obj << /Length 5003 /Filter /FlateDecode >> stream x\Y7~ׯ`;V)ܨqazgg#D̮5f9nvK<PDdU( Hd~ynNū/~6]ۉۉP]Z=q527Οt-w5SJM_-_M9ChxKs?oﮤ.2{a~%F7o?|,ߡ˷|ݷ3S^60qEX1i-&3eo-Xf@1N&.,aDNߴhkaVg`f9gjLUbs;5of@(T\_ >q ni*KN_Q4{bM}Ԁ'iu!o&Tct8ܗ\F{y:4RƈjU6wIz}lDJ0 FgB(_򷵎5~kFJ3|92e.:r4{Y?jS2hHgR36vAe=ъx6!7ܺ;nM; ʘ?-ņu*oa%3yD]Ɨ߃JQL]~dCoNDJIlLJ`0wx ԈH5k~Hʓ_YW)˲r8&)h# %z&v“3&\#=DU+g}x Vҽ) εQ!q&6b")`J/ G2 6l uZ ҍiW\6L4VBLtdA:h`Ь^  /Vmo1zB09YZTWU$CgeJ'Όɍ΁>dZ\ڂCB_?P$htй UuM¢ep6`Hov)QpQEHy@{ӶZUGousRˠ5C>6A˱Æw4u[ҹ8Zf2`!@#06P Ȁvf};ouD/;Ь`@-YSMfN&ͤth8QIiD`|8C9R{4Cx s[Q2ˇ8K&,-<Athȁ->N2]c۴(ZId޴BX(<6}gӚ,v춙T#nȩ>T iCWu=d~aŏ榊iaed:o}9..(A,k-d ˞?3^pzr耕g*_7ƻ3V9]%4qZ(Ee.0 \ DhƉ(-ʋ?~ AVL,'
  • S BMf8"pg\ٙVb7*QYBNKAk0 u+$d)q,ѴJ^تBW'EK a*=7τYZJ;zK25OeF4~}U  aNT>A[)kTgef,ն#kFJ[L3'ZwaO 0҅w_͛#ڳ䰇C'{#U,_>_ט. O@-tl 6 >83\Z3Cm1W,R{oB81Bk~pz0n'/˪ jP 2 }N|cg8ޒ)Uh7c앱‚pMx6NIX+0f*GFd#A1/ȃU*$ywy#&<* _}:%=s-ЉFCp(a @xѿaۖrx>uA8<~sS}v V!F=oIf q0A(l~X5u_coKДmY/Q=X t}?;lϲO4b);cIa} ֔kH~%@ v_t) Y]ff_iklH5f+)F-QG)*!}ՀJA%8=G v?'~Kp=] w5نbziWt;9ҏi8+~—B)e+jP<+JӃ9_F2/cQ{ZHS}X$~o{QnYp&됙 D>;uvbdeq-.LM+T(+?M4B&.pu

    &1'\fB}HC#ᓖspy bw h t[yʢ6qr<)[cI%(g$HɁU}f !Wn (`weUZJ7Ϙ1%V0 ݱ6q0"gcVWqT>dٴtqt cWxnWX&ZѨE Jf #Vbh+h[7R{VGf>ƥB!􎖂4U2+Z2+)g#|Ixϰ֦)KQ#[x ی gd~jsWH%+5ܠki%u:ﮌ~頰 A:X.E<㡳k,űvoK~bq%{A!owWyU`/'mP Ws_EלiH;6/ ͹pV]v& lt6\Q R#`,)ăo_v>(ٞp]6"/KX*JXO4JA\!UX":/!@ ]!z޷n>O(e a`E}&U4S4 XT9/|CgUs/Q!|5 e-jؗ1T5jUsHVsK(Ǣj9jqn5ǩb9o9UpTsj Ph39Z̒kPrbyq1G x `( .W/L?N"C^̨>tM)^>PEݻTٽ}Mő5L:=L2zCN쀧 )>} m>4p bmc{9n `̺> T:OeA0;^ELf)dǑk, wI4?7!MJ9fA@rM;% pizeb,E #Gh B2Uxv+O|o#~nٓLci1珛$bUgY&KjDd=j-߸٥* ­$p~3Z k}u4}&+A- E]*Gl:=TE3QvbvNm^֑<|YĜK{VF\`v:*]p[aG+_ P4hy ?\tgtRd9(^ *^"ɒڵ@\Ŭ%cVZc|$م!h:D.HW>! Cs{P8CO=1܇->d$<]0pS8Fnⱓ^} bqy'A3 IKBhTw÷"q.}J(T7fs: VqmO|cݒՆq:jNwvu.+56ێ/ceei4x@Zg)EhT#Y]xSENrzO)eHyʻ9:`U'`mҖK,tsw|0զ5Ɵ bMPqs<}C9@Y^7DdKQVT bOM L 7jF>Ϧ"شTw¦lP*`6&.W#ĉBj4/5C;{,$4lts4"s psC+8[.@WF]ʃU(p@0zǩC1sCb(i`sC]}Uϴ$TA]R`ӷY1O=z(># Q1oZD:&GxL> +AO\ ,w:UNyHp eb-_(Np3&hkB( C}hhGݐKIc֢[ |{ 4GV ő\.arܫ>i5E>xκ>He?(=_P/اdن W祧 U챮?- g^ _+.IZw|`{/|8qHo'V5boy}:E$ѹXk3WV#f(q?RCY,k?GUӷ_h"Tw'R endstream endobj 365 0 obj << /Length 4189 /Filter /FlateDecode >> stream x[o#_*#vMK_A-Z d-ɶKrpK.uZ}1fȭ'zիf"ꪩ1x1qU2䛩_;VU6zu-t#\gJ>݅/:-}]fqX26im<HVUjX"yrPSKmJ׷Bij ]rk0qϊkT&\p=>ojS_e&;uꋫ7b*I~뙶vzs&ݮhE7=i\^gJ$ ,Aʸ&yߢ-b7 w`O8[$k{zcٔ&fশ`p2 ƒ`V2ep٭#)DW%qX2s F$ojQ+R' DL+'(Yp=&\A3QYp^R\ӗ{SKQԄ ӗnaN+MW˼Kc6/(=+vgGYXw(BRW+HbMfjjS_ jRJ ߗ0?-ρLGS4Vaf(8d'QYTgv Уj8eLT&nUF#ќ='fkb3UWJ0&™l\/VgE%f#un]9KzEo J~||3oUgyMQm|ZQ~SG<^[t\m;,e#u)s7|ː]'E5AoVD @́mx{<.6lW`"Xys2 2xP5vL4C˻,N⏧Bj~R\J5P}U5OKm\>1a(m #y4>Fi$rRT)l8wn/x8cɇ=S80 H9SDZP)cm*L8_&Ta'!e|@zmF\T09ǯU<ץc6h`EYyxj 8nE}|ux,ΖBP:uJ ?`&y;mЌ~q F$DJ)0>OMԆGm W~qN*35mdN OeE! qv0m}dBcN!'?*z.eI}u+AZ+7q<]UPC"v}B,S}vV NtibLI{$H f8X(B!bdm, _p\ B2y ^D0oo;w~<5qgѵ4xp x\eokx|gh_^-N۾$Q$8)dc; Q]I{i$__,^ccMnp^׋pf Dv6%Spj;(JR!\w_/{0SF`>߻ѐyy2!ɁQqp=/&=%֘> ہ\(+-^C6+&T)Y} B[T(1`AZI5CO(1T1KD>(OrCmY[b c-Ivt0aSfha^T4˯gSbJPKo9GX$OÏ yټ{{z#7pH6>FٿV&K“8-q`m.eH o_17HL^;S什eJ+ e2.KtTdT FmDAY:L҆?.𤋮хk!1G~~l lXo/O+Ya@{ d,Y9g{r  sg^n92L%XT()xكUt0$+=i dֵZa }.o;|ע_n:*׃>?n!J|ŝk! 
qI OTiqv!@n79쐧VNj.V8Ь!pńN9}#z/z],N"{ \3`k3ɍt8EG6mqW*hzh.ܬ@;nd s 6 w=}B`ȦBN/TW>\U+= aF2.IfQP1~$ gհ^Zl{K+cw6߮bJI몦& ɟ ,-~~s/(D endstream endobj 383 0 obj << /Length 2161 /Filter /FlateDecode >> stream xYY7~_S &gw,q,vZGR,'jc&LbI:KVIz9z~]T5:$6**"-7lR۴=NtZtYmq}&Nρǿ]'轟:uԌSy Vp~5ek]'Q)t&m*(+ ,ImUYI+&Y~R*hzhа_"+ﺫ2Q5~œU62F&[$o">heu"&S4id3Ze=`zRiAw*̢QGNVo3ѡRze|@iY4Т:m D0͞5\3  m5Õpu3P``w9ӏFv<?Qd_"gNiTadaSa--+n@EYQO%AVhⴹmv䂌^KCtdA$vJ|Nuv[@n'P.tCaKjhJ4Zzfxÿ9kX˕Qu5h2 2dUy $~ 4e&Z0! Գ&Pٖ 0!k*=8jOb7 RRnR5z&,ʋIya3_7MjZ.@ypWň+ gھT攛hi?b1J(>>1G\y" xx },PQ՘LSv8|#FBH_{ A+a)g ҿG~dT1J vGOW>ǭpۨ(:,Ny8 NLx #=1֗gB s(vrh:bs`.IB8wx:9?gN [<Aw?_ExpX?eFypLBr͝i\bJYNPs5CJD'DH7MgۡZ<)Rl ޅCucy UhnEW.dC{܎(sW~TPҥZDg^K&7'*SwQWuIb 6EsTKjeˋJ9{FS] b|^4X7=N+?S@d:OX k+afZPx-05g,^@2 !T"6e14o~lp=&G';堆sssȲNƛЌa-<NG] q_\VU\Cϵ 쭹7}n4}yh?!U8Aeye~ H}0 ^Aڕ } Dm$qOLj8aw" I o^zD9^ K <VMQ_6S6cRV2ȂLJcH > /ExtGState << >>/ColorSpace << /sRGB 390 0 R >>>> /Length 4082 /Filter /FlateDecode >> stream x͏W>dBB$$[8$(F`#>ޫ] 5v;_U gKX_sG/珗o^\lm_ӇKܖt|/~?|NtP֞Xn_|<츽c_g~ӐZ3 9%~:U3pc>T*ܯ_n}}.SFϽrsp|< ')If~VmNӼy4Ѱ╋W6eۓ2ãmAwKhk uwzֽp-ʚ^qM~l_}mNB17wW|j3{wTM3 ݱ@Yb_Bɳ-; 㧯ױ`ۃ܅NEM42`!bn{ԺtZFY[ m%{nl݅K i mn RfJ105s73 Mtc,h0}[$M4Keޭo-7%o>t&@ױ-hs:oB87Ѝ E. 
!dD(Þgfkvf:]Jzӭ%Xl*Kkږ,$,bc_ r4z˻JBLVe))4a% ;$||]|/(+[O_'U*IUn*y~<~T>w#I3ܣCSKf)w(Yo/Y@Z^/z[)7YYȶ|R?vRoK1\n/[ Bt࿮ 3a@Gt\kgo1$XhUbke.6>uZZPkq{i;ȹX0{ﴐ;)?h<Jyp]4=[CТ;KR<പ9YKH)އVE!rZ5 tZEYa5D; Afh,͈2EUϴ8⁽.mbl@@Y,BDe YlyS๟n܆{I7I V[Zxűʔ?)ʼn)ЀM5o;2Hw!Cfh GX(Qu +K d@žґk^Wج0-78+ x-񆕉IXؒkUA ٪ǒnVG{ z@3a1Alz݊!fbhMARn&LDiVf6%i>4qV$ #Yrˆ qhŻ[\,:D{L80\jFP,Y9 ͩH6C =`309Ͱ6 y_K_'k}:{Aj#(*$,B?qט$D]!š stB8qͤŁk/Zpkx6eR *2 {cg%"j_$MZy>9 GLY&,Z Y^'~eR'X2~jR:^Y~+Z]YUph|gs lst&((\ai+`W` 3^p1 .V%={`Qpʢʜ+&;\u Wx-UW8:\5nMj2}WZp;`V!!{-,{N,Up P8\aZXqg WqVpxBu_z,_DXɜT:!VwMϗA~6/w¬dN8 4­堕Ɯ ~0kt1h+GyǭlZXټ:\W".rYjt+ՋO}{)ʿ9wbiOe(>2rͯ ad$!ь&Ov݋OiTtox)X^oP\J'DC'Fcqie4 e4XpPCcuRCqi%cYͬ_f2h6Ym/: /:`Y> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 351 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./spsymmfig2.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 393 0 R /BBox [0 0 576 576] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 394 0 R/F3 395 0 R>> /ExtGState << >>/ColorSpace << /sRGB 396 0 R >>>> /Length 5736 /Filter /FlateDecode >> stream x[q߯%u[;qNM#(&1)9z!DNOw=ϩj+^_˷_|O !\?NxOr9?\bݧopwoJ>RoO?7/l_m_?=#r<|zنK~j]p}ǒZ{&=>C'[.y];Oxf?۳?UJ*"-O,c3&H2&u(` Y}a, !Q%SgW T؇ e vF|VR1 \1#>mlKńdhbOLsb)>,ʕQLj?]Ʋ1Xrbk tXCih>Zd#! 
' 8h-F@z $y3B&`iz9CT}̕0% 0bRXkPp, HlcHhYm*#eɂ`&S?52M߯H ׷Hu*;LCJa:RNrh OŁo g;ϐ`flfM>lҪ#/0g}Gflf~:c2$ZP= k?#(39RM ͍~`Gʘcʙ3EBY3GwoI g~|= &TW :o Yr=~LpB9GGxHaK4I;,KD~Fx,l, $gnIEȓLC ʔšB| pΣZC4W%gAWBK_A@j j%3F_7׳b*_W 02ܔSW|/Ɛ,?%hZo2_ٶ/,BRڂ\,xY;eܠ|((ٿgcXT4ʀq4L<8⊦LѰG(D[XdO3aa|ȷ3+.~$qo_}=UwSɑz6b,\,਽b0B˜FCv"6/\Xy~ܠW eb>3B{e+_7HbbFlyARe<)DB)dM>5 dLp1y^[Xal YEx JY\ǔf+Yb·ء'P&N8-Z|y#Xjb8[Rej"; ߢ{=;IR0Xj WDcC,KRı.q.C,~bP{H9^RHRVH *ﱔ -ptX:^j/V<^,RCСbUP{;2wXj/)ڋSwRENvSvRVyN[9ҋڭj;H9ڭjsX"TEʩv6ځ򏥀厥ƞjH9Վw,5㎥,jE)ګj d\jWЀcNWCc)A-SUnT{H9^jVb{'s1o,m/g[b%V%%k-)-!/IȏoȎoxI΍HL|t1"yIoQxi oNER-o1v:z*n[̧Qn*w|b[L֝o] xE[/jbQ-n1]wE[/jb~Q(ExoQ|wEb[l/j׽EQ-n]4wT-SNqN;E-v-֥v}Cv8Fuf;:H#^;:S8g[lNjYi!)) Xhgp,B wyEDgg]Z-ѹLNEEe͎D8D<, 7)kfU@4,ZV,6:5;kXlHŚUY,,jbCUWkМ9 [,6Rbƴ~,,y.Yk\,6$l2),VWan,]P~Y,H,]PYY]Mg_i,LO<6nDN+)& W9T&˖l ;-KmjIlxYt}%qBڐѦ]jŒڟH-̖VwɤPs&ͫ 堶>m/ r-6 G,0- e\&J d%Boq=\cG,?繄ܟnÿ/[ڟ.dv͜V$ub}QDwq}ba/d(/dƗs^_?^/-hPd_^__OsC}_췌.bjoioNK3`L_NKܗ]q'yqJ,8D9L'93L8_7T]q\idiu64]]ü{B$B]}1buSi91Gsbbdh"(M2qc{)y}pnLONj9;b;=FW]bTA~OK S$K_bҦ#xv *)eԷŔѥ*v09W?,f,˨eNQY12[3?09iXN4ݤٌCgwҌknA\EXt \8$ Kt"ІrAUtZXJdHH ~T`F\v=̌|. 3`nM Z#CD_st0l{._Yy/ e͝55G~ ש#=57bWChC 0bkN 6IpCknGdP(X":C>PdR?M T3Q.R!HMb,0s>y;yE{S*V+a?$o uAAΧc 5yLG͝ݠWG6E_Kjfb`0kkG篅ιZεR'JOW4 !5&Ѻ[l쬭wm=`U]=~ 4 GsS%t@L]&ǗVa͍Ǘ/ARTB;d_L ھ<&BspgB. 
O' Ӌ jF O',"Ua/= 7N$2`!xq+9F*$6QNWqY{69ܞr=_؞ kN_Ԟթ6Yܤ4:6 VfϾneuYY Q"1~A9pQuQZ-]\%} `s"fH- ( L B'aq  `S  `]ؠ/P>A#R `YJndp `lKOrcih"e]K]YEW]`u9m]]/J/=qmQgr8ELbLKuW;-Vj=Ap4ayOj N lpmaDllU:Er\Xv5tQ;196p6p'I6G@`k& `ϊlNJ4 !5eqع62 Cl:0vA6ޟE#ld?lLrD3Q#"mLjc$B%kS<:dKQh6FM-[ p7&jA r%/̍I@@wZ\Qw[ڂFH#̡yoZOi$F9:F&7D)h"δFI#Ap˿?pI؅Q(M`(`q ]h:gCNx['?p8B< Y}y"r438&GQHZsT2X!/\B|K2>QQ}s!NtJDW>gw 5s+ :=$1tXs&>}}zon< endstream endobj 398 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 402 0 obj << /Length 3284 /Filter /FlateDecode >> stream xڵk ͇=b}u$Sh'V;zW'3!\QI`%Ù$|wٗ/e1hFN.'TE%]L.&o3i/2y8f8GxٜOUm0ܞO5v!aI?˙1S&c Q%qiQ:\[vTNQP3;o,H| v ?EΈte2zR% Ϊ4.?O:{B؈<7Y8#ji@|كeY˝VW̴="U#J&xoBJ!SX lPϑĚBI?.$Bx# ƀL@0W)h#4c+[HR)P*|jL]ޢue|lIdӀ -~ 0Eaԯ}!\i]Y*/(&=;[K6PMyD;x5Ѭ6{">˹e*ϋΌ2;Zz;A'ЙuNqI7ۛ%=yty|5*O:`UlN UѴiy()p -+Z۱ZNMdo66(nap;F5î[AESm?`]\nՒc.M;cuZ`Ece6\6^j- ~@7=mGPW@%ܨ}dĊb!㚅Ѭ.G NU5%'(Di|BYSā5~ e|Յ^0[=I>RJs5iz^i&Z܅< |6CXKœ9ؒ- 97[K)R4CNӧҧZ!@ ,W#KeȒR&@b޳d|[GS,mぢ6TRTRǦ8KdnF2w)Nkԩ8x rb h6V-E-'"W: AcZԷ)A82QA8>*Vk+j"MdkϟuQ ^raE~$!w5~*)4+iVN'XW(yv78\ŋEIǕ\(I_*;2{u70O9k27µ[:v[s.I=SǕVX]Iw%<)#wՆK֧oID Ț@p("NAӸw@ia~YLT2gHC ';8ߐHlxwvysm. 
MrysC < ~?%2\b XM)T]RL]Ohݓ|p󁦍po^A"mN%>N^3y%SIε@H;{ׯ.^~cDTGtt"^\?!E+WM櫳?+X# B<Me98m6.]$Ն`+Q!(U,ܿP;:4D h>}.SҊ:g`nE2ӕO]6`?ȸO Q{=wq?NdZX+}?=7hwrL"A]e5{v8+2#/QBv7G E7^A@muMɂZ#4ӗݳ󩔐$wD&%]B{e{aBH3 /UoM@U Tm5ե41F-+wapw#-zԶ;[zkԬsAk H~~1H-lM1wffz(~oQb]ID3MKNQfjj}%؂ WTidx/T1C=  GY ~Ψ>A ;0=<.#an 8(W&s܀IQg;jُR5xO.Tq[Xq#y빀c[RV׊p8j>PN\R]Q*mP$1lQ( _QXIuT ڕ}Tͨp OnՌʟ֤T{\OAeKaX2n%Yڡu9R_8bD? @r(v$utsƵv-!~Vy8&GEWIa"O7AO[[0bD(xSzwpS?H_sttƴ?0?t|nlZTQȠ4ɨicM#%ݹڞov~Y*Z5ٞjCho7{/DYy>Žb)buoEԶE}‰@槢זzF0DqWQ6.G'˽wBq VӊA^ZC6 tԯ|Kzx7B 6<h;D:8F肀>ݧiƇo ";=Ltxqǔ}&'{`?H[j"egU/ v-#-WlE˳ endstream endobj 419 0 obj << /Length 2558 /Filter /FlateDecode >> stream xڽZ[~ qHunj')x"quJMsPi7~0 gw WMn'j݋/~U٪VOo&ڶeܤ겵z1P\OS],6tf-zq±o ߎ{4EOɂnj⑗˟Jkw_Bnv^UmLiZNI/延MgϜӓ{^LW.`L۲rm͏}c?d?8aD/<{2)͏UU n{ efanx^p#6.5Dv23Uf `>9\H&TeBW5\ b{-܍-Qt\=,P.r焧gt:D^yNs* 'Hb|&LLF@n0bkSO n/wE+vk+t3!w=ۇ$9 ֪$r 1&\< D D8s}u6   S.ZIKKo}8kH t=If&L3ّ 4 8pbGն3L&Ws*?Lr!$o\$){4y'MI Rkljk s4-<*{ڄToîcwȰu V=do?dBɖҷ1._72")ɀyXdhڪ-ljhu+|ET[RV-=ZRw1]El/fBF-%)+g0JLZћ( BW”#{dr g6CgԐ@#,0Y,2%&isn i|<'\oȂE*ǟk V}CCZs~3Rp>5^g~x)GA$q|= TlN۪tc}ȦeCggMŷI%y&VdxPOZB] 2بd3=a]2ټ/:TzKBoMfcj]zqUr9FwlC8PEt6.{G~\~hWk5鬏k<\%#_VO`,0BM-z9 㯍4Q t db(2x0? 7d0<هS:WO֗X3q5?NJ(Qւ!|8 gnF^F țl"[Nn#_FR4O K$Xc1L"U"W̘p{ endstream endobj 313 0 obj << /Type /ObjStm /N 100 /First 903 /Length 2900 /Filter /FlateDecode >> stream x[ko_1ǝW`:1&mi  E:|9˥e$JV4f3gǹwFe7FYhr9uJ\@U=URP.)k(3E[DyLjWl),8W$cvn+I9cx>eD O U0!`#@Ix *B(/I9/*HxUĤ#D; ~c,"Z3`zH$DQ%KI DŽ٩^RS8@4XxЁ,FHq#_ʁ_$oL[S8rXIWgPJ"T/@jm1#|2U"](1ADYcCYԖ31+OHHT0Z*8D˦U \kY`PVӅzgmx6* ,i-$% Q?jU=Qv}j~U狳zV͇U''M=YvP ɢmxA&D]|AgSUUՏwsU&֧ͪy cK~Gż NA'hxl{SNKԿǫf>VP2}lAJ)AXC5}F'R? Dt$3È (imhs)j/Brz_/8HQy[C_T䥪՟WC__/QoYVKhz1-h/>kU ,{=^`to{ɛiӦHisyC2MkuZnNf$Ҧ݌a ?a923HKN]2F|?`K,קKлOio==~|M#f_Kcm{6mGattX::,KGCw?tCw?tc7_nP d-+{0:@u)knud->JԌe } HW㒎>,yLۉ /ڳen [.4jc Qf h.$?6ybGqpY#po;\QW;&!p`?}48{y E[+0P C䰏Z/SdE~$HRӚ9$Cervhx n8+pe8"w;k̀t0&"W cq鎆 hIƐpa+uX,W7$!2ipJY*DB KnϪRpQGowLaBrw7m>? 
9|{nƅm~At_"N{.zntUO5;z 9 pU%(uEu}~#tG%(5ѓVߝ|',Φ'@2?޻f5ŝ7fTW֫'<֓z p\s^z hxюǗ`ejZS {Y5l2?kfJCqW^#g\{;sm،z|fllo45?{%lf%}=_|*r].b1Ɠ&b<֟U3=d1U}x2RTg X6JC'j j:_]!/֗zU/j2xOI#ݨI?{ɻ$h87 QwTT n@" i#+?UˁTXh!Oԇ܄\f1(t<߲cxl6xXK= !e{oYcwY<>A@fob<ۂ%sݣFzĪGMnCY~2|utЯ.>܈qFnRGnҖl^6]4uUOsW=-|_ Ap_hqPTK=a2̚Se.hhЂoFvvN4ԭybfa3˕p5/p kۡ'1%>h%Aw*Փdi[4HQ磡٫بbVB3TI B +ʎN3/Pm Io_M{t/Mo[v͎^`l~$][֚];raRwF Í, [چ Q:1H],b60k0΁5Ys%w{~@[rGY)椽?J\H/$ endstream endobj 432 0 obj << /Length 1574 /Filter /FlateDecode >> stream xڥXK6W(V[iEAES=$9ȶh,˕M;3(ZSpޜY;zlrs+G~™\8YE8mT" DSo}za6;vS/"wI}l_%NJ |l"`8#? b'E΢3 rZ7v͛:w~i&ßZ򘣧Y{).\zZĩh)-/+;[..\څ|c`NIHǢ%s߭e9Ku8:u\d~V$~~v.|ؐ;=ND֠. > /ExtGState << >>/ColorSpace << /sRGB 437 0 R >>>> /Length 9539 /Filter /FlateDecode >> stream x}K%u~]JNɭdlfax15 [}"0󰪭n-ԭh^yxo^׿}׿u__~ïW_WwvO?]”ۻ___WU p^iwͬ׿?ogu^!~cī^zWN]Ï~R쨼GfiһxçOç|5rw8?U,,?Ts-z3B{; zy:y GNڵF^;}s^vw띛WթLrydTtJmテ&Mkt {zK3W9kJ=ŖSl>oC_SRO,-s6޵Sb%X*^SG:o=*6L6, A h탍Gm>hwDKh^O@8*6憐G?Ԏ(5 4Xolྖ(/oK*z /|HC z5/uSYlgW[;O-X!Zg0ԑ]hIxOKeeOmOk} \Ӟw׬jk{B{@0dw~-F!Ks6 ʼn|- 7=}o5^, wE;VHWէ:ݟǾl58_bma f<&D>jsk UJ!jJ+64W<䨸?-7⋥VY +̅3h`uG}c{ﴆjl '&I# --,""[BoYG])Elc_Jyx7hw[yK@Gx}D2 g&.lĮ .yCIy!CR PoT!.wyG>V͒1 ^&#O乌̎"TCjL>-XMck4:&1Lhd.,t0C 0d(KHNB@x[L)Sf'cַISB*0 Qd 1xGG^=C?J-3؇AXx}rfgCx)!X@[SCZOCkc@I%HO k ew$HKj#@oon ZH*Zұ< *sȆI±PY9ԐW8uo!V?HSs8+N*Fn=D*İ lHXp{~l< #u,yi赧(> R? u.`A<"AF8+t9)259p6hI *^ W{C ȱ/23?=LJp97}@{|zx u1 8{`f pW!zĺSaw^F^뮝6\ld/SČ W;`H}붘 !Xwԗm</S_7> !eC{$ix,(HYK<(Ǥ3(sZceMQe w8҆kDm8 PHDj̳6f#uEҊ-<ˌ6ص0L1dl8 TPD)Ϙ m )P@ϳؠ:< (,6Ⅱ0G6FHƌX1c<˼bڜGG<,1IU88$. iY/=oڑʺ}PQ{c)u1 ,aZ:P-6?IJ1U'$:A-a^&X~}o4Hu94Y`ev`9Y[ ״kN9EHg17%ѤIy}YMX}m|kkD}.J_jx @ NY@:{uݤsRZ7tG_uE@x'Hu8(Yn"ZRA=% {b7t>5cL6pl?-S2 ( 4Cmʋz(=[mlca E@^كhp[EU#łwdrcˍC'.r^(I78V{C4^ʩRyAC~vr.<HAC,"DJ!d,Dz/&H 6,fp*SH3,)G;Ւ*MnS~ic "}l ! la61)r_2? 
V)pޓ"sx}}_mD'# Ж( lt;'҆%hPU #*&\j1E/5XJ@||JmxtZY[DT%v$#s z3,Ϡ].5hhgf]x'5̬q+0Im Vk/uDc﹬Og] <m0.` _e3kkscrh3rX" rT& X (9(u_xАx)rzXKWX+0Qf!$ʢ.+{G28険)!TG= WhlJ^4R~ VY-b`0O1ˡdrI6w57.5 T7{ K-60bgJ+ۃkg%S~kWvoK A+M!㺚UXY_}MEhYINT=/NT ?{u>#ca'__i?+_f?P?fq_U۾2 >noU/E,Av>p*T-Jxaֽ!#oD 'Y1C`Qɉ4D_22EH@$P^ہGe(6򗌇9A,Y H^^ c:\4 r5詠P.AXȄ$xAHUIO"P\ o:!)!7Z1U"Cz_G lmh!?Nr 3}$StN*ɡe KeQ%!}D>>?lЂa٫$/uq= %+O9g뒆*dBdy""d5%.\.XL:!^L"4[2\S*'TG2'tc a # #S!F1tUz)AQKӳ%+Afޘvc,;A'[ I~zzIՇRu0VzrQ"0Q![aT*sU)`X@>[Bˇr\KyJ!_]nFV6L[1}.FUdJbƫiAi\?S<6*[>IX#PK&ZZxYg!1=/ĦѠ Įef*W _J]*uw`6WiB}:u{(;MgXgQ@B,'=0\Q|b WOJ{b'2A e5 &ZV2LƳKdl-MdlZu /&cC ^2ÚNzh_`2zGd8!-BR5\2t}?$∂G7nL4)No\UweP,eB3c݂kE6)`i޳*U֊Vz]cި hV/(>0~n c0`RQ?|N8 .1_q!YËFOL1YWtY6ƈ6!mB0ڐCmy`AF0!m1*F+!mB.  6a5!mx s>qxImc!kKix)w6aGrxңt60a9R1l s@ c:xm z̬bG TYn'{v1*g+#Aj7B`դBֶ{`i{*ړ\ rH(tNVi: Չj|ʹ H5Q."A &=cS^un!vc+]sXMv]$&7`_nT [@*iڄx WsnpV #k"] )Lحc`66h۠}Ijl-jDjξ'_x=şmZ|"Uƞ´6^r,پ(0pmxH]49E`yc"%NZ,z Lr+emsZ؛Ŧ YPfPfx\RO`U{֕"V;4SEwQ}Qdl=59A_iGݚhgSer s:< bK \M*p QyE^t\\]n_&5eW,gZtnO܌Y 2aM`W.s:ޗBaOt (\$&y;@?yg/c0 ^m͵> aW|北dw,ߪ6„c,%'-s؇3AfV>sۨ>f)}Zؤ6$C-S)xI=Vdz7:gQNR^R/FW03EtSN^~?geשTI=AeY׷R͛R8nܬg򆩔\xރ2['MD]8 Azp,E3 %'}nɫ%/35 eFVe;\b:B!76mW2P+-<*pkIiN2^\@Xd=*on%fL v$8+YSdB  V׸n1m q6odrH Ax3 Wi`խC$nM.ı"L L̴"6mKѧ ŖHnj]{=SXߵqekcZ" @YI5M.kzqcˇSu xt̃ĹV|4lx<.{<}O& gT>v5mRMSKh:aDœ|jN|u`;laD/էxHib.I6^Jk0R06%/7YM?ɣ':ab󒸔.4_8 su}ngȴy5L8R '=<Ԁff/b3)gTuz ~4sFSZ?hoԵ ӝᤫMϜ)'/{OjOλ+=ѧzercWm:#Y]$?wlă?ٱ2;z|]C*''ӲLÁCGug x>hE]xs%^R4pǑ8q^/-zqKM0oz9NeE^k:kqnhFsHZ8GIppRWOg ㈛Z6'z<ϩ1a0ӧq~[AR#ч}S0cŃ-0_mx`֑>qF }÷ֱℱʡ~'upCXsֱŪ~^] G]8뺢pFbWqq'wv,fCj0mw]<6~?^:hvmxqh%`lv~ற[>nfo|?95KG@`cÈ=jd"{=݆𼁸fh>fh~qpF:}W>1ku>8`a#1hk;AWbkyq˛}nf_M³|`6Ԧ$^;iz'vonKT򗑽N u)m|.n~|jFq6;0BBwaKITp,DU{$^" ث5RSuc?-k~b!U8.˞>EO`ݝ #̀%"I*SSz (絙=,R-wpxc}-T'Be#*.U **/]3J4 ",y Bԏ, ҋūqiRq`n?U0ſ `8Auz*WOs~=mDd@a[9d D)|Âmߘ'$OxX\vZxl(' {PaH]ܰ',<1ry<&K_k%C߻Quvk8_#^*vmEO߳Ӿ{N8+ ВE${qI싞TrKawZc0-zk(,yT6\S5ZI-PNdӉ+\#qwUC5ItXtJ+KuwKrWՒH& U;<3pc\~>42ϯՃ]> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l 
*?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 451 0 obj << /Length 2982 /Filter /FlateDecode >> stream xڭZ[~_AIDc΍CHĉ̓V˕]2Y8XhI̹~e(⧫otU!Kє,n JS8DmqsW\z2;\ ~-j'߷XcklT;o/#<~IK k&4j|-'~8sïHW.Ki'ӵt&pUY?\I) YHوVZ*曫\ m#zg?o+KZTڄ,ǫMuU1H2U_z,?`멩ۏIpZЃ{ߡe'Ir{ucm@Ob(럮436@ْGnE ynp=UnL#%;$laact*aN+ml0쎷Emjt_Vtw~؜h\I*+ [T~gr82wi')kiiJ;DБu8e-f(B'yD䌫(xrݼubЬFV;jĂ;<><-R԰M˄Vl҅Lo9;6eb0ʇqcv>>< ޾ȿ'TE"qdW(*LzHiB=Dy<"c "׀i=*;%dT0bvǭg:#%kyK!M#.ՄwR@:J łd-E~緪j4iZ_=IU6, KQA"ѭQ2hj=am!!`#u䆎UZ/x؈۴!T'/0rd8XO])ߩ;`SHI j=e"t<*d ; .PH߆v <±ѣmBf%H%]%Z$m.,pv:= ېkwX6RU3 B(({8` 1Fzo<^ |:¿پe8^6 dBO)?m9Av`8o!Ysbʄܫ^M/_thka FTj2ToY>|8Jnц*q%~*c{up/ ]<roJ9%]pWbNق|'ќFUF}\[Ja˥HAu!:UN4Q뜁r|%?5>:J1UuNˮàЈ&!Q U(Mkem_%cnUx:skHu#[Oj@6m+2c]Ujr $dz &gV_gy~<e/A6!s+g(*7/_\<Ə߫4r_{}9md_9,3n%{TnF8g)!c1Nࡗ~H0%s)Y@%*) H [O1)fDʀq}JFȥv0fPL1F \c>v(އ 2A0F=Y(z+UEc5V+q<턯u㒟ˊVQRyײ ls1r*s6Eل+-7t!G Nhkn 5ǎ4g\lHUCsmdC֡-})1SN>j°H`Qj@k7c tuݓ1}-ܚ'#'ڥp.&3?ef%+of<:NT ~>0҈RC0R>px,0pG*bY7Wr_Aa5JR&\+Rk-\BhlN*.KkFfhڣKP1aD(!C"&Q>|97w;AϘ=}F]'iYl@2摈G0XN/}%VVo #R¾&.Ļ޺X[ eM%\R٢mE4G3rH^$9T?*AyRA t)$@8~!wt0i2] ZЇ?p.}PsrǗix')9BC9 6W–P B byyGw(R endstream endobj 413 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./truepdf5rm_block1.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 454 0 R /BBox [0 0 504 504] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 455 0 R/F3 456 0 R>> /ExtGState << >>/ColorSpace << /sRGB 457 0 R >>>> /Length 4198 /Filter /FlateDecode >> stream xZM&qb*1 -Q$ &Y,drMV=t|ߏ?-35G g:91U|: I9t}Q¢f)=j-gg&D2XƷ[cdPCd\Fg+ 6[@M#k&oa`:k>ҩ0RsFm{5Άa0sn|oMgޚρ lǪH.ݨg{7ABߗm}yso|7yvprm=joFCۅ ]?loڄ 
mVt_6P}fLwp5ρ1M|$plh#z6Zf1ˌܜO\o+8##Kh Qd&2P`$#gmb)JjRNhQk*Xb= TpOAъ~X@v.Dh?@1?`"?V6 
 ,ld юma$(?^h e)6ζj4>DČ8432>gD՟-$_3rn#07Bڴ~ļxΈzle/c 狍XčOU*j Om q_N\I߇m.< s30F_ p&_ki$zk0vIX|gψq擌_KEaE`ca]߇bZOKia#ׂ?K9\~Zg$2lLAFW9-`Br9?-[{Ac! NaFۮv6Xd>_s|3fcGb4~eؖ/m0:HsÙ|eKx8f~%"`|XtyX埤}#}//#KT#rm*?4\DbMv8>jcU{-xW=^Wށ}RϦpV11|W7x+T,T\~by?H_ o̯9itS7_ c /Yutۨj"eJ>P:7,^,ح(D=f&ge٠䝢 Qy䎸\{子#V2?xg,B r"»Zve_9xebWiL@F"Y Jr?u7*6ݜRz'ba_")R$sK[pVy-R U겄"K+";%j F\&"ďI/&]JnW&MՒ0/BH_Tj]#OJM̤I*٤IWWsPp7ȸMΙsfWD9JXq ?rI ΙD8SfE|v|sμ0V9*$u5YPUoዴ8$TTRļ~c|KHl _㛏[kjvD⤊vvweN&vdu箯_}O,r|o7FӠ~|;>zI2uC kP {P-ROHt,Iy:'OWqvqk1upi[I25UV ;JBN= -eG+Q'B:](Qx+BUJB^V հWhZ2*WE{֭yThaE*BB+Z$8Ъ)B k*B+{ZThx+.bUh /*oWhjQvQUeI [fjB%ǪЊ ¨Њ 2^nZm '*k*XZ^!*z`UhE. B뎊,^ZY*DBxUh*$vG=*KʮBkZ^*$sv%*vR OThy> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 414 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./truepdf5rm_block2.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 460 0 R /BBox [0 0 504 504] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 461 0 R/F3 462 0 R>> /ExtGState << >>/ColorSpace << /sRGB 463 0 R >>>> /Length 4469 /Filter /FlateDecode >> stream xM'ϧ#2"!%ٕr@B"|TW6Z-\sόǮ]=M8?x뺎k/X|q} Ǘ`?8R+㇗sT7/: ` `07[qlxҝL|ӑ}H.gx\ rۂ1x8}OL}HgD&B>]z̸<ϋѺeς?jd\?._K] 5v!~m3k\!a*#b @D FLIde1.Տ~waZXx6ܯ@CMcH<&h+6/yh[[4,qRLJ@X?Q8>7'2<bx8\#okL?XCӘx0-2aDLF Y޿fb ڋEL4㥨 y5B_E"d+=!KkD@cmokLq<a& ׈0}+~XD+z9[=-$ Wk^g|z \jg`kË[76o/kr# za0r2a $ʆ?h5`jzi^#z XkM+1 ôsbL׀ #ydACo&dY!cT,,W #׀y?4`lY.ċ/~^Ӹ^nAt^ bN-S \G|qd11W[؎3b|?Lx?L<!053ޣ ɞm3n+!LkNZlƃmY>8?gx+8z;yG5_e|q>ж QGcmzŶ"vN ozsLl\!~g6Nk0!/^n5$Ŀy=JӫWBρ.rmIB^m#ۘiB^τ7b?i|>6zo "b^#Mv 
GՖq6*23;^DFě^0ezsKh&exLQ{^I7Ֆ1gzo]P} v\`hzk~^)k02#10?y\y\MaڱLBlzc_yp5Ƽ@/d2pj˼~S|^oFT3?;u'ݏKjqT;d^C{W?&QR&I]=6+WlccH _lMj=Έ+WJjȅ|CjB!~z\B6^9ۋor@&yqΡIU硌e V΋ +]q^k{hZ8y1\ׇ:6rIz}HMz}ar´p^÷p5pz2.u?AGOOsmO?@xfMZ`tfn펳=q;.퍁 gݧ>8߾}Ǘ~>^ g/1n=q*8>Aӧ_~~v,ï}:նïuڟ>\>jaTG y*UV;a͎Kelh;.ٍ]8cj gh_~e< !bAj`/dKgL6 [oۯ??@~#~wxӋۚcIݏowdwtuos$T qie8 H8 8 ȣD:=p<4xl,9^8< '=x$yI`?\vmH`?~z ۖ|؃Gu$.^GᾝqH`?\vs΋K$Ӧ%|ofI`;MK9nz]e;/. luI`;MK9o%6 l߬ lq&m'eG-?˒˾Ȓ[P, KnK ?ʒۊZ)ODՔyn5e?yMmL5e3ה%2VՔEШ)*kZ 'ݮ2Ք!2wԔ!TC5e5e\~c5eר%,@ɒ[MѺ%gZS}#sK[5fvrR,T%Kn'3ȒG%_ӒI1kʊ5e8j| m!SMJ5?^SȸU2Da2y>^SH1Ք7w^5eiה7[,1W2 y2[Tc2zMI5eF!SMdGMu{MהAF)ʟpKnWM,ya[uKފװ%Gp&-y >n/տ9K^_;\'iɫׄ KE՘ɒ5Y2,aZra1-9YT3&Kn'#՜ɒa-Vv2S,yjH.K&-9CըQT&Kn58 %A<_^\;մiX +̸%O^7,9’7,휪!%O},wKUFXrX,7,kֆ%KF[=,y|`%^#;,y5DnCUKa1Y1,yTs&KqXr%4w^\NlZ5/Ò_]4,9Xa/)rK~^*^d,%&kܒU#a5`Ò_KV [+4Y!%F [r%K {n醼{\G܍3.\/EVUjF'olNJ6VUŹGWhYH޳hɂ%CrV p ;]c1DMt݋|w/vt %jR9OvJNu2Rѧ{nsZ2qKn m;vt-6;{ Ԅjxg9n[n~r T٠\6PUg2ن*n_j$[^Xf_QMՐ]Q%sF񕼵jhXn^IƺyuQn=n^ڊUDFOݪDR䋎.Q u!)?ݪ!GRn3m.zVn Y %' HpK>Pn}6e Y%ݪ-J蠛W6s'>rφ,<+|D:gCYybBfC\D<<(<ًBaX\бiW]Ex'2K]EX\W/_l endstream endobj 465 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 481 0 obj << /Length 2936 /Filter /FlateDecode >> stream xے#}œ]n-X2 =cdw9[j˞Y jkioL|\^OƋ4䧩 \~WP!=A0뻙mfs{xƘ5C?0^`\6nz~yyENԤBj7^ dׅK+MxJMKss^s-Zg^8o#bnzf?m7={OTÌn#pxs8HdzӍuïo#|vydo @nK[o1?"y`?Lv7' [y#*MAB:;\!m0 %9"ܴR8Nʈƶ#'ukTHbk5>D0"ar|$ǙV _f{ָiG< F2~֋pXEfvcL=I)JQH` H+"Q1%3a,2Z8#]M|NX erQXx;m焃uh$X/4g e|~Vq 
ut9yWJ.0p|2׀a^vwYbJţ8f\ C#9R%R\[̋AAczY!,ة*PAJ1Q昔aeTݵzȅ?~>[ m]!=ŪZVC 4C0_3X[ e(w"܏G42͵Q6">~|028[ Acigq|)r?fAb2OO˙QOBk籺6Du:sXGܱz~>bY՜^?ŶXlyK٫tr& RH͸Tku7_A0?*e$ tc~sd{R2մ̝M^W Vf?~\uQ?]Տzy a JIekWC'{Gopvĺd nߟen]V>áJq'36ޚ*?:Y`;;]J7#vG< ϺM^uзkNs-<ß#?Ѽ!ɮ,FW9Ca쩳zrqpA^5s ,P1n Y gC,6]%RlPvh>2[jW|1lsG;&z_%aj*QP™(x#ĀPhS,x ̒<׾#k/<~Mjwn^iܲ~UGiVߎ!p vP' ʶ_)u^M%rwdyau3+߰'tB[H_B͆hLJDʴD Cxjؙ}.E_Bam'1hQ7wáqv2 9:LG=Vrzr V1 >K^ Qǟt4g-v/@$\cK}MJP c8#etv| d˖+Ӻ˲lȉ>eR1YCFwM9c&y>Kp* lʕ;(ǔ"r4"o̊"R;mtq?9ݣ)Hnl .Av::ުx2$@D&'N];/[ endstream endobj 426 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./npEM5rm.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 486 0 R /BBox [0 0 576 360] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 487 0 R/F3 488 0 R>> /ExtGState << >>/ColorSpace << /sRGB 489 0 R >>>> /Length 6764 /Filter /FlateDecode >> stream x\K&qϯ"@~J `H2 bIA٣]jgE81Տ쬬̌,//#=Sҏ~A=^~׏Rq(νKx}o> 7w}կۗKehY={֞{;|հϢќssa/?ϕq0sk\/y>w sίɝңg~?414uI%-ܛ`7,\z|s+8{5us_ ]sao5.~~ 5_x)9.nai9cg2D|n&ȣ7_-[@tĥЎ]܉wm{7/vm>[~Ii Sh\>6<`oYm-M~o}آ~R< ˃ᑟ{lq\?99Lq9:cw#wZJ~tYe|;ǫ-qMWIjeaהF`?W2߅Ë">v g[%L(ᙻlαbSB :58r|wa()'4Ǩn#d~ܛTSv93L&[Ag0pUFcfń;b>L|ŰxBerƑ/ɻ`nhtgk3'Ƽ`n^`Ĕ/a3Q*/ -TCylk|9&G3ï9mdhr WZ^*b.l+ؓ6[Jlq|:ddw,lև]<5jZ"8>L|@ENy0=qw3 v&dSZ"hkü=F\}u x.:*Xa Y4{[uA 7#BqlN$?\X^9 yk!U,X Q@ίD!wshٷe/8^=j2Ms1H5O#AFm)H/T(X143s{ho0FzrCpqv)ׂ.YT7ǣ$=Ѵ1)EF]iZhԕlRÊrUW2] +KH|A1:gpQIK7r̃zڢ{7?0]\#ؕ +ƿ0/oo+7o˼G1 FG b?AHx~+.h'LfAHs=1i'\LhQ͔!t7w‚\A }C68h(Ƥp!SGҢb bXw"BZoA !c\A #|pE1v4b1,\A (-? bXd_aD1f4~XEİPp1,/KBy0~ BZd/A *7^ FgOh=ERn0Eİha4~#a=En0cf`WcYܽ8lف"iXM-caۢb%3B%EЊ&t}G x%EPd2ح+P@t ЌV8g$Cyɉ9# 6r'b%-zd5O+[, ϽD=jGvԧϝ>Ap]>}/ɐt[Ն>-}i.P'M FArJAbX"=ցY:#i!   )Xp  f ~re+"E7CLf?ڃ4q?̠&f*d3Ui>o G(n% &`fbz@ G"}[(ۑ>m;Q)`c>T?54UJmC'.6kky]usc\,nKz+]B Wem%2NX\ANjT ?K !x/moO9qJY!*u-R0A/ bA2Em:h6 i1R hbXWQyUT6HbJwvP7U(0PҭM/62^EfVԀrT@J@)3'N T-ryQQuà9. 
94:\M/碋!=*e_+e1zStFIe;R]c5G(&+TjRr=cJϚ*JϚ c]ҊYbIiYbPz֤ckR15)m޻\Ϛܞ5)^=>&}ME 홏'GzX"_FGL?qQ+_5+pSl]4ߘZ ~5?=S=_=wlinu<&~Ƹ"W"cjQ_ßL~x/˗_?79pk}v_?v3޷]:^;z*0.xB 9n}x'ݫ۶v ?|| Hw7ܽ \֧AҚ/M/+M{/#5qwB8^W W8W4f }u +%(?b?~;{B?~[Y O֨tgq|'zBOeޕ{YWGy8`qΰJQ8j+`P[-WB-}p }`^DRE@ @x%*5+Ў⽊r 48+ 4+l]^ AzwaL ?(P/@ק_ 4PVD6G DqC]vm"|A?l_sI<5Sg Ȃ|O0{,⻕d5M$Z5H?5qG' IECeuQNvA7 cySE:FÚJljhڐ(* rtuwUYeDɂ )6{ċUzkcAވnbceoͮ"Fuv=~\fԈ3Xba2SdŶ`OWNa0E>ܰ*/W{y{S)YSҲ.Hwb>rlD&~|87Yd"V1O/WcXO֩Q(uqM4XWR+jvX*_;Ƴ~S1*zs^EʯZl< ̒h0KXM^7}9ƾ15ޕEm6I>TèpG\-р=-UmyNީ"fK*gc'1 Mx`Kg01|7qhζ X}R̎Nf/IY]V޻Ǧ|㬁LB?Z{Kw3 O}^{j_]ֱp?{_KCpc3$ͣF A0A-nڔ>w{ג>/бP.{E!Kz_z]˒jڞExmýr]/ߌꕍO{abڪq[u54/S.Sa2|VcvWq|kLq7eF^}3g۳޹nE;Ԭ6}n0ʤ>\_蝁w @sg/WW7oy[6ЇOpwQuڔؾ 'hf LIԮusa Nm`/:'-kcm} b1:y21쭍P)j\SrLVh,,yjG [@Pcz Ws$~PRNy1kSI>hg @tܘy  G-&j|(%V7q*l[M'aٯ)l2Oa =82zƋ-mew*[pzrтt<rW]ᥢ.>q5xbMLv. ]ޖ# >gNluSK TGi_:(>~QeO?ckU{vWbw:+y7{wI) G]"{bIp\ug%QhoPl?=άm](>0'D~8y(#m=[(̘A|öOG~ v ,8 GIn!Νr;uy 0dzr[,S-dxyxx V,4Xth{jHu/$FoSWxQOW8vJ7Nsrb{s`/}C $;y8`BËF9 XƳr&y8Vi"/㞟ENj<#-?Iݏ6 iԿ$ؾWff endstream endobj 491 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 497 0 obj << /Length 2230 /Filter /FlateDecode >> stream xYo7ݿbG؈9$t6bI&,+,WR )) i8G~EYVJVߝ:=y7"ʨӳJ(w^Dy>SU^dj_-eyk3 p|my>Mt4lh'ןxM %n@x/~"oK22m͖pqS4K3z"z$6-nPk$Mgs{>v`wzG"ۭu\U D_ڌf$'(ј&YJ`cP"F_ KJHhPCp62u,K@-|`肋:hLܾ,jZ(AH8H=+az Qj)ܒEN%TbJ?BbSCS\qcu5{~3vӇ1:JkI"bztEy4qt\MYJ)f' p9񵖐@WP`Mx Z_mb[nR[Q[xlwWoRos:!EeŻ]]/ 8u_@v]>oqs =Y?õXC'9eU϶*e$ӳ$^.<ồ# u.#>8~ 
Ke/{Z~\$_πco5#5OoȮQH'28$&;$)zw{f/x뿰@Hpi KAK;IƂڋd)tXGWD0/PNCqo3]2D:K0.&MN2d᫤ge! .8tsȚJ]k czhfN&0 Tه(|nw*XҮ9*9bMG5Tհ"X؊9TAE0ɵT5nJݏ!RG}AmǼ8+ 7LAܝ2rʀIڅ@THޑm \})SK TOETQX3I*OYf":mq=!waJ>]-l.]L)Q9OFDaw%հRZR; t-90=)4ϋ|AYvCbECIES1l0N)uqR{/Mgz+ iǜ4JWX%*VP*%%7nb׳S#ac!xMI~^I t8 |4'KjȠr/U8wz,ߙ[":<6߉^=_f9Ԥa5d%Dr=H )IRR# N]9WF.6_vwL endstream endobj 442 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./ISEnpEM5rm.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 502 0 R /BBox [0 0 576 576] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 503 0 R>> /ExtGState << >>/ColorSpace << /sRGB 504 0 R >>>> /Length 40149 /Filter /FlateDecode >> stream xܽMq_. +3Ȑ f/-4,iIb܈sNTefkd/ [uNfdfDO_/_ݾw6jiU_?|?_ۿ77ſ|3w־ߍ7c}wU?;~8qN'?n9XOl'lju>ў'rb=O'׉D{༎rˉvb?q8O\'kXN'y:qhO]rb=O'׉D{b~8qN'yU2'y:qhO]'~`=O'׉D{t~biۉqXN|J:~RN_ԯS)uJˉO_ԯS)uJ:~RN`9uJ:~RN_ԯS),'RN_ԯS)uKtXN'JqўRr" DJ)H@J@{NH HHF H_H_H_H_ JRWnJ]4݀=m@JTz(6zO؀8hM3ppC XM䗍_Ooց8h R?p$FޔV)-RX XqC'~PꗷE ,Ԉ)~y+,D6`㧝OC$~y \Q}S_IJ[F_|& hM0;I_ Ryc)͛Rټ){6mKC7I XS1NE|(S`%.`#n`?1y:qhCz]^Iן@h0ps(uLm~@bVbԳ1|q ( kA6Jn1~Ro3J~Cꗿs/QꗿՁx ( /WJ,i'SJ=nlJU ~SJ=dJ}RR`)!RFo)!2}W;R;Ry*J}(-*>tR[RM/Ӏ*L*<K>x xű _0J=(rk9#kYjdxf|W^f٫{Xr15ՇX=1{r"{yihiwizܫNfeO!{xF 2Jc;qg}4JLW3ԫRD)BAH\`#N`'QoQ꧿ƁCF*~.B;WptHy;yu$w2. 
vk&Sٜ xr%wr#N6F^莛dq4W#N~zL`M&Ӝ.^Ŷn)8?s=OKȿ?M!]!m!/}zgO:4,X>~ؾ >gQؾ)lCcȻ~/l"/D4|?9Wp[۹+y'y0&Gn/->\m\ȓ-Σ<-Ϋ tu\̨U2Uv:U{l{im[glhb{22ClZ̰b4V'3rj ]-]Da;3P^2R_<^<_^l'='#FŞOƌjdŲ:YAV4kd]:-9M_rTM_@J͠&Gy<WΊkxt~Į9[ɱEX9st6x#JqOn9c [Ahd29̘qC'[ߗTD?RR#3 ~\Bdt _' 4gF3e⸾I?z^M}/!sX'U'[dz'F?/~gjO~gB?1I<>O\'h' \ :?~Sy';/+R<f{` UY1Lq70^6Ft/18V^wdO|+ϻO|aI!w0O|ixM~K\t>W^ s6T_U8Ou}_Rɋow, ?2p}^ɝțy' Mϻ|'4+;Nj'y׋vp~r_^EO~r~ou~=?#p%r:~g{8_Tݾ'θ8<#&W 7d?OfgO;yu>#gEV'OnM/vn~*\~O~O:O~2?U dpK|vO0v?4n'[kl/gГ?ٜs.`0~O|bs;xsS\ FO|No'V ~^~g5 &'&?kgTq?iM.`^o!hϋw0-_E7y|˹ qFt'aZpqV>} ?q~qyO'x:~MǛfŋׯ|OaAn`>aE`aI/8/O9={A\8?E^O'zI/w`y}'~}z?ØFMJ'μ_'ݻ{O=W>\x>ܜo=~ҫ8b$~{Ow&_1POw;:y>~a 6ߜ+߷~a?nk\[oz{3='Ћ~ۼ?<ޜ'lo'I{3Yk?q=y۹Ozoj":CzIp=xb?]~;8/q?УQ=~G>:1WMx;`dOw?x~Nv?K~Bv?>w\F~L~[8ɔ?tɺ|''J'],O;di~w| ZIv??O/ _'[??l~E}SpOL<'G~b׀ظ&w~bG0)^ϋ5|Aۇ #z5OOvng?{#d\og?Gɸ4?^:_\byIp%W"w0<ş{O6#5O0:2O;?qf3 y^f{ ?70(b~w3Sw'z5~Obo ?q~ ?q?cU3W2_:} ?q ?q+..&n`xE~~z~2c;OxW>UBߗη}'dO H;O~L :whtyOOq&'xė^dėЊ}|y}|In}dxb\OW''[㓝?'[㝉4_=MO?3cx~ڎL] eGLO|%=,7''K&'Kz3O~7ftgd?#&:6}|25;1>ݷO2<1>)ft/R?bxdk_sq}2 I/z~.ӐX >OO#6G0^\j(>:1> k8O\A '[<1>OOO|<>>yt&'M̉Ij'>:O>6Z&42>~gfXd4H#ħmt>x1=/WvKڹ~na|R 㓪 㓪 L˃#?. ~Rѷz#~6u=xb]w_]'KG<,G~ri}˂\KS.+⣦O.ͯ-_ $ ~? ߂2Jv?eڝ;d|}~67|>Sjt7}O|׏S0O+ęOϟ'x}~g),̟M.`w̟,rn`V3c|}~gi ?E}W-|R|m|&z8/}ԏg ?qf{ ?ҋUl`ika>~)>OX' !uO}yO;yghKxxvIp!0? n`pX/>*5΍BM7Y=*A7+;$ _j@^ϧjb@a .|Gnɒmw#?'S ?n`G~Flw ?~@^!ۦnOI~2=l=OWa<~'S ?ܖN ?aO0+d~OA~257֟5gO?Og?[F^d}_ӆ87r!rzb>?IYۓzf.?{p}^]ZόDإx?zf_vi=3OagIo+ZoENgz~?6zd0gף=[!s5շEwh?A Uor'O\Kk>' ~Oȃs?o?cF7'F`W~O/D14GIS47'Mka6wZoOF?Q'3k7F?D];-4}~񅥟t}~~O?kt-⭶t}[Zό-g-g]ZϼZ\Zό߹y޺z'Zm~O;F?D ~F?zuOF\ 5zfM[fZt~gSK뙣}zbgq=s,8/r#or!8^Bnd.>[AB?ud}1Ϲ?&^Ec¿U?f-r\}pn8wpӌ1MZљc.'[L?Ybɬd(d~K?)L?9O'z'\/\ q}U~_: 9o8?E\l~xsF?) 
p͹['7p.~OΓ|cz'<'ڏX/zAB ?V,mVl:$_O:[yOhəAMs?sAy?'c}O_y &ڛ)d$3ۃ)C~ wf2_S3 Y,+YYy\dhЙ3٥3٥3٥G~~<ȅ\W';x ?y< d,C r.ABfz2GOu>&(k:|LQt>(:UY`xO|5W󷑙ϣi>GSqM ?)])?+;d`zޙҏ/64\#祳 JideМd߇ޕf|^Ce*/72TdLm*k2TdTbS)$3G1RbgRb4hu),4[:?(}^j-Ki͖~yo%6c{lDJmF=m6'>%3l7l%8Kf3m18c{78Kf3K1e9K/VNe#+&+؞\La,~0v/n///~Wv^/V*c\2oxSyG{f;ynG2{gR{g;iˋۋNj׋q$w.//n////^/~&wyw/n////^//~漯uIˋۋNj׋kM?I.//n////^//~<5$xxxxY~\^\_^_<^<_^_|^'b;''''''''~w'''''''''~~I{I{I{I{I{I{I{Iwl~".//n////,~|X~#n'[I2~L?AO'/%Ǔ{|bߴZ G-~TK?Yp=&x ~Om'[#kzivGx_;wudcvV:JV>NVBf:)_d:>cxr#+/$+1^^27&393zQLT/=p}UiOIV~f#3?3އ^ϼό٫3ho*4ڗ'72G=7MǷg<.?2?To]:qlO]8z̛̋Ff~făzW~f̟ɬU> \^g~fg~fg~fg;?c(1?s*4ygvnfg^dg:qЫc[DZ}.gFϼ{;/M;Г4CGk'Ffh@St'X1O~O;'d#s~~L?z~<_L?|H?IMs_?}06)/$@w8uyu=3W?FF~Ϳ|/{LK~_W_?7C]yG$B?U1+R<,@#+ŏ-ޞs2ρE49yݠ9y9cs9`}wqKLc_}?*u8~ǀǷw;M2eixT?o2hOo~k^_ 5,_7_}oǯOA~Wk•z|۟<QGAlgk?~5qXKk > ?'/;;ژ/s_n1)9NJ~(2OdDVK.r<Ĭ6zVO WϜv 6+'ݯoз#7=Ql'lju>'3|b9N'eȪa}=^G_yqўX. N'DUL@{b=Dey:qhOlDsXOl'ljkfH>ў[ ɉ y@ DYLM /PF8N'_%dXDXO|՚3kjlju"51)%,'z >3^8N'7=qˀDJP/`?q8O} Y RXNǸ~ DJ}q̔z>R4X.Rv">fz쮔za?qH+^O'f:ZY]v">鯔zQm] HRcm ljRD{bJ}lZYQXOJUFр}=1>('*>_qN'*24^XN'Rct lj؂RDJ}/_)r" (&Vr >fVJP'ScRb 4X)q">3VJpHR/LҾ  !1MSDJ}̡z!>fvJ=0^H闝)q">fvJp?=z`JP(*- HRcp ׉=){)B}_v"^Hi͝R/$NRct )1P+ljmu~z`J}k)BUvj)BJ}z!>_z`J}z!>wҀbfo )1/SꅔؕScRp )e i@1~RCvJR;^H$;>f"wJRvJvJ}Lbz!>,z!>;^H;>fCwJk'i+]ʶi"M脚f׮emrX;PSO Y44*j bKN^@ͬ`==uojQt:Pktp:fZweyw!g{c,iY=V6'xV4Gȕ91,1˺1,.䪜=YueMXt6pbAO L,Xò{ ,{,k{-+GXxA@uU޵ue[n.[ۀ,]S0 V32JB({jz1JR>F_ZR,aF_k2'M]UR hF_PRF_~c~QuX*FgUsQ(  sQFg5/ ٹX}R`Fg?;Vnf9vl+PPۦ;pgF\QMgy?Y!.Ȯ ٕ"{>"2Vdolٕ^8Y/eTVdWʊRY]+++}eEvU\*+7AeEvf fA#f ț >3"h-`}f0D{E od|~(~ Ε~*D@+D?+d*dU@eନ(K_p<*J =_ w:7 A=sESʰՕagJfu> KZ'S'\jo+J?ayy'H_'K~Ah<_QQ$3Wƻ~hbuhT٪'['[?9E`?VO70/.4m(bE~b'&=mSE?!xw?1UHhSŒ?/JCESEކ"?1U|i[M]Ħ'iIiS|O}L񽆊"YѮx;~)>Px"pCEKˆuT< /Ux;*^O:e*bP[+V.lp60/*VOtTЭwTmQA׻!j0^)ѹ!TSzW,-t3Ȗ ݜn㤸Wmꨠ蘒g!D^?+y]:WP:*vU訠Un_j7n>QV{W~ґ Sߏ YAW xQAW ^/8{]dP Wk=g|xE㽂nV騠7`;:*Np stt{ujW+ԮzW~Go ޟ{5o kt׾ٯoi~OuV?A=I'mN'{b<*@ݭ?^Ȯ 
UUah"{5X}p0XK;+/=*uDy}-*O]YTY7~\׍g/Fɸ4~Q*c\Ď+w3#nԸ4=Oƥx?q^Q4~|c.3(0QT}DEQ}黑UQi8 ???8pGs 'pAs0'A`{ ?E F(Oܨ,='hv <~~2E=G*E?~zx~O'ѾOo#iFG ?yG?y0DG~<޿7<~Ї~<%?QQOQQ_'} ?o8OnD~;'ϘؽcbI??LcO/~":"MIۄMTdOD׉I/&oOO''ݿD@iȻҎweb|Z?(%u&'DݿB»5OJ 7LI0QvQ?tݿDi91>)J}~rogT(-v''ݿFi=1>9Jwj ?3R[DLO4~h|39>gr|D'W3r&'9~nb|㿉I1u1u/''9'tKs5wb|I''9D=D=>}?|g,3*I&'d|/ߘdcb|~rOf_QQ䎿`,O2~0>΂dgO2>'?Z/-IƟ$SB0ۅ]߅^x U_^P煊"^?IƷO20 ?__P UD_(,Οh`qD tsanO_,T< U9PQ$G0wϟ?'Y7k; sga-T U9PQ$Or~ja$c '9P=*BÜ?[xk rmBUab[?[??\ tsqaO.T˅ U9PQ$?)i"?]?Յ]H tsva.T<߅ U9???ި(˛jިȞts~{noT<񍊇9Q*7*TFEߜ|7*FE\QA7lT<sF\Q*Er}FE\9sFE\_QA7_lI\'cOrFE\QQ$׏lk}Ɇ ?)~W6$׷lI\'~fBU٨(o6$lI٬Ȯ=ٵgmV Z_YAW ZYAW6$7mIڬȮQٵ~jOr}f]ڬYt~kwg&.>lO?[\_lO?\֟U1ן"mVd͊Z_'nOr}fEv۬Ȯ}ٵo"nVd͊Z_Y]7+k}ުx}[ϩ'YCVd_:+O>";_%+1>ڦEdz";'Vd͊omVdWk"֯nVdVcEv5VdxX]g ~kٵX]cEv_55VdxX]:+ZόgȮV+ZόiE뙗7뛭h=s}gxˊ3+kUhVU/x0VdpcEv7VdrcEv?7VdtcEv_7VdvcEvٵ>ޚ*O}ΊC>䋼/ˊ]";OVdZVdز";D~eEv3ɪndVd7]+eEveEvo'/D;l"?f?7Dah"sM폩:|1Eslwli;*!hO6F?%hO4GD ErH2gD~]F?Q|'f3D%Cܟf(׌~mF?Q'g~uF?^K'ܿ\s9p`E&:sxD~KTи.Eh?(*<_EAQ*6W~PT.:^-uztޏk-dEj?(*\UAQ ,DW[U@_c_dEEi?hdm~5E ~c;}/vlޟݯ}_v.\ٹ{t$iؿs׺g;3oW~ߡt @Ŧk(P~[qdGk(*\Cާ O@k*T~sMߟ:|w^?_^K~,@k)ߗ|_ BѵܿG3ߧ| Ff~_[~mhFf~ȼ>op^d 4 4)GVgJQ})U9ʶIʶYʠ7)M*$ס|LȌ@Ȝ̔ԑjJ33X 7X)ߨYA7əYA79{XXɐ+xXY yXy/R"r"S"'&3/2C5+&3a3K5+&392OՔ9џ̐V ܌W ݌X5IKyU"NjUAw׋UAwGٓNjǖtU=G|gɯgbf9-$~mK?IO'&KO_n'[Ix1~_lO'迷b -$yX\OˋUeNjUe׋U7$YŹxX^:yxΊϴbzbz)b;8+'O0~j'ZIz~1㳞ٓ'zIx|1㽞~lgEdbbӳ"{ry*ۋNj׋Uv]b =$yd =$x~zO?I/.Y7b =$yd =$xX!yx^YR\^ ^_<^<_␅_l$BX%"/rx|z~1~\^\_L?I/ O׋#$~x*NjU%xb b;8~\__<^<__lsyq}q{3~<^<_^_l >8z7xxX/~=(Z5[H[UO䅧q x\pHPKP䁷8yI^hQ'@˭wpkqB,GET%o"Yq}@-"˧q}w`SQA,KGƗe# 8koG<绠bQbߞbj߷*Ԋ^l?aڸ˰ʒQTUpwKv]ݱ#ۑu6WEv,e컞x|{26Y yhxΡް (η;G"Ѿso]@|{bw{m8_^ , ~L9O/ETŷ_ߋ匑!V{q|T~y(rQڸDvwU]ܾ gg7:dY؞o~">}yEs﹟Eܸ* ѾGL98xxÐ%zg V%pY`Va̺?Y`V,Yܝ5]QYEQYQY%H̬7cWYoFfro.kfdyMFz#+y}ҙ! 
L?~'|#9@8Ao$`8q^J?g3|/oUj??雿!N?8~9|} ~^~ɯt5W#eر vc7z( Tp oyw׏|ӌ?ǯˏ_q|}%8~+_aU8~%ADD_GtHN`t(H}DXbd~??s??OO?~_'LGg}3-i8zH_^86ˏ/~#ufdunMsF/0"4lU]+Y> WERHbO?;q\>_v+?b; +9c䌽3J+9cd^{+k2cɌ&3k2b7#z3b7#ňތ͈ތ}ݼ01l# (>8bM~p^{}0b7#z3b7gk2cɌ&#z3bɌk2cɌ&3k2cɈ޼_k2c7#zsE {}@䉌{}p^כ{}pA #Vd|=Gތ \5"vr 9ܑ!:y #sr^Aovr^oFȓi{}pGF≌ #(ތ# ˓{כ{כ{d^oFf^oFf^oFf^oFf^oF5כ{כ{#y@F 䈽>8b͈>(>!}r?z2+'Bx2+r+ͬ8͈ތͬ@̊ɬ̊ɬ .VdVNf5d^{Mf5d^{߱Wr^{%g앜Wr^w|^w|^^aIhs0޽عzP;4A0A~8hك n*ՃAqcAy:hdzP;4AQrCPyTx*އ Cwy:>Tx*އ CP}>Txק#Ã*އ CP}>Tx#t}>Tx*އ CPݟP}>Tx*އ C{<aZއ CP}>Tx*Z'=Tx*އ CP}^OGP}>Tx*އ CPὟއ CP}>Tx*tmǵ*އ CPa;TCz:]OGCPa;TCPa;Tt+OGCPa;TCPa;TCDCPa;TCPa;TC8v*l ۡv*l cA㠇 ۡv*l ۡv*l x:*l ۡv*l [AyU AvP?h4 ATa_m¤rP=̓$كRIzP;*lq<*LAQ 'Pb䢿$ʼnD \F;z](EFNKMFKUF="ȾĮmrIQ6'BQ:#%yN> Ui(*rUj\h\Ѩ0tIQpJ^%FyK:jz"'.I5J^+.e@Q6^E`H%FK4j.6 8\Q|[PfeGTPns,(% q ]P:![eU|Zc]7k\pnV+4\/8v;@;ld:p8 ԙPgv-z!*\uAͷZ;|BϷZ}!AU#낤"vͭoTA+4=2Ⱥ]W5 ®b][1>M7ڮʵ]..uɧH^M@Mw $^%/h*Tb~]jR# _h8Q=劲֩v`e|^R ]żOzUB1-xJHRѕ\R✫$g((J/m=s@A u%Nh*%~O _EJHLjuu -Qw|qü/]U4KĪ,SB=܈5%OJHG g _^Barէ;.~7ûF,xacpW (zTC< din.w s _pRJHRR`RRtK8*(MSB:'%*u#kN/).!kBTc^[O?C8ټC^/IA-KKdPބRBQCXy!Yڴo,pM_Cg|!QDvb"Cw՝ \'@!SJHtG?wWX!@!;[lRސ=Cwɐ=ƃSR#q.~35oVbfRXz~hBpgԱ+G5=BTk!2!ט;YCM e5i^C곜s 7KwBs"@[ (Ӏ#EjH}ּ!d7fs@} 7<.XfLX5ԍq[LRo,8p#h(kYce?R__Jk}yxj~ xy;W5'(5P?F`D ܝ;u$DW_`!a6/irEK5zѕGoy++ :~MOQX|yFȟ?,sie C[Ǵ>sV_k-4O qCXuĹѾZ8Ii'ʉZÐfrOʟidfo2VWQ?*2}TIxʟ5NIld{{>&?K\ܟ۟,|UdAtFlUd)WOVZcΰ<`zELʟ-ɪ~Udz^۟5'Oy'{KYeoW5_='X?YD]jzO N[3O;g5IqٿI[CaO6[֕$`'sik5|VuUÑJ?ɗ)[[wijry? $`:I=? f|PU+INpJuv? x$_ONҶO.]'g-|c} 0d? ߲NßoX'-w ϓpr"tTß ijq Ƌ\'o+Up&=0Cr))0OZɒuV|? X.-"|Mz?^CqT]/qw /i$`Z:K'JUZ[E̪Y-I0QzJ'? :Igt GM&8AB 8Tj/-Ith(IZ? T. ] \j'SZ&R7Bɟ?LJH߬'ONSZɟKZ&R5BɟdO *|z[S:}I'rC8\l'&R.~IqέLXG۟dzIO2]+ObݾO2 >CoXoMJpYoÊ. 
x _eʟ0l$<1o =WٟOM?'ߗIZ̵Y#-;"ڒ?4۟$92'ݷuTmIyvMJwK'OG$1Oϫ˟~.]or1O.fzt^tԟ?\?%G?]}up>3Ou..M?9 9%Sf^vOEy/?u/:'+޵/w-{Wk g, E?M=ֲ=({|>uŻ;]wkHYS=Ÿl-{ßxu_wMyWkw4:I{Rk,_4=I;u?O"^tAx`+5:WA+5vŻ!=IudNjd vI_⯇I?|?%RI'!R ^Jqȟ Øu%"8t`ȟÐ?al=A0t+I-h"$ٺIk~|ȟT/I=_fktL" oF6p'?zJY1ǔ?9SdOsN\I^͠4KdOt,-0OҾM"U[~WԩO2 Sd )hl!bCcyd؄BlBeCnYBAِnc^|a꧇kt/4ې"|EW4aJ6awSS~aǢo!LX|tar7'Aa??L5dC Icb'.)Ls<"8@YC6d Y=*0{]dmh v٧dB Hj,k:)l!rrLQonOS?*rR6'CF\H^%KHuraT=>t [ΐr"9L֩)!i%e ^sM!';Ud ~T{L2T8S˲S$e(cA+rFz+2ފ>~OWR_rGF~rQlFṛnuweDSOAHL)~-e g~s-9U燐MdJVKݕ|9LНzIV$yY;Ivr[qHCc$~9 ;*?/Y,de /Tf^Fv"G nd^LWG4䫜C,,[˲tˑCrZηܦYd9?d~˒DS|"S-29p|?"SOǦؕp~^Jz^.2l6{jP3uEVWdiJ?-V s(`*2^el?"SgU~C:V/T-24L}_(2g۲PzwEx|T$zT'PsƒH`.~.q.%\곴֖*ՖJku.4mToaTK/4BL[dY`'(űSJG`e LB̒ŲyzoL}{ySS~"S_)2N2j'Rd+L:8Pd+NE!+Q/rK%6I(2,S-do9r= ?d. ?d-{^?P< b8s^]\e2Uzޑx2*Sۯ2*SO˩2*S_Tk oM= WͰmY$eMQepL=U-eBPcw V*Se2^ָL`g23wvo܆L,a qd 0$TLLn+&S1jfS_8L}T2S'N%S+z+wݺ}YUzd -{nU]eU.DBrsmu`fgL}{Y-dak̖#?+z,U>$#fddgZL=OU>ى(s8w.,e9C.I(82Fev'L䷬!٫L=3\U ~rJ^+-L=a5b8R'\eORމ2[nS$e]&g!yYΐ -I]HU-s=Wܦ7%*o_ݪ2εrJr%wd/-*Q@v,yYnSj[VbBʶ7ztfvWrue]~fKU`aΧ`Es/zWݲIsKn9T$;v..b.+Ѳ)ka $dewfZX[f9% O>0}Т?ߙmw%woY)e PBCZu 1oBjIXfrFwrؖUY6I;BN$#ߚu~ᘏ䗏_haou?6[>k.yo]\_qͿMHCõEg>\8c.Uqr}xo$r W<m.ful"JrK>pRgݷ=TL8x]gÿQi?׏wѫחeB]2cۈ*^Ӿ/|eOZqgGݫ?woW_/~?> w?||w[Uu_}Ws%М~)F O8DHVs MuĖX%sȎ9oSEIr ;ݟ~VΓD#?~~y} PG}ԣQoz7@= PG}ԣQozo[}ԣQ/VK/e ^/+^VxzYPo_a ^/+^VxzY7ު ^/+^VxzYe ߀z^/+^V2ey-[-S, ?2ey-[0w)mFoY޲e{6)FoYҖzXoicokdz6Ӳe{-m6Q:2j\X#[31 ~zJl\#[ʌ?,lP&c_zHs0uaяI?r0mc@=;/̺*\#e Dzl˅iW ۮ"Y]5.JPԀz_^/,]]xUﹰ*nsaUkʫ΅W% ;BGw0{!S"; SobL!-SNNU;|OT#dTS WHjIS9]%XNN$;X%5@}jVDw1kB N95규ʷ*@Rvjw* P岜wdA Pܦ~e)٩c}RdވЗ PO֣HPO>$_POeٹHs 9عϗʗUoS̀zJpP|0޵-_g@=' w:3#ݠK2HVXޝZ"SJ POW2~ϭRL &5gmR,3 ԳR€`ݯr֌PB/Ye@J~P^UAq!mdsa@|%d؞`_Hw[^zH P?:-'La+@6HZ"@P?>;L 38=L#@PP?d0BI5u3篐@|8PS?D, !I\~RQEa'nCn#z\B+~G^/ [UELzE<T,mR_IOF|~n_C^/Am_P_pť_`_ j{82M%LĥA!u_;L-h;JR,ji%_[E_ƻ[p~š1FL_^p ]@蓡_^*"y@Lc4*|_@n Bm6 z<z@z#@;PyC @lC ':/ vA Ⱥ!ș!{ hERjzך9Dz @)"@[v3\?CT^ o@ 듊u@J^/B3~ =T H!CΐZwNT^ B E{p `}@?@E :.Jj}7> @ ?)C@X*{C:+CC)@CweЫ%2PBsc'ci/|9 
@w.Űna4o@'oC'C?'!_=#2XB?*B? YB?(\|'{nݥ_Dz~9U˟,zΆN[nQtEzY}9SK۟,WQBx/ A/#Ya Ȼ!mCPDmA/ڇ Q ㇠jՐGWCZy$k5{<$܂Geݥ 2-h6t\ QwGQl){Ÿ:i)> 3҆ \0 }z@748fJ.he M O}jA\rI9 􉁅! c BG'b׆c.҆~ k>SݿCs)8߆ g!`!{$ެO 4 Te)}  .7oh0wC3P)C5dr u0~<>)( >wC^ e}Za}!|ς!!|=B߱ڗahEzf3) UCǟ'x> zz @tzW<z/ !B BC>nB?#^Z>IJ !H![s>O2so5_o}Y'i_C:YlgBӲ\oJ˾B×WXˁr!U+:Bӆg ПLzUr p!?@ !ggIBxB/gBRi'B3z;@BO=v'h|C뫴C.c O|' }BO<=q W_?2 BB /(IZ|/';Ĵ!*e??ߋ!wYwe7z 7B:n= @̀B_B_,B =-CӁ˟P{i2>1]h}bt?IO@ɀ}B =$ ==G W 'j|s_>'k'@DǴ.J?t>kuAŒ4X`ϰ=Kʟ1>X:ldU.5XAzǻ_Ļ zǻz6~c= QЃ@BO/_/EЯZ!(ת@ڙzV ʵB\"ΥzKM3~<.\XAҾ\s7]:f=@ /CT@ dݤvf! !{XpYoҀ B]7~.la oW0Vx'z?647 Ⱥ!h!̷'~>6 3 s I$k}f|i}^@ >SCEr l/BgBszHȟ(ˁПx!;/BO(z9fb ?zc@+zޏ!g}q> 2>cO2i@ }% g=o o_ܿ!ȟ$!'e}Z7~JH}џ0>&hpb|e}a}:I$1>6@ WzOB1>u'g>D~!@O\ H!'~ e6@ 7@+nC$z@ c=m@3hCBO~+z !G'^ @ ww%gC/ڳ!ײ=0^/BjnH'+C/ڷ!%_$BMǧ 4b!{-ذW۟\ B0_aE!WdE||'O[BO28z9W 3>Bo>ǁq _= o=E w p gӀ3~BiГ z+5tHG>uI gz@f "4P_.OoZ| /wş_Dʅ_ͧfZZ(VdE'Gy`xdŝ&Yΐ+ 4s~   X;HH{J,G57j~uTn} [GuyҔn|iy rb~U V5mBAOQ|@QxULG{A\~[^ hyL_ m_٘+{ + p  pka(w(w(D̃Q>s]ܚ((DDQ>DEOEOEO<,Si%&{mD+QFd9Y+ Qn^F QPd("'~_|WF'SLt=DoA|.SDIv!;~B?tvϐ~Lu3SHLp$+%#%%X7`~}pYϲbw^fx{qÛ( Q޿Ӌ( "iAwfx~Ae>Dy| (ߚ (WH&OM͛ $eCZ.+z'֧7$֜{'e'^IW(?-(9 ͯu 3fMϴ+zX&gwLls CJ.` /nC4+`kaY0nu`m +&ca+tM'ڤL=fx(DDO4Q k&= 3D~OPeQ>A+wD\ᰉD\MO&'g;(sȴL}{/S@DyBOeZQ w]{/S`|LP@OpemM?k߂ 3M'8`厫v L=6[b]rc[ ,sWl-S!9,< 7Q7Qeꯛ7_B+3Qe/ (;ႋ(ODH5\ⓔ"n</Pz&_|(OxDy&_|ɲE!_|e={2Qs.{/BD{[i I?v!8(/w!ʫ;}6:Dy[ m _\wW]04QO zwa-{ D%&ߠTwz}[DyC?TxedQ^&'Fȋ(M 7Q^Q޼yH(/t Ӌ(/u("^z(o=DyCW'卽(/kv ,!˸z(߸+eQ^]C?RDyYB噈4Qމ彮B'Zk!;BpR$?ٖڞRvY>[_z݁bEZv&euCo|>?^m׏/>m?8q\ˏ > ׮sGrsmC2bLaMsM{H?M~g|!w~xz<_<ZkQxK_{`)G>u5ӏGƥOgM mr8fwTrD姘%LD>O =O?|??~퇏3>ʯ^ b{_}!c>r T6_83y᏿ 0<61Iw7rO"fjd2XEgs΢ ,ħE7CoSgItE)!皚?/>Y_oD)D4˞ڷhhLo;ZiYQGx[x#ez@ey@=A22*SҡJi8XgqU*cu_DUH Uz] U U:srǒtENEEN2'T颲'T7BճE'TIU*>C TPe"PtPPe&TPe UH U=#TPeUhӡʡ tє̄F#T|Ti**nUGt‘c*'[BUvMYtib XtcGqrC XC*8Oy*͗#V4dbsb ѱJ'Vwґ.*(:VDb Vy)XgdױJf܅T;V*'MbNT*$ORO**gVbe@\:2&O+8IR1TcqTCc4XB'V)ybdXXXXxg4F endstream endobj 506 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> 
stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 519 0 obj << /Length 3062 /Filter /FlateDecode >> stream xZoȧ;Է' M$ 󁺣Kܝ}ZINma$ٙzjRb Ɯ"s,dDQVi3(Qx"R@ߗ0.8ގ(`.(4j } 9 V:T*a vMI# ID{? 2ϲuiL< dqv"y6amG?{n?K@mB4 ^>rZ kS.LW=챌 ,R~q,Xa5)z½qhq]bs=QQ{+ܻƎ`{ky_M^V $ ^02=NZLIa/`>;+F2E DhcmU<6}. 1^`s!Q6y(Yk2b5 楠˵}a8v:x~^x `5"8x7D}%|UYW0DBlb?U_qĖˑyL%w 0Na2V2GC`dlFsrtQ [*Ot{sl 7bЏ*cX2bTɌ16KnW쾵>\–WTr\ z z*~hD̳Qrq9@(֤  ABi|q>JrǴQЃp(j6%3wMAm؝$D_BD%N>BuK]ȉ;LDEoy43O QOSܣl9m–7(f80VLR'cr45$Q'=:7] m} 鮗*!߉ q~$?О1hXДr ul;:'~י(m>od] DŽq:&sNN-dE<ZaXk4U䐠Q~ZfN },W b5bgeZ/Ph_0(-Y&MѠ`yg"Yۨt![Κ~Sa7m.n;:n.etF \{Z>]%߷Gnb.Qz- (y+ݰ;dJ h2g)N^h7Kp\' 77TŒk 3Pw#l[&)9;xqw~׮T.N_RA0sPFoA%yuܖmied4WýLNDĀD02aiptTQ z_|4%X*Ta Ns7\:]T$&PZ>#HBs8JױAhT>Sݖĉg\%ѐId90Mr#? 
gnǧ ՒP'qf3)I`i*W$TÑPmRHDSެ^hBzoxa ^ާY$S Kcsdl̪ͰPO3ѦOa4"8:5\Tb1>|J(R2cdS W5#ן5ҝ`_\!6GuD1 xm] SU9Ә*u,u#R['<y[~W[XCce g 'p?vZi$x >p\D#גOPrX.LUG0'3: gB'7ʗvX=?B{R endstream endobj 470 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./gnpdata.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 523 0 R /BBox [0 0 504 504] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 524 0 R/F2 525 0 R/F3 526 0 R>> /ExtGState << >>/ColorSpace << /sRGB 527 0 R >>>> /Length 1146 /Filter /FlateDecode >> stream xXr7W(XG6-T\>$J*-6T~?"VACzx~hЪKeս|TU:S&mh'>X & Foemc%+/ywjn<*jM"%>C5{hA[VhMTsĊdj8qmGm)w:rH&I= p w hvIn{)`Ғ"ݪy?q|:p(#N"/ՙ5y/f'Zk17~F„v/":/tYX).ȶFC0"vkH[`3x `gn .Y9-(˦A`B+wbn[3bn%ʿ{q[+mk5 hJVw1&Х7vM4[·΄ҫm)ܸ|ae:zyҕ>%W)Y ,5!hOjx\=?-|gfr^F̣-K5`5^<}פ,L2L"Wse)%2GQCB 6tu8z%Ms]g G;QIY(a> ͆uDesNp4y+hS^7Y. Py su]1!s¤6sMjfN'dY(hf^Mé i6ASA9 3dbhӋD67׺LuE*AmNaln;eީdYq+]=U,.kޔzXA(a!r-V$ arAr8(0Kw vc9ᶂk[աAΖsr\F[0绛id\-}B=}V GT tVÙaYqf|M:~la& endstream endobj 529 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 544 0 obj << /Length 2881 /Filter /FlateDecode >> stream xڵko{~p@ ǡW ^.-8\E4E˺H@Jq;咦pAs*^l_|g Ge\$e"wyT&nqY_^dYVfu$ }l8V]a]I&nieeC`A/wߞqBon>rǿw߮so O6e3Y^B:Mb2񪇍M7 [mkt"!6)d-wu }Zl{F2:E*NՍ/|"XJ:ݎD;y!ڢ"cFE.YdQtG(!nYdV<ʍQ#&]>ct R1#8132z(O5`cM백Ć,(s d.z% W,ɾJ`8^FQ30HF\* ɧ`iU%+K;Ϛ:q$-cDҮ-1LCv"vEje#G-r)Ov` &:NAy{ #R󸤛V"ҭ$47QRn f#5ԙ&h}&xii!굘׃gي֔xFbG/C뷔LKp6$8{UM#KG_{c֎)UZHI ξ2WtoYW'.HG]d&N4vꬕW-QtEiiOCAc,+ΩaX\*DnwRmGly2Y^yD-tu: čN 
xu+n@!=!H}lٖ;樚*^N8P Ey\cjs5 %S~J4s58=Q49Co$@,D 5ˋ5yb|6vEgLG#1QU _zYwqL5ِ>Kݚۀ%,y:v$bsZ?I+T|vi8*i<8 9(zOrfAƐItEAMGF3_ ^:x@M!{P()FY,O< 05sf0}(=6DN00 ~ /hwAD2k D?[G4HI\|Mt PfTa_́ JȽsd6Y?atd{!˖7> ꦜJO  If n-5;X(SGF#MF:uIy{n8{Hd&,Dhؗ]]H&w!0#o$L J6%b+WeՉ!iL'MX$*`x*XFDDH2lO>ّ I$[H @~{p?xe'^!.aO8Hf@g5>r͖H>{-⎇;D;YqlN6q1P44I䟣2L8[+qj$g 8W6{p동\b9w#ft_}e dzo(Ͻ3ٍ H50C|_Мӧtӗ51*ݹ]œ} +zz[Hp4uSw6'JK1΀ˮoͦ\Y,> stream xZn9}WqloE.{=`wg1Жڶvt2n9,JqI5dUsewe-)FAP:R61J_?.QwQE+Y1I =oʑ0LVd48E(r'+V*AQQrrd{ҏG,EٌU%yDJF?)HMYA91 syVN>)Ue,6ETtIK eYy-9O2(ܲ^:l!˯lC6+`T2f0(hPO&(/cU:zKYfV1b R \F CL +c2TYU`p:T``qb g%PRVLNLzD`yxF"^B錷 *"Fc!HFFSEFMC g2D0b#k$QGF`rdŠd 2I0p\]Lp_F2&]wwṠŦ>tPϟj$ltWuh`XOnrW PhU!~m?+aS!LlS-ԥ*~>PBϻ M+ s5Yq+^Wr֯[ ˗R 'l;V0mlLRVpq]RKp6p:9/X$ 3N{#V?ʻa~Z.g~5Vڹzśբ)WK/yUQ-Wmҟ=j//q/WggO|6f '/&u JPG}9]Tz`}χͬ*rQᬿ_b8TŸϦjV^)}( 1|8/4t?.0ѨxܔxT.f:~+7+ՖvN~jA5-Ѽ+~,^gT艂kj,u]i U0};U8]oJ39?H84pMZGOHRYGz7Y|{<պsZ{ӆ t;tYj 2$KDD^ҷe2}}+|&v庾LJq#ov4X \]{@ P\&톶jak wbӈ#iYS pҿ/`A|֡81id݋W8dI[u-b7-Fcf YgV!Qt`3vM B3r#b5%&khcNAaP% (MDN&߆U8w#qQx&tρZ%3~7t+:H70zoZ@"l r$ W- Q,FnB4 \i\-BWb)@imu߾Dt91B#|i3˯N3TA h$%"lŮrH r ]mh@6$8!ܚ [e}%"Q} K K~'I ]F5W ;b%h džظ nGŬ@/u ' 5 Dmڲف.> 5}:Zϲ3(`XyCR| Ir{pq?Y n" ދ;r11n]~{4uZ^,e~Q׈"|apu8S2J KbʵW?.u-`χ26 endstream endobj 563 0 obj << /Length 3354 /Filter /FlateDecode >> stream xZ[ܶ~_1P@x^%F։sqڦzS@;ݝxgf3\HrfwcMaG\I9g<3fJ /jVJxf//fZϿǟUeJJ#dcz5W[vUn0u/9>vhGd`7>{yAԬ6KNr9[~<NJ#5n_a{g۴.vyhn:H#*gǼܭ7j2,ꪗS/l-Ͽ?jx% bƿы%} ;PM o9leq #ͺ 퐻%}/hmW0*ܴ0Jn|%5w4uKJQ $-Vlse`jG >+M~D(Ҋ,X+md `{ ;z|.iJG'mhD}\LmI$!btz%Juz%dº MhㄯdnY 0 tFh>3W9c݈ Sŷ(빮m_9!|X؅׫a?q# ӿY\3u$ [k[_ 06pπ [|Anɸ˒.>my#{l-OGy/ EuuhjsS`` \foKaJ3Yyܕw=V.E, m$/5?ACPEODuP!I! D|]XW.ؼ8ԁ1rB;}s} R )T2 <,|,o *#vO,pKE@ٷhUk]|Wci!ƚn5׫|ơ_ҷԚbc0@S(:)J_rA79Os I1H-ۋ ]gp,>~ *D-縀'(4M;A- !{y }DJ^LP~)ziq 0r{&߼ 8MGq,/=bG{\yFVK^ـل laq{tӲ9L4 B-m0"m I`zXGUIq\^-j;FRfZk ZXd-igh+Emǂ67 DNOc6j) xN!lM guff͘a?xWpƨ f1LGLaYm"! 
G'DӇ-"`| 1z󖦈 B'z`ׇ) Bes;΁Qbrʌc.(h 3Wmߔ 1Ǖ;B0Bvf 27FfUR MrkW.'4c=9mLkSl8?/y̯G$d揭)U=Z@W'Y!j*1 GGKR(iQʗ?KQUUÐ<$E=#U|tP`8%GZ)DN#4֑bݢ cq qjc`)D ^o ݹ\o!=~ {%BhO=鯜bIp(ԣ%_qő{q]ix+eqiYt"|,0ZMͺLxŞb+RE> /ExtGState << >>/ColorSpace << /sRGB 569 0 R >>>> /Length 512 /Filter /FlateDecode >> stream xTKsAϯ#h=GIEVyHP }2Aaۯ~ L@j JKtdh$<܋ۏo.r"`4?-8>K xF:8PJ2()cg؛G[콋X*BM Y58c% ^W>&ORE03NX깆zb+ƼeK z_K+lR/? _t5wq^8>`(c˜R5 xe]9Srծǀۘ*zy2f1iT>@wl.1 4Jy -6'Q˹iL(b5!;wpݵ6?g endstream endobj 571 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 532 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./CO2reg2.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 572 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 573 0 R/F2 574 0 R/F3 575 0 R>> /ExtGState << >>/ColorSpace << /sRGB 576 0 R >>>> /Length 2798 /Filter /FlateDecode >> stream xMG+-gFȻmFڃ2vȮvʊ|=vG}={yJ{Ji?_>u/[~;9=eimR׶.[ɮS*uH{ux~qoIw`?LmhjW{t\|*I?iI1\md>{ /RHuΟcaO^=|~m={뫧[ongw?}i{q->]7<D7?uE_֐A{zn0ufBNyM u|8ctϡ']1y$<*~Ih/C.cpBJ`RG|p:$XT2leKO#\UJxE.j?Ǫ̇NSWCpժ"TNSG$BtR 0̒ym|nR)eLlgbq<.ötb@h*j\m7SVtd:cꩪ؛i{xϨ2MіZ-g_CioI!Y(z/|t몁6S~]:*UKA ]ՀD)YY]Uq\ulϪ:hШTTca:MuRO(EkMA更K"LY㏍-l}1OXlzx7;'BR5CЍUihNď5苉]uW?jDOPZ4mV?BtM2'?ƪ05I;zAk>sׁ-^M5Ӭŏu@<8rQvR!!8#r:P нp5@.p@B,p&8MQ >])] FgvGfl uCt A8y~)C!7GՃ7įp@YkVKh-8 k[-)CB Vc[}u @98j4]pH80 5Ƶ#bpHs K Y(,a;Q؍!5CYGq/pe| Љ8xh8̓8p&t%B a9W ~l_98̏8 Z^+ppk6KiW:&xqX]RRti|q^5U9ZѨXjd !]+n#kN򺘇<:!I9+ٜ+gl?>j?GïY⌧jp#g3۞XǞ9,pEy`|=O%2~Z1߼ 
YGM)0b1^#"VoVs8AsGvcԻ3*e8IˎҚ/u3`E/h)9d ͡&g֚;c<3^BsHm isgvf<5c|7xHUc3{Zͣ26_cX5xf3ә/QVs㋜XX_䛬,1_țMg|iMYekehϰl 0[|vo榆FpS}L-gK7ܼqSKJ05n*nj؄sScej؄V(lj94.LԒ7njmZV.SCrl\LM-{c\L--3SۉZwZ.HI=NH^;Sfynj(GSK)u禆BNЖ5:2SôX薢%7)75ILMN B"_p05Bڦ\F4z4M-WoX]ejwO<ΦF~B榆x[LOpIQ025㦆x[cᦆxF^&hjˤo[75`!1=751=-2􈃩%2Ԓ2\qƥ Ѱƥ }8<h endstream endobj 578 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 586 0 obj << /Length 3740 /Filter /FlateDecode >> stream xڵks&I]<&t:I:#˴;/ Yr$žwŨxyuuGPMHFUE9\FW7Wڎg/o&SkeK/.Y&o ww~bqKɂT>~/6zo#[YL~ۋڥ;Qqړ8Vxml:{Zz4N k*hWƖV気 Y?Nᰳ9󔞯'S`C l7{Mܭ09ͬ!Il ?g\\( ҥ0SnC^o.FR+yV!/Y}dx;-; $R(=Zʆl6MsKbb #Ae$psI⵴e=Wovz :5BɮQuIJ_ءq 3 ;ԞwWӠ=1 Yi{DR@5~Y#tOHqA]7<=G`˛wb=Mt¸Z/f[=a*C7 3;#Ki6 #HmQϠ͞s)U_|C2 8>\2A ԊNjꪔ!hߢ"'BHǡRy?Bmpy k^3U}Ev"͕VM2 vݒ6="o\"汾~{kP(ub"9+d?7'7ʲQ5DX`jo>"+G9JUu{-d;m٢܌÷R@8"AYҞV~oZ@~PԘ? 
:w$d89N+LLIP-Wc.ό 2,ʾ>LOvβEtījH&[ 3[HBֈo) ћE>3V2{4n{&D|\HA2BÀqψ]$'Ű*,nY.USھ8yDi m:"6 RʜRNevJH~R!kCx"4L+]&'E^)ǁijL|R&s6+I:4V{C%(GJuk"z)~,`(J'%BJ M}H4m!V#jlOJ=峰CvMw{o Flγג:jV;1'FIaSP}$%w0+/E9LّTf2gg^v+T\VQɳ-ojFα<\,.)Tթ1e9jG DGMO`u6G*ñ C;/C?&fT( [Ļ19ͧ$IBnѣ;2{^#EJ@"S|Vv;4ǨC*Rٽ@BzTؑA!@SLΐKc֥~Cp]mEz*J!eTk_%|G:V~<ڎ 9%py48# !S/ -HQ9 {q5G!?t2SŰ!gؕF$0isj9󾆠ѕ(XxAlms k Am#M~: ZB\Jhh]AB?tشjx$e SmߌrOZ_iI \bJbflNC]:0G蕷<4RG&3n <aK'@{Qq(: OCY=YDT*Q>vP,&q`t!ɟ:ԺF.C32]t=)/2!J1L =52+(yK0ds:rZEz[( MzuuӅ&)ꑁʙjTzK7/ (=zkl%&wӻ>Jӻ9µSˍ:ǁ^u2t&gĶݧ~雈c쭁/"2䦷_jbicQϼBJVhMѱ $I8u*֪SP=穪=P˫˫l.p5wL`fh=hM bp'P`fdX2@"#( K``Z*x^mZ2A1='> SMQD}A<ׄ0C6^=zߜ.;VyK]M  MRz೤/GC0IUWZq`PiI:&'*ZfKG43'|Q+uXT|$^lut,`^*]NOa2ۏ]]vbFE`I=__P-0ɾ>Ϟe٬3Λ]J'6e*<& hlU@n;VAkzC҅dqeP]Cl3: |)i6Ց.u3sV^WJ+։@kA|Jkx)G!%}Ff0%{4B%>XremP2GVpÂDu/1ZS":ºԺ\psߵ=y;"wlvE:O:\ɰcte$Dwի !GpfuWʁ#~B]&~?Xz"N0/}=%C,}}4Yx;>bM×d\9}r\^Y]_DsQb*]d]L%:TK!su"CR'g]=`ps#62-n}8sx^~"r|-̘ӜusN baԘ3^Ge?գK^l~??{Ofa%۠=aCFܭVmͷ.^#ȘjvuBIu<!~E;ń&*Z.u˿A&%aV\_x01?FL!i1WX$WyAp,å١݇뤬B]!o E6"WbЎ*lL].Rr؎c:Cu+r)VnXs@4D(Wܐa~]JI.UB[3BP% m?jAW \g^ ˇ=4'V8,qC|LeWaVvC/TBCdH2,&Y2ѩ>L:FWw endstream endobj 595 0 obj << /Length 2388 /Filter /FlateDecode >> stream xڵY[o~}p3@/-mZ&hIՈBJ^6Ҵw0d 9gΜ9n.][/t֋Ee^:כǥmYTTZi+|>*1;pX%Y-omoaS_z sz/l~SY#rU axl 8nyn<8!H|'AV[h+nY&T^.~HMdcY 6/5A(QŀaL,S1 u0FߵO_A:̞(*ާJe-H-oUlyeeQ(Z/|c !a7q79 gT/5|~ ldY~ *,Z>Xc1Ba_<[BP#X8љm *y/HuCPsҎƞW7~&;Y[M,F>DBeb SM+oc&g7Z>߈;%N ;ߍlk7 *}m;8D۬z|{4@x<)!w9Uß=fl#(j9;##'"uyG׮Tܯ򂳲N|vn"?ذ7+Sp2s9{%a^9.@ڞ)ړe*(2a-2 )Pv` 7F~˫04KPMbe7԰j Bŧ&"mBd^[ uӊG$UU҉!B8 QlG^Ѻ`ZsmM5[^ɊX@'GnC³W3\ BaafΌXPe@wǀw$)< ǀ30 D6@) g@OpD1 $qi Pd7W2LlҢ^۫5U^|4)J^~bfDN*^hl:}ڶ>qExLRrqؘ~ɬ5ZMÖIeG)z$% (aNDS;8hfZ\xeY\e|> /ExtGState << >>/ColorSpace << /sRGB 604 0 R >>>> /Length 862 /Filter /FlateDecode >> stream xWMo1=`k*:ͬXG\$ޫ/vÉzw9mQ9F~Wʨ3|d5=%,&4Z'+%Gu9VXWdu*ymr3Q8.fZ|.:qa9:]l6{w&ȑm&͡M{VSm: <99`^s]>@ǎSoIt!8|H!ƈi+%YEâ?_9:aPR}7I7*p.+ AblԆ g_ ן"vw NmU/j(`F'ҿJ6L _>4dhd2eRJQƍ:71 (L:Mhv" ;Zwhvj1+u}L#ǛF%&)%HJFe%]_e\v1Fiu^NO$bFә$gQUp`i5adp=ſX$nz#Mbɀk*ο$'sFy'u=ƻ^vYZIʋWͿ+o䱡LОWُ6 
@2b4%/5a3`4SA)eb٪\z(ZQ Xa)9qmgT8ck09(ld~ؚQpoqg,ɮYq{iY)26?UlsP `Сiџ(5ا$_nQ+bʡ*R4ʡ] Bz endstream endobj 606 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 610 0 obj << /Length 2045 /Filter /FlateDecode >> stream xڭۊF}B<ذV4J[ٖB[.lRZĶe'~{m4#lJ̜9I觫իkeuDZ.F5G-qzyB&ĺ]OyuP2SXN2Q{F8KhbҸ26q:زݢ[ϲ(eW!_EinqEFi,qLC'I*MwC«RN3f~<٨|^{ ks;hI1q ]4:d# >x!A?QBhtdvGSt/jw駯, %Mzb ϣX:8SgdzB ! }r/SlE{ܹqt\5[D",TT*@]y~.֬qf1 &Ӷ=`eְϦ{o&]E Χ9B;Em9@B3TR#_7`z"̙O<BLϳ~}}_dֈo[v'@^6\Bpc+NRm莲LАr!eI񑋑F́??!!VΌo-tI^^)*ۣMdd9awX+3dT*..m\=*֤jjBu]YC*ӥZZ+|h1&'tC]Hƒ$;G~,lio2.9ňig4JEG_CJz*qC\P^Gu0 䐓8NIx䩇J_]&P04 KjH͂5.o 5cw@&S&}-LCW-J.G 5@{)9V:wwvӜwCߓ3z=}u+"vna+['Xؤ} DASP ̚g!pc@}-`|kkFTVKynGD=tͣϡ> stream xZ[~m-sm 8q@@>p%l$f8(@̹ UNn'gxDE]jru3 jR\-&?L_|N) BM>ۋ/fƘ >?>~ |+JakXYURqE0b,oӬ`N |{K܆IZarSaʷԥ.ǒ*jW][`UYqn方=ܵ|05'@ȅ|hniA8[TQpYCMwkb@X_^usIzcȑ8Bvn78 SxH#]?VEޏq *U$K6?+ ^["_=9DWl^ߣ< BOLIM-X D %آJ"*{[tdVumFlף"=]1sVg@꠿SU(s>5 *БҕR*̀dmUZ\J<*wNZ}e0qvޘo!G5 օ'PլF 0-'T$(PL*Dm$6RNM&UV,-5xH.&t.Y}҅U[[|  uO,bw͞oЦXYvtd#2Ai@"hwӪ>b.;]^F].aho.&$Pǘ\vͪʺ_ 8RpZSy^r s:{^`!!Ȫ:%v,V8xg ssYŅ[z)˿9UF2n7 Y#KQxrBh^iKI$hSueNDsPUIa͆0m餟C+h ^V=c"?ӕN*E#2VeO5mBt o/xΊ+-oYQSBW~tv[-%, [2\áL:Fб.YH8͆T(}Q4`ĘbBe 2}Rg0}}+& G9ՙz4Ly%=s|C] fY-8yXRH00'\R-y5TxQkO;mX*+u6vJM6PK>Uj~p(Yr;L,a+tKIDSbfE%c;Ж^clz.㣨:2 5ǻ /dxU֠2ZUa1چz["R}:؁iW4wZggtF2+s HVu&vt [u;Ri.;Ev{2'3-}R[ӬGҴe!$x]nA~$5|Jfl8~´U>H ۊ^֟ Ũ)fԊ| 4d%ޒ nSrP!VpEe@,z;-Aq ͎;3q71֏iIzmP2 Js{iPŘB{IЪ4oHجrB29=W֧[ ۛT:_U0 Ʌ$l|)Qĕ5z$Z//t0[#܂;tJ n6ZawGN:1Y]'/{ʊgJOE 
O` }q>͋Z  goۤZE{Ăwemʂ,x^[÷)솟pXOwtzasGIs Т|~G)J1+j)ݝt̥xt03k]`\i2y%,Vf2᳣gm[tKӸe^W;~ו'8cxfGɫSP6E鲕/0م*;{nv{gۡ C(y=m)EXOlrFLpX)˳L9YZ}?C "ûD[vLķK1 yYzSyr͖1O ^9=>LDhe/Ct=yJ;5zmZ'd7LX(k~chҫtuݯ5' XrQfsNPl_ŽG,;3a1 $EKI>xǓp|}bGkq/3>F?Ϗu }N;BYQ}-{`P'Jm m"~;eHMTGe <”o7(rtU7Vt0Kr>(hv#>E63XCp3i{J9`F'(e$l2< QBb>.31bN#2SeJ3k$rp,Q&\& 1 ]f*u$E\0PN6"Ϥp:'D3otjr9} aHJ4f [7FELTL3QDTP"5 _z_o endstream endobj 647 0 obj << /Length 3211 /Filter /FlateDecode >> stream xڝZKsWrH%Mʖ7%)JysIbL AZϧ_3hP@a0놲rM^?{y+&*KˬTO[.&i'~Lgy'/kzY+ut\ೆ?;?ebo@5>è:z3W 7Y߷=ldyS]:88I{,Qd7Kd\~l23FMfM x2qͤ~t*Y揆0XZ*tT~|)l~OP<ͻ1G-ij="U[嗢JRV3U%尛3eKe4D'%,m!K-Jk^ieuZẻ3IGeSmj!&O36x?p߰m& lݘ4/mUm%7{$hL;[ֶK9&":$d ֔0Ku Ԛ0ǬC>jHr`h/6 /.u ~H-SkݓHD6jr:kmˀٮWbEfU;xˣ0Ko% `(cТv2jưs|eqkz-dEr?vu'JYW&ح.|- @q80Mn’I܋%eZS({.P%n:n'+"pU#r&uQm2:8CSc =EZfld FN;x5E^\sF@a.Ħoq-W-䒖cvlwۨy6MhB?`J+./r}ȲiVFSlIu736[%Ӻ#:qHN肿J{ #d<-Džţ$SY4ɩ DsE&@$wЃu=0r :s BF`^'Eu4h""Maݣ?бESq/:vB~@+._Iof^#z]Fy= _NќdӢϒ\U76Hg@jI!hlY+u^Ǡd'Uz<3-k `zpB+(+2<'㯐nId*ˇȼ?E'^|4Nofl.gֱ}^$wQvU#6Fw7MG% nIf犪WpEelj)7Z o$[W" ?lJPZ u*L(XN+~:%6 sv9Ih`J\7'96EՃM3PDa@cG񾩧Vр?w86|%38µmH{M$ld )]My;/{H1 .)X %@ݦdLp.HI4M Aur[q*9'bw)UP&7b5"AY4%3)g O=cZ΄FG7MOBNM~[mH&tVjӧ9DaiSY..[?{D mbq!4n lh19"u,xjf( %_~iV컧}E LX$\PPM@N>ZN!ۃA-OB[C{D"u<2>;8YUo$S[*O s׈qxkt] 3p>w};kɈ>BIPb3;3,.=^`Y ~{ţ|,d aJgUZ@QnWR>"RFqK"8ݏ%B"- =ߝ$ +9\ބ홱ȂcS1Yg0 ~9<1Z?/ c~lEo+^<_z\NFOi*@p< ǚbQS`"ϐvrTZ_wa¿iz) fi,8s ڏε-?g--v-c3vs8EG&<^˔~01r丙W6-ߞ'K+t}O$gS;Բ!CUt|M-lopo^f.E YQ`Mi] -~.GvwvWe$NV!~ϘoAG&L0 `h^0="8#}nD ^84NQX@)[f$98O(G6܁E$G:W9!ٟ"Gˍ}}6gXH endstream endobj 548 0 obj << /Type /ObjStm /N 100 /First 892 /Length 2712 /Filter /FlateDecode >> stream xZmo7_1uHF$kp^mծ~=Zɫؒ߶ ; )rH> gPB$Q$5A ('EpiVލ|B6.15Ў /fACMT\Mr JL1F02V Xc5Fyt%<^ `x#,bi^c/ KFޓ!qK6'f0 iQiM%1x爙n\3B@#y c * @ RAH{n pD &-[' %"/u)1v D+m83b q'oxTC00~"$miHdD0": !OF [d")!#2A`vaYE 16155ǃmRF@\"D&H6/h2-*.meBV%AUsZ \ M#qǮ[_b:0{ѽf8<+8ӍxfU :NRC _u`6kZp P#H\J H2,y^FW"{_~i?.Ktg({ 嬩G킣mYWyQ֭M?I"|`i1#>U9 mYRW]S]k]ahn߭ӱm 띿y3fGyS'Gό2tqo{?iy+^IQ9h=^{$i*n:d<({8iڟܨG? 
3E^XeߗOe3)QYQ'`.}J{O_Tgo^!c[k5b9z|\uMNOKHұp>5t!}Q5`K`3M:` Ԫu ᗟN>07[ /Ge"]6,l P5] vYh7T! bfO"@{w|)+8g gS#X 1 xWFN[m*qrJC|-[iܹLf:9[KՀ0ڥehiF'0#NKVEtz=Fw?61;>kF ۧ{U7'Yo{kط}[ #^]旈Xs/I-~|ӝ̆Qp 4c,TشOAr2^z@?^s Dg]nv_=ApN'70Brn4/4,dpe@ǂ#gߚ]zw'ŀ(أ'FWs+ t ^6[p 8j7e-OT)HQLv6qiOOFgnNYϲOdEvt'+AR^rh?# @q޴k7 endstream endobj 656 0 obj << /Length 3215 /Filter /FlateDecode >> stream xZIs8W%UY `Wv8.۝ݳdE[hqiyH0s }{ <߼y^GYRFq74RƠ>I_o Eb U`c˗P,ᯨV7Lw=Lk`\䓄'wo_|A1:8<Ăk ~|4Jgn>s7c,_q_>.z QY{Dsa>+KQ+v pd'`Llƫ]X`I#. \W+5 zԐАxpÖX\ N2^eQuuK,S~ɆfL?-v=ZO`΢s|fHrW&Ps׸\aOЎ34zD'+>C}U儧_"<.ICq(q.p0iHi ElbmjV EjLZ.2DR `C࢚;:Bv@xp1 -ea)U>NkXXu؁_ySPTCBda&KzAc8h45M8#4'pj"ԫo$t1HZں.¤b'6y (.wTT{9nrwWR=DܲvRŻc8HS3T XbDi% }Ň0 m ~#a =o%_ṗqB8N)G^LcEZDN"Y,y~%pcxVj(#Y'-QMQwAJ5x[de\-x"'6d镀LIqbMZ[%O{']^O8 jzD~#Q&iqo3r`oxd09y"?Ⴧԏ\0g"dVxgO3~b^ 0qLzmt;Z |P`I0{d<՚@гr^&9؅L\i* w-،V azΐQceԠ3w X5ymc܃RF-# ]^L$Y>R,7$S)Aw@H4Y KMͫqT)s,,cek৥f% D ;AE52! ӧA ŋt.:)/`IqD/Jo*}~2q|֑;^PP( [b3ض("n{I\*_,+'xtM[n8,ִCsC|#5eMbX#īԸt|x򁟫n1Ĉwd*F!$}1J]f9j.Kn-wx-Iy+ɫ0jApF{!4XBApM BX_psЈ% CC8x۔-u`,(\HF5 ?ߕ*RorOAS0`ԓh狔?  s5ԙ<J;o)R׏49"!)GhSۿ.gkaİEA5|׸ TQz#\JF bI-IUz!ȅ=1(9YA9I~+")V-j)튲ѧsլP8 flKH+DlN噍PgL97YcX[[͗LmJ >~ðYahؚ<0& G4E&V "r].߿ i|{L tXy%&{M!0d,__nCxa@si퐋"4Na S鶓,@ž"p-YH ;RJCyKp%Fk8,[wAK8,8II-vDw3B&$}N_4Rus`t3~7:}"boT &YLZYyJXF_еDLB %p]>v QԲ;:*SԒ΂91'olUxRGmP.W#y~4ycugZ6;QA(\f0/ |Zw!$F"'J,h5aAKM ~2g58Gu7#zhV06^ (vp]gJ={ݙk7@i mꙂp&3t tjBc >=k`50;k#Im;8TPHǹ9<RSՂ%8x5vґm{rjW@;ı49MdqV}1m_>M#Ć\懐ǻǺ]p{?\ԋIt܉4jbr[4 !:xr9`?Jihs &u*[HS:Q5h?6> stream xYYo~ Ϩ9ZOsd; 0)YjHoO]==J 5=}tU}UuK j7y6*@[ 4OMߣaar=~e6: ʷiS8,ф/Qw 'zMQju?/:mkf ⤓8S>sz4)WDoHyTl= 򌟟UW@SCS}]?>qΠ |:XaD:%[>ElVyAĝN` srOZ$Eyh@3&[ J#MI npK@+.I?!JMyMZI^MVѩK DՅeߏ&[0?s;L B]4ٌ ͇) b,BXzsN$0#Qy$mԸ0MPdm! zNU7 z! 
(|ը$sprv[Ŷ܃>fъ|2}$CaAk/p_ef)^Y K ΠX o"D$NF}ʧnσu۹_ҦB,d%ʞGMY-fH<ޟ%N8{ l@&yf2K,(.(3׀ 7[n~P ][dlow0:ORM, S|B_NsՄ;dF)_#UGԵ7CB([!^G] w:,D ՘UO%zt^pTkp:.8'Hڃ-{;q$JҽE!2&0L Mb;Vó5q=ۦ@fv NF '}[]6W[i(g'rfr(t7>"`* eڳF9jL)U1sVpCv:e@$%WR"݄#-D@2_mOCj*gJ(F2.M6]W}B)ӧ tYݶDexb#,;~{>:Ľ-paRY=i;R$NAeю;™˓7QBt|Aa|pEC=&[s,B %M"7ir"I_7[Q{ߐv3D :|F_Ū<^7 Z!< :i?WlVx Zuv*)r|BT*oi]96"{Y;."kPEf;e$ԊZ'>M 9T R+i@~(yu0ϓL&Lzk$+.7I _xC#N{Q*L${316K[2x> Y;tRFm,;:]Uũ:)_@oϑ`䃨7nBi㏼:S/G:,gMI\,z;!oFޟ(ďTU8qǭ\'?ɳOT7!1kͯ$}+2hN?NZ!nQO/ºmw)uzs/Qa-; .ڍ7qD.U'@hp ]!CǁU _m;頱la+-yq˜<~͍- endstream endobj 699 0 obj << /Length1 1559 /Length2 8279 /Length3 0 /Length 9303 /Filter /FlateDecode >> stream xڍT6,ҍ" 2t-H4  3ttt7"JJ7H# {[}JMMnÜـBi) ' N %Ơ;:A0H;͜d2fv*p@ r|B@~!NN' B3W@0+k49AL ?ow2T̜vAfP&;{WFkgg{!777v3;'v+ l ;]_ ^فtƎAв8k-Ü<\``GCr2@ cǀl@vl`TٝݝYf0_fP'jf`r3:s9B읝؝ _-r 0eY4 svU  zǟ`KW.0 XQ/?2+3; k_<x{M! O'3W0o# `9V?`?!>eA=1}Jr/YtNJ dqrF WUY}*v-c? `䆜/31W 9(6@=2x ?K+ZNa $aVпq- ?\#cP wzTl@N=,pz oao;, `\|3GG3+~@O&ZS;?X1~ݧ ?pF8@n%o ~x+qxAdvRr|e u k8 GLJ7y&) )8H8*B̍m}Xtn]7s5jt%sɄUY39;xfQ]/lZ^w^:Jt.8jn]_Mo|{y~-R;зh.<+153#wɳY#J,{߹^,rj#%'@<{:8F)D4Y\$:; 5bǀ6ѴʭmYn4Gh%3X2$suGC`yށv+ )V=^¤vLEYA1g,|USz.% .xe\^AXf&& Fn u@S0FoE`A OKt|ZЧ+vڽ??76kj/9`cF\sх.7.٧&W x[Г/}Éx[bx^])65kNwiWTJp!%]<1rhIodY?OU)sݚ HוhPTGe!Umt8ï|,dMP9: aDع& R U E,I7f0qbH\tSR~л{Bt@S~c1W'-ݯbL9'>l ^CZMa>2zjܕ)'dE$21eNNP оuf>(q#;oc&i^秔A8wI~ZV. F?,^!dXdX;hT y )B?՗J?ΰ퓯9$Y{me:i ;dA8w?~3@RmͧzQ$7_2]zN$PtÎl]V<ˑFBI:^Uh$m]j& 2Nص~Jx~Ӷ%LL>fO27u^ͩ~7J((c;,B[۪[r[Z W1=q(')Gk%d6vl@|_Jf?h,Jwa85K[9r(?~d{w-<~8 I%v"o-" *_8)!6$"M]$2rpc^IJR/;b'Z4\F@^pY D9ySsI#. WYlO|66ߴ6p6#2Oϭ ҈Icx8@'FY_ V QLΔk|cŻ]`GCg*VI0%,6y{67cӊ|Χ#(+FjHq!󵔲ғX!fpY@AXT\Sq5qEć䳔/>`u% Qm<$$tꔪ / 8t;yFҞH}QK4}zGAhynV>r\T(5ahj#E m0p{fXަzˁhL%x&T|/|FD㝍dI6;hK Niihn"O]uv"(>%;m ;[WXO]Ϙ3Tr2t '_bv\I6e:q@GL[8{M anYS?/k72Z-wFg:9wV(4Sʜ2Sg?'Xct@Ci7Z˾'/Pԅc-X03Ź2(r{r8)x.>'\3gUߎ8OjJ RQ43UV棐 %rSZT8g(t_M}((eՓ# >}k swZFΈ>{jqX&,'B;=dG)nI6 nF=(;N7c{iT(7;lst Nf؇8xlt#T5V>sƆ0_[ (m cvݑ5%7]ؔwu)by!Y<)s 1Z w"'o:2x bc=:/%]T4sKK#sS-q'^0Ɇa#MkW"-y/zWiR#? 
%v*޳ߐPM 3?um Cb.Fsi*̡Q>v$ziH/ tCzAU}'P6x4K>ZƍaWY|- p/H;J(3P HE<6jz۩r_o n{BFbgE~-?)eQ:&П@Rnb%`K6ie|Ą=3V>r#E2=`3d;UBK!U eZWf_>F}h}(<è$E+d]O_/ݙ15IW{^QbV~nLU/qO, ;@C!F{{b (&|KNI½q^NZov = G\),3n52iW!:M MT=!)ťmLo;3f8풻@=lR gzuϧT ԃ yKX'5-2*=#zo7e zDi7?Ho&&!+);}u0) %vSDsQlB91DZ  ۷g QvXЕt<;J(]s1|sʁK5q 8J0% 9Qi;lrN`#&yO SIDdi%}|a-8/kN^)3 ډ,I?8:;3Bd6$ (~ӭmOP`.;,=Ub,|U@imX׼n_487bxD youhe$ew`l?W}Cu:N-tQ`[Dڢ7gA|?ߓǛYh\^Blq :z%^I/|ʶ(E|0|v:k@N%]i8- {NwV{rUDFWƳwuOsTb /!iQKfc@dt "IRs&&KQ(l\pq[Ǩ|% A%mj̱q;USOQ[Cp$NpV;3i=%eӛ`e7QlTSJg-(^H!ou .{_FOR[Ol="q%LEN?=7lw1X !Osd Beô}Wl8ŽUi|]FHXd'a W6m^8O. NqrҳCw{czH^הHD H{)bML3`iurO{$b&ָj,&WO *zØ%pP O:d3 &]ܞnm$i2$ϳ1䒦k}H֛vx."p6FK1/dJL< \lRlf3)AX>1f ~~dHӰkrhpVwsM9MPOCl13پgχ~bY *#!.fYHnwc qÜ#ŢUWB- VVFT x^vw?{p0519;Y|!SrNFf!:Psӹfչqh떭3dnH_ cANK٩+g*YY-hex M'!squm {K2a;+zjqRm^[~DHxU^&$,Q3͜Ym7RLwIֹ*$?ڝk6#5N^/( =/C=ުEv36AMIW]>=N# k[cUְFܡ~U}Ebq4Q^Z7[7y,tA#9--fjda>}ooTMQ4Yc:z*ȳ"[N\s>Ũ@uW qo,[zG5 m N9V:4J0$qgyV젷:(l+zݫ2DBBᾉ=7D&yS|Mp#;~mY;8G1(j| ʐ ߴ042:/@!U]֏'qz]_؞Mu`?825Z-$",g(F Qe:dsphݻR&+iĨSeإr*< MἐԹ}*g1ʪ(AZNCLs_lM\Q vYD}f@x13uV.; *lX'ڴJQ.^9{bVRD&}('|HܒIw6]@!rɹS0r!ߗm&?zMu(KjLd;AHQsoR{&%RikSQhIlGn}v'$YcjIr?Kӭ5[H% ׼xdžcb/!12> t|;A!&>E #Fm '^T8;թu;oJ䌉>;o dN?GSrU,.]^'a*( yNhz;Hm% c-Q乣R\TsfzGR]׾hx\ bS%dq3 z-}U}lݲlHo" oH?%HW yb,·h.|fg!d{iO|&=_q8 xظ0~u./Ʉ ,#wApާ-&QFp`,qDU# * y@}9C<eȾ*^٨Z5r 1ࣆ|5Z /Ͻ>yy(|RJDhЎYS΢٨mR\<rq$ |Ds 8H[bHTT!g7#%8W$`Xo *f'ڟCƯs|>yD8hZb7@hM+M;{2S]-)- !*ҮsM7*wtDo}cKqi< ѫ/R~G﵍2f-Mf]*6sҩ^մ;06l{&L/#_CP#ONSZ?bE׸ IXU㕼W0{au,K\MjGt{};oI7|j2k3IEEly&n2S&c3z $ J+ވ(.{kU\8M=n d qA9RHl\'Jڢܶw!uD3UwA/Kx v#rHv4;)62yvnKqIGIz},-z}# }C}^jE:Tqfb* Q=R :T[~O{5`Sszxmڃ:X1> Ȅ~HqNz)MW #>ȢpRt5FF8]/35:}4Ol e&6I6UnOmΥXf/}|c}:BZb-;;1\v)zfbN(%A4* T!if92*xë6ƢqtX7mم^w&Q! 
^'Ʌ!^Wǵ1([rhq/.HQ^sBRȴ.{8,,VNAK=u'-f_`ÍqJޗrǍa(KE)B/YjH_\1-S!.$+N&Yq2)*i^xU'cP2Af"8ʀdo?[D} ry(1.q=GO {Q7Ar&'7߿u_ivf*?)cvI#Ӹ xmckžx,mUwT1ڑ&J%UށN٦r/a2z?f[y)E"׻NT{v]|çk 3Oj9wVnGd6a!uJ.$3*0x0I^ĉuPZΜZqb Q v1&Wi!V× <(/7U]608 Kd,;}qTtePZ2egw2 u_{NKfh`Kj$K~DM*kE+ Cc\n>Ӧ> stream xڌT4]h!Xp'@p {p\>}ƸƀUjլUu7%:)H (`aagbaaC԰vǎ@rqvtCt{Iމ9w;+; @amPd9:\(]-K-p= PYO4ͬAnF͉ӓ hb)Dv\A. s_J@{п1!P4]Pwp;k3{9~:@]VrY_`eboG``am(K)0y1v@w@*\\\\W6K:;ۃ\Odwo__2ݝ5A漛,AnNn.2b o'Nֿ}2@@O"VVdi'd/~.^}cWf`3K+H[bb^_FN.#' ]?be,*O)3@o.%h  'w)+ߊ[y>n[  9ڙ_}D,FkW)k/տ_vͿjף|efp}ɿ]#%2/q|YC.('Y/ӿ7Y0Kq3w`V/yiA>"^AL?t"w{c[f>;]N0[r ϊml%WO*P]߃^{2GO8Xmuư lw* BQ9`{H{5 M/p#>2j|,7^#qc$øB5;F*HWo-agR͵1 g9%-Ёy-|<2F f.vZfSJ*j80RijRl" b$ps>޳ :x<>$1w`+3 Щ`J8%F]c|3@|+v7@U|2UiE1_&{D}in0p# c$iMA TKXCjG5dNG mإktps袗%ZO>PE*E*S෤\A~&Ӳ Zg89z8]W].;\•sVb/xDJWB냈mԨ[+$OE^u&R<)3#> Z*wyENa6QO~;ND))d1G0uA"63B&1<|: 4+`[Ce n=+ v'~ϔvo"ˀӨqZ\g?j΋тV&>[ֳv<[²sզi<+ڠS{nuPt8|~8WMI6)1y{dNl_Sg;\o1핪ADTYbag.L`GNuVAo qpNyq?ɠVQTIE]Po`Rowebnoyy.}QC1g4SL; 5xwt;">%Tө\šWli0Q=4_lL$^G Rmi0FmќjPL~}{Pg 1DWxR+6e/j *ǒ+ 0^W8Fd2bh=*7ԑTQT܍m \8SKZ9kP{VOjc&S%b:n,lSƺzf{rY9Z!_ A5Mgz0ya_&Jbb!hOH ,$-i@q%D.wOՙV: ӫ6Դͽ_8|.iqӛ| j؁,M2T0$`fW6t9L..bY[ymbpZ'orr6Pve]hk[&7| 3z}y6!Wztk^@4hM&(dh\h%dş&{U;;S4QhX3Zk/1> 7Y@[y!|?IGL)Z ;}z,[w`xd L- |7nD8A ]CaG5,1kӯgkZR VuhYoK?xh%OL Wr`.kX-?l(EC!1ihv\&D}aډ "?s( >F rKUPxlQiX6" 0ܮ8cSIMv/Cᖢ/}٩(ZrfUmMg5>RI^j|x:whM5\Ds{r5 i s-=V_r(H5B=襠Խ@k(݁ w%ѵX-k7S ŲڮƘ*] kha2 ֛/K_`j渾Xd3:M]Uvs7ݒY#FD4ׁF= YJD"VG&5bR7(5:0@AtjIvhw7Z b^-49^csUalnC`,;>Zm9n#Qk`vZE~-X-6ԅ0v3#& F˜bՈ7s3iaeJh"$9|[\2Yx3%'ltv^I 0ZE홑m&>r#NVdbT50ڭ]v8g/G!A)["9G<*8 4,WRjCQ ޾Np<ۙJuq[ L1'L&f/Zzm\XMRGh7'{#`3|UI ~ẶԶj<94 $5=呤i5": 2,h8)1-j\1z p/jdMw|/~5)`Wm(ɤņ<~eR6Y72kL6fY2j jylxPSÎ\&d:9W5)'F]X^ъozc05 k<XgWr{4KSW d"\Hp, {w$I4}Lψ9cuԐLJ}ayMyQwV0$'^F+<'iܚ{ Hbb&nEMA2jCɥ2Nuj?{O][UxphcckOf,M gnjB_}6ZNdϱj0,gHZ{LcZ@rr9R;@^FhɜE識 'OI$EA+|SPyЇ~.Lj>J0 _[̮PV~76 IAjC7N뼳 xrn~C{]bl0 nl.r1 AP`q`b::`"S9KnRLo{A^pt֬ ́@pSq2 N0XX\]? 
}R*8I}N8M-i󳤼ec/z4`y.'ǜZ3J˯v\li1`{Ί#DyJGr P;e>tx[cM)]I0GՄ(qpp#uVj˾?~϶9*b1X^0`8 Zk)Jr#mhLLɎfn%ӼBg!'*7gQS}rZ=_!"\P\ bAgf/B֕B'2+mO(2f``SFŴiqeKEDZ2k~ `[|@.;Ȣy6y@zεhipUu @Q]g" }n&_fCp%}`슨~{2:WlL#y'o:KˁUu5HWx*/! Y0hĸ_ ;rf|$=!04#'`و!UZڮayz¥٦xRzm΅7oݛxoS;S]B>W il)_ Ӽ [ 1=eʔk mj9b¥1,qטdEIX)0?m<}X~TIv n :Ũt{ken9Oj>4-h1>Mu+t Jn Mh5rMJxYM@& t*$ $oT8n{dDF12Cٰ2wzd Txn+?@L _&P\"rΤr~0_qGn|qrϱ%.&LǓwVWJ|=n=uiO@1<ȨNy%7Pnw\a1Z5\$=5cJHԹl@"-> }UɧtNK ٣aVLIHvX[\MDc}ldq42c^daAcCIcX7h#C;ݩ]>닒@6E\f< 42Id  SoXB4<=× RY\r8p==)9o/l=fh!1:m`;=UGaR#c,Y;GʅuJ| !MR994<9||&^Zzs`6,g4Qկš.#IVHFa@HjM28Ln$Ǣ*O@b:0=_j *,fW"3~>fk8=PGZa,T &k" ¡,㟴\@b[=B:5e/|8]VҨGTI خ$!qc~Zyp&zsX~ca.JI>zSY 27 LahB]xCJq 9o,$/o!ck2 Ũ pfԿ9=,RXɝ|/Fo6?uMm9r,_>T$ی˽so+Y"1_)v &bkTCߖ"qh̍zw͒P8VwKbuQ6=r> ~̚_҉i.U+PdDr9J\tOWU:U%l x"pו㝮˜9+\xzDIᑜ!ͩJQK4 *1bC\w3am&tn?āYqx+Ήcrv Tw* V=N roK14uڝ,ǣd!sSz(n {va͑mJ- ﺊv0x?Du.7y ό)}M_ hWbv'vd.!|nc+(JGT^i:M|iW0OsO=!vP-yEU, k4e\ 9nu"r{ML7p (-=8R^lfr``Cݏ,q7G-!L+U>~S7 7֓db"u{;E!'ƴHo Az@ERPm-\ =[07]MkorW4{nFQr9Ob9]/S?%3W'l"l8. *#YkݯDAiWE : 0 z njkGlRpǗWʳĽUyWqOO'tpJrQӤDe(!We(igz r#(D&4S|mG<\|w%rdkx|7eNjX2"ks%$H$"y3UFEl_Eғ%i.;F?C-h=?#s٣ ^Fz =2*B2%ejxKjYa:MXSW|纜VcOBva64|Enӫ`SV,SZezщU;tّwkC+DbkT>`l?B*bPO|F,#iFB9~Uۧԯܙ|&yx^&Y1Yb'XZe۩q0OY>eKaؐ_t`K9,S\Oăd)'wNz1-I "ev)uuzӧ/?}'jO4OZ :$?XeďI7Tu4?]X ˤ}˞V~~dȨf|9Hu_ׯUg@+R1=ZPWOKž\Z G#7rLxk]vO71QIg*x⡬"leݿL`@/Kww ^vp,THÏwA CSZ8О@a?+me8hmqd&\d=P|~qvIMDdqN=.P{sрrK"7D|a]vpuR=j`3pCG}U%>O_NT&wk{gvD/XMVF=)&GѺkZY l2Gg@YF$v5i=ػb{~'ѐwH#g#uw&[$LM.--Y򇅢ueXRù̂}O[k$$츓0Y s{,Kqk9 @%| yŖS:Y6KF%]av|1镒9љ C >{oc޿4e؄F|p(|ܮ] u5# b$V}#Mԓ叴ՋD oG%N! 
I 0 &;AB$ZnXw&ڐF?(=0!U~vY l^0-ȏzGYlcڲo'aPDnЇ#5Yd|ےO^B7YW,?KAސ_ʥW,MڔX' Cq),dJgn[ݪ~n4s);{`C}As7k$=җAG4wgSmyO<{нՓh2X&f/;Ne$hÀ,;:h c|o|؏kki*AF@o7ۮ,Ⱦ?Xf%d~ ϙhP 7#Md~`q`j)BVrc ]1: jr\̎(k$m>nwƙtV!\!0秓vSrw^9* LFILVbS@7q7t"bQoxu${ , Ai8eD(c7Au/yNh0`YL7p@[2ospf6O1 z9;q!41asMl/5ߛjK I5$~ȩoE/kv[)GoVI`E0 1yX %/|kDpU˃i8HQԡ\D{oz6䫪:YIkj́B^QݪXwqnqی^(Hk)p,>±>-jLj6$$*kFX 硏I<{te$tcr&+t{ʗ} |g~U(qxaʝ\D5N:Gl4Ma`D0Ûrf)Pg5)}2BSoVg~,ITOK coY?wWq{-S)ܟ(P{318=?n.!CqEK66x"Nep#Zx!JyCrH-=FY9A>T ׋H hXԾ| 9RCzyYa)ڊVһ\d%-=*b "jmhr>Ao/ȩ;V>$~=E?9IHGoW{kPۡ++_WLS`pK,@ј[&lF; Sp~ǞY*`,J%)U"1ePk[ 0@gM=ûF".65诔 *dKŽ+F͆"] xiJ8zq5espiOP/~3oũ888yB Y!BGR,}ػ@2Ya47aX=XdMp[z3i+kI"vQm0Dig古ac!2fgE{PikVYGor4Jp }|}MݰzYn!B 5]"PC3.2EXyCs>4@ Y;{hN޷`Q%#@zbYpa2l O? Nlo'6*vS,L$G`w)-GTur>Nki:sZeouECMM(Uɮ`,<1 A/ް=<lEW:GQnsKa qܟHM ]XT V5/u#rԙh|z@|ji/$uӬ*/ceʧjkԊ˽jKuUְb3Z wZ/ ܂ "+Wkٌeܴ'잿t¥0m"|=brٝ澾 t!i|r]/={m NȬ;VfgyA:I msz"I]n\S6j P >Œpo'1'= M3R b\d߈C2X4PK^$'0FvrqBm[¼kpUH`Dَ?^Ք [4qYl&>͓M` ǚ,N9Q[ ֖nHWdlzJ"* -k12mFy-w Vr$O2!K&?1XjwsE/W<(s k\kW@֝]Z.06_ӻ5#&JXu([5sxU' T?^CY\1hCm]2nz_v/cz÷o ɸBzWi2ВI&.]X0[V*MKNOib}=nDEn[ cu-< :^sɡ.ײ#U+JZW csc0Aa|D @s8 }&ލ/z,22'ʝ.R~(v鞀%;/3 _&QnݳcR#YLJoY:=j-%(}IBhL49ΒHzsHȌzjU_|qb EKUyGl$[DDֺ1wD~& vi!)QoA?[AV&wŒ`g'#;V(+(q̻rg {=Vk#l Gۑ/n݂ _MHIlQiC+ٳ{5bFRVdWt&,YLQPS "x4򒐰G^T~x_4|O eV-"bp`Ӯq=D\863' Lh o-,]Vυ}[]8gͫ~o>a}D;`gFpZtє?;_ƞzuW:;TڻcYȅ%G_:<&6#)xm\jxF_FRxѓH2g^W(z1l`].<\2dՄkJ1'S[PC̱z #H/33aؒkKc2zYpTD^7#U%#%;dGp{`&=rF>]ZJ41*xE[} 1# v`hY"<3h_-@MQM;2W#`P `7'ڣ(Q?Z%#Z*~ m sPiyp\ vӫy%dZ\U+ӴrxTWt#a\z>d^퇧.&:; jwIsB#Cz\ *j]ԈOn\K.#A z"jk=Dt.;ؚmh 0#k_S{@yGAx6P쒒RA3E{:800|3E@b13…i-#6Ұ6V3U@4{+ X1?[ȌBQKEHOӅnzq+R}b8 )NG>'wX;T98Cs w^Jo˭]T%.qV^n@te: t,1ڠeOM+9,Nth+*3~h!b0fJy5C7W_?dFN_|GŘj7^g(?Plro#!衠v8knp._;6 + 5t@gKoo϶;#*V mҋX,q˒ҭZ41EgguK;vRjjWx0046)jpl^_R01DÂ߰ZB[pb:%i9YaE]ч\U29F6<f&؞P%;= endstream endobj 703 0 obj << /Length1 2141 /Length2 15262 /Length3 0 /Length 16542 /Filter /FlateDecode >> stream xڍP\ q;oqww%܃w 8JsLk͹7% !HƉ "' `ffcdffET5wǎ@rp4n:;,lN^.^ff+33mx@sc#@@)bk`nj^?4F.B s# @d~h' 
/+#ڑjdP9\@ƀ$֠KcD;P5qr:+s#{1^"% PY_W"sFFv@wsS .Dr}ͭ ) ɑ/Ly?f1c[kk#_;ݝߗkicjdbncl cg;&5s{g}Ml '333'd1U@7]]tAޞ$!͍ Ss? w0wh3 OI]F틊<ݿ%ux2pX9X,,l<.fQ?R6&~Ni3@o.yh 3q;ߔuoGVV4rVx\g-} Z_N B6V=FsGqs7ٿ_vhhף2||8}y_[ :8G hcx 0u@F99LB8LI0A<&".f`XLIz W/~ϩgQ޳A&?轳/E<{?迈3zXX[놘ߕߛ4ML?}!ߡ˟px[g{w1|Oh3s3-Z˷Yޥ#SOw?fsc̶8C {kO?{?{q'3?;'Wks|ޯ?N=Xӻkj9;8`~o3 d4ok)Ȣ.FЕawO]Z%NGDꌀu[h+b47$/ǭ _ڟcwp& 1 y{[BwKS;s(a޻K(52W)\6M-Rǿx20s Ɖ#ƹO7X:olZQs=ZxĐ7cTIҸ %E!E&z@{Z%QC}2h#-JC+. "=c,gӈ cf\%KQŗ>!krJB $Ooֲ5nKQ{u~FxnԐg)Q9#†=+$pi[*|Wn^9m>gLl5p5S#0O+٬4j.`fHtb)Wq6BgadCl>ےJsð$( Fnme8.xe4i|Z|ᜋi2fyRKһ4c%F:Ocq}gb)A]T:*uVcY&ɮw^KS]q= vl?0 p:pe,62uq\=l5ܖ?NOaڱlbցiͷ=ȳݼکƼSF/"Or;cUWx;C]>F9 r%a&~zI*!p5jt5Z($l, ٯ0ɬ1i{,`'? LFso)j[[DJB9cx\>k DŽߧsiAs`F#iԈ}*jFYkKPdev Q#?0mblSV֠OME8Fq*K8/#ks[.9,'+QDUc2'pw{ͽ D_ch=; &@-bE4Zy:;MP'GvYȚkJ'fawP@6j]2=gHmvO~;e&Cv^cY|䒺o_>1s^ٙH-l4+{=Z!LkAg*G j'(a΄ADCWZf_b"OK춣!2[%d ~ySI,\NaHo%>fԼ?<@tWp`)jy;} ULpncBu% x>|?g5ꘟb?Hg؏XKd&9UQo%9]ปu!.|hZJzsGi>r ;*tL|6* " %qQXp,k$WEƕ%~N* 2+ŀwF""ܥDD|=o]Z̪c_(nO巰M~cF':r *T,6`61'YA>rҽMXKPfJu`/8>8Ƒ{,I™K_w:qPG^6h(ZXh*rR NS3@@4F/Xc.TߧN[΍!㇈əaHo[[rSPuNU"lkx T]jfRǏw\zD\6EA@yşP&yL2se5R:!V6E-]KtN<>}YiĢ(yh4 n1,Ni]؀ÐuVoMZ9u`Єl'mWWoQ[}eJ $"i&"A=7N!f=X<5٪H14yT)1fwo7ro=%H'1Aoˡ[C.0*Ǽ ;偱a{-rȜU/w6CM׃kQpX?76(@pS1,/LLr#G0J] yo&)ۃ^JB,45!_ĪN2dHO>ѥ N@ alq%]pvI?na> ';F0ůKtׯ$tJu 7(n>wю 0Db /qU1pn 7m+KR ydxꤘTn,{)BN"Z,!ߪ;V%Ϫvām*.3M3/ioGB(G}u Aa?uiΖxw/mӘ++9YpuV?{K]m3&+]DžcR|~~b:ƁVB۴EZht>sMG{Nv?J Zk`-qWXd] MA=w+Pl?G]7iF>쫌8a m,{̈́]Bo2sow.uM$ݼhH8j?']zuZYXI»xnhgqۡ%{:=D1߶ԯ['5} oN}_fgLaiGc*3_=Cv7-pK}ΨNLhA a?*2CGH+lQn齑Bwv~'l)?G[+5òb"Y/ysLz(kN\҉e[ )Wr~UIN+\:.ϾuSlچ~BFD DYZ J5{`ܒLp^BaҲ~g B=|Cߓv) )C6m:H>n_`aqȣ:f4!P *ף*vy{g.m$CE1U=,Yz@&K٧7p@FcX-#6/bD>3$^ 0ma_iMd%3|ǰ%a=y)]\SQr%wo˔l0rv@QO`ŧf B4lq1$j]$+",((#@11v"> oh.?.Ŷ}?K&V&0bWHC1D[O_bz=o3 y0k pD"j~ӣp/{]iz޷y^UfvMʊXvpLU )#!mEa W"LմM&_0r}n++yTZCMᬥÅTXTEKeX!w4!/)D=ԏ$lB\">t{)];BNk] ~[סy\;|>-n7"PӤ2YTڿzܘ՘`[ƻG>}Lt꫼-?1sJ#  '~x2ӈuժ) Vqh?ȶR*|1- 竏!`M}ւ|]ݒYSs[ Lͳ @ ;$H#.m*LD[N6)5t+Dnl d{\>DPg(mvh=vLZ[~*ޭ(c}#m6yhKbY^1 S|奸=xU7ŞV`sh3(& ~\9HhఇԜB%O7Lg>"Q 
[9~Pr2BZ9]ȱXcUJk*8͊f*8{./ijxg zw Rc}ΨӔz/x!}&R*BUd y%|>':L !4 oݐ#?xZ&m:g(e|VFR݁JFS6IE+^]CԴ{OR_L`]yWWGQt>l挽yғ>$az#KTIZGAfn#>mƮ烬(㈱gU; a9&;Dayo}94Xv2 $% BMO>Ѣ2s+ZES] ݮ:qIk0Y'~ L`q:uE`;[3Eڠ+!R` [6[XkXs#v#:Pa'c-۞N:2(u8])9)# V?MNO o<.EʹKvjL8&Ii/򇞺upLAW>xk)YδOZ6ꥢp _/K:S$݃/s*e/2Oo{^$$XAZB31ǔKoZ+$9Ir&!:Lixq땣|Mq}E{f4,(2"aYO8Au]l=yR8㇠Ai!4v`sۭ+-mU'Ru7ᤵ^kVŹͷxқ#\aAt V͝¹.g;¥1vpq,)u˓ GWvfJJ۬|XY9PX*zBztBzVJQ&]* yrͭIBA g[^ )3LS.>2^&vW_9 n-{V 6Li':攰h }+߷i]3ZC%T yp1žgEBDޚ]끐K,A" @=saKz}ڌraVTck0gmĘOp: ;T!Iؚ\0 q^1l_ZSyE35Ze,zLUM1~zp2&yޏ0|eA9Gcbh g4"!N^ ?HVpp ,sd=1u \{긼 $<2^x6(7TFx`YP?`'~JCdMVR@A(P=wGP╳\ވ'1wm5uo F4Ŗ#oεw%^Xu.z#? v=AW$َh;GgQi[5rVoA/]TmO˯-)"tEOY]$ˡ㒕25 x#Ԕ\hw5eU+)5GW3#čczlF,e-37=s͓-8#t 2VXB}=R"u-߁1v 3Kjd&{Yͧ̇:UAJk/?ЬĨfRO#2s?(d$o$'EԿëS[=8#ZɢjsnϖԿ56%c4Huc#~6!{tV9z@uPKS,qS;.`{{ea׽d/1؃uطdxmꮓ Iw7Z{VP8Zcرfg׸/uſE;Q_^\ICe "MM>+pY52 F_)Ԏ2D~+|LT}mv kMڗ# ka}"HחPu?h; 9 :jS-^lh3{d/҈شF q0V>d6] GXT(E\b۸-z@n:ppS~ ^w-C2芏'aQJ\Q34Kj (`=1*~NF[eLn]β _&됸:4h30ajC+Nd{yY&L <XnU%!7(HtxX:ۖgQ3sJ/?zTwa?uEFwG/(jc[SS+ZR q3S(8䏜іX{ K'佪A먷:֔ ['V؟~Ep7~v-M2'8 г`C>6 & /9t5 && \-IL8?Z*.Eצf4__\Ucꨴ7aYxM6 #'^[wuA%J:CVJyp%[ۃ[{9_up:aKN_aCy+ =vO}LeeC&D 2B=hTdT誘"Wk+IOcUH9D,ON4 Y_V,q(b<ɹ 5~M< K:]dIo%^;Dz"Ħ1iVo32LGt[^uq+fs{VZ-҄sԿ㹪*ak~YDi4F -oẸUasyˍ[7mֶA+">]v{rwV^}r ^G/&*#>9;36|;k '|Q!27@?_Ug=g* %)CSL{͗_*i-Wڏ,rI(eeGfQ"Q ɛcCYZ۰T2+u>WqYR 8}A&$^5tEa*r^o#$&XߜxԽ?O(9q4A֙5Y>qKӏnݬ'Mtه#! خhxϨ[RcMS5(3{ v6Ji^=vşPY٠At9SzQIHfko xϋѨgsb,pcQA=V064Sy6~R\ncdFto+ }IZpj%޸^Lpb+I0CVsYj@J S͑wm=Yt1Q|\H2Fmc6T¯6ԘNЈsgP!rq~ɚ?Q]/W8K?OϿiy1rtKwl~t"im_f3Z5Ȋ ~DPuCfVdPd掶^dKYQ /Pm9gRpzy3.q1tr a{R-ы]ò>'ؒ(CUǴ7GT0dNxzjl*eEIo_6A-ڥKGȉJvH)LJbe $wWvglt`'ɌϷ_No6ERS❑8DA;$s.t;&"FVݯ68䀠J` vMqogo"P=i^E,>5!Jˇвvz{Q^A<`ϖKu}]O"i&x6hNH!H=ܿ|f4vB7 m[cpiraCa8A/:=%Ec8Lǂ  Zr}|*]Q>c[jlc=nvGE9`{"-jt޾mGJP@:A2mJI4 %0mOaQC,6Rc@d\ϭuv18ʼ6].#{ mGceKSx Bl] D1lup=_K ) =5qg1 }zoέqT:#L"az/:־zpMIZ'J}iIB& xf/cCo(~/P$y Ȥ_͗ .zH;[n( :O,=0Zn[uBv,Hk䤿>Iiu.ޚWZ`Or! ΗmC"P##QHgM_}*e]h^M rvk. 
,h$K>i- aS#¯ShFǜVXUQ6+WQ V@mFF~FNێEŰ'7~JJ"][Y6M941lw$彷fu5%Ap#Eqf#\rj<0FR_ 铠de)ChpzIUmR!ٜmW"w=q&vIQ͹ᤴ:fRe7{aH]o=S!dO]:*Ħ\6c]x7}bwR Rd嶁u˾ip^|?js](ݫCkv0F ;vԯ}ET Bt0] ,(-8Y+#y\,tHMV%1wW٧_ SuEnVD;ZԐ*͕¬}Z 96dbf.9ٰF40Hr>vX{=4|A9xL?i2HyY s7mFC) 7T+ v=4}ľ7sh0cѽ3La>YRD~y2u$œmhE1zlx=\14\=fhNnUݦ,+*x(]W94s䡀.5O^۔M8˯ƍ[Y{FElaTI~Ix_y v>ƅduC D+6pxRrݕ*v.#[6{jkXLd2J?d< ҥFǍP.$" Ar3)PW.HR#-1_: IeƩ*T 9+R"Q֯ﲅ7XuDž^<̠>ݺqrbOd}5@6N(JODȅ!Ìv6⸳lGFH\ q&Y;pB%W??n~Qo&ϷNnͧ^Z ڑCux-sY4}X )z^$Y,(!党 g;xھߪ.,r|roQ2RRn+bTiF;%Xm_WٯDoCWd0WL$ǫfU+le"՟qB+ZΤZBa{h'+PS}yUźՠY$Rr~uة|SC@_9e.cf5 ?.AmOwY07g{Bi!|Lo@B#q0vj$+^b&^e|͙0Su`23r:p,?oΙZ:w«|/V ea csrsH`\ 2K|RX[ŗaؐk 0b/V:r@yUáF][74iW7"Ӊp[#9S ~61rT+bVG̦y$Ҋ* +)Ut$E۔u=~><5F$[:j'6v4LQ>fH9E_EBuR8J5x2598»1O0Iavno[8j[LT0r PÉR%)NT~Mت w8t( MK[*>))8j4&r YXIx^Cx=)-b/Pq19$vBͳ)JVu,4.g-f f>>[l3|zdY1~mA&Νf*26DK'zb^Fp7$aѼj|Yx7r!UcIT_a3REcwͦ7&=\la%p9__!,2;ؕ?Z~+f!2AcPyD6B <>NasLtSKAo -TӠx.S't9D< \ ϭA'1$ض/aCӠ@^Fvw8)BvmlA8ۻ1"\0_s` WN/-K eoWZDӺ]Z?&V#N~܀TQ;@'Sk%ԩ2Q@ycQhkVУmIՇjx 7~YkQǴ놹 dhSՈD:= kNR+&rͼ(Lg6xt 4rMfUZdM7ÃgtچnlK,m˧B&3넱ЦߚW"x>pBDh䢉e!{&mip3 V_ OTHbč"*(D2֝> "G淅7G=GH<>ŋ;aSgN#4ٜKJ8 hǼ~q͟:('A]|2. =q#X☮7)Dh1`@>'{[<|^&c>#fSS- endstream endobj 705 0 obj << /Length1 1376 /Length2 6255 /Length3 0 /Length 7204 /Filter /FlateDecode >> stream xڍwT6ҡt % Ct433034HwJtH]RJҼc{oZ3g}>y~7,z`DC <"@0 &0 P8(! ڧ Bqp@ $/$qR /(-Ѐ H%/Bo%˞ $%%;A@A06qChr?Jp8P@nH8Q E9 H rL`EP vB!0$: Z]wXlBB.7W!(w2Ba+%A@0/  G烼@PWs@UQBw<=B FU}*0 C! 
~ E@+f]`po_ ;.h zxBԕB.!(@<{'_|!B @CyA('$i P{: qc/X}B ~uL4xL؃p?_X H,m\u [1c+n?kѬCrK%M)7"7;;AnPW߿4i=Qhh27G@h(]}P*փp\0 P C @ ;A[_/і_D0708 @p#~ݧ@%$ 6'o[[Ğ`~n/\~ZHͿ9";ɱi/& D1C7 ױWxm6mscT}E F <LB\^$+sݣ[q(bfSs&eqeHGiV\?m>S'7ɼ_EWΦ*oiii)?Р/+~?5]wmpJ+GW6H2H؛O\WPc3R3eYK?N ,MY ^ }es%)ی%*ێ6!݌go b 1_ 38fH#KӬ[hSNF)(=yĦFxA8eԾzmQ".|b)]9nؑ> knpKjat,dL^;U!VN8#齂hs&fBtzXmia)PFuзѢA,9EbbN;3X ?c4xUT Kg7mGPֿb?Y1\ {[N%]Jm(yo3'eUXK7 3iq E//HhZG^܌ccB*l"U3q C֏ggr˗DҊVvN{sW}|L'*sqCAf$g]ڶ&a޺R5pF[\=\7 .hV;EYcF\s Yh8-Ěk} !j[.Q[{)8Q_ HHs!̜8r|q*scsO 401{&'-/ OT+bJOG {TypE2gufH;N/}X,k@Q 6-hThzDg\ ]fwlF'Jo(T}o >p$!5 õyI#,]$]E y6U}wTݹ%$mJ}~)}tړVXpGc6wNy]>*Bq~llBY1q wH@N: ;%;=-v9Kң"A( ZH)@a$+{Jee Dˣ)J3!F ~S^/O?,J3GԇlG#ٺ)=s)L6kUHRc*4>hH8 v*(p8V}N?n^o?n]7c;$;vn[Ѱ1N @p}酦#\aF5zT}&,*쯱0by?8$38#wVPTkvԞVSk RJ7USMӊԜvQT Q.'wMc"LMXOܙ]8՞4.: ? i#_f ))pM1fN4K=}Q94ab CbzPrQ*1 4 ʟBCY#Z3#vewRڦ&<Y^Wc [-=>+յG $-MWj5OmN0?0u}|:fe3bW  Ͷk0KwOBXJXϾ+ ~z!$u)kχJn?* 1IَNL)j?yM$rF,$Mo&l5WUɽ~?v6dFyֺI}-ۺcuR(XgY~Āhax|E:eQvZLF/ zLʖZ 6Xa+.\]<݈ĪfW{6UUxc,Rjp.Xht<5˺$X.tdv'wȔp? 
B Z6y}|gӷWQ&*d24N!nӾM"ξ8[EU /۫BOU|ڿYX-$|O3= 7찗ߏHC e]-lvx=ϡazJJtoћ {Hh\tՙޒь4wL+"$glp:Df6dH~+;]am:4βsQD˰>cBѣ{Cmy狧/%x^ ӈeS][3(a$wC/#\{r&A籧Y|Vb*&.[^~߷sy<]Eمb_f~HDÂp)(YX\$WmRh;jѲP@u[|[al 8h]ĮmTAĹ{m8h@ıfB!*Ϟ}׌+h q(s~yVnu$sǭs7ź1`ٶT~EFn}tUR+ ۓ6޷g1}ӌ:$'57{r&"?{^s|Y2Nc)ۑRR繜Pg-UUpRr Q08F[pQUj+?(V'-uc^E l)r}:9sx^JVëₕʪS:1%ܗΔ$'kǺsou'fNZBLs#3푤c /9OTEkW\^95Xڇ|dڴkt>c J,ҍN&̍ pq"Excjv{(~0Lvy~ÃDuG*:Zܙɧz1O>Q0Ѭ9c&L{l[YL_ $hG8\XUzF6c>o3}̰`WϮiOZ"JCdA>Fz-FɤLc( &¼$, }TyZV21xYg̞۶X%,, {VTiu);0(+׊I5ZX)UJ4pϦ|Na(b-ٙŪmZcZZURcq@ޣEVڰڿK%KdmË!(`eEIII9RII2aO(u"c?"X;r_+$rqj8#xз˪ҡ̠ɳA@VZG,m@p𷬒Ou {J; մ>s#nXRPٍ[ L | h:pߧJ"FՂ-/˞>z[2Ea09+YH?s;.КSU:g^M~GebKivcOAkIBp:mfA>c4)C$ uT!_ C>PuPQ<6#ǸFQAXX&%q_#mgo-w^d1J i'_hpf k0JxjD:K"0u[-A')9HdN"VRԙ,7 9D'呝pW<|[6̧[,<72X™Q(V -KԉCj_X3#@UR:GbW#3/># DξUtiBHu;E8 E3De0 u]ݤTK؉+uuHZ@ |^&δvWX\KTa['قS`Yg5P)ޝDz|BJS,&O$SDHf#;1> stream xڍWw4ݷօ]EFu 1 c1:!"J%Ht{IGEKz'W~q׻ּ}󜵆ClQH PT$Pbf) @ )!1=(?0hA1X=DDRrr @ A{ QENJDee~]ah 8\+ACW >' MND[!B;* '~ ЁN`#drxC0ց$O= 0_U CAp#t"0{=87; Cy5,bXٹ`,=`X-{]u$P4K=m% Ūa$ M`' 8ФVV bD0Q׿MQI?L)4@Iw<9f쵦P<;z\yd5<͖XUz.EDe=P-mOU& q#÷jn#a~F! ~&M$##NڠPR<<Ҩqy'&x狎>1B!\ Mr7MVZ6aVux`>[~EZig] / w? |յYGk~Dрə' ܭX=>Wl&fq/k$`ؤ/m._ѥJ5vߜƌ\-Ƿ*\p+<d*~\`՗<]|U? 
w :' h MOx#j-u` s~JZC ޸:ϴ'c G7ąGn~+&f"xO"@b}7]\7q+&4ЛÒ5,uO#67_Мt4$9oGӎż}4I[()ۍ[ȯqhh(M~ '~keZ7FU4q(C)YYb ; r'@X.`y~NLP].]aO-ptӢ6 ȼP7#rX{Pkmi?|SJ^/Bm!Lv_025.S+6lQoz2+MQ"Z1֐kH:I˂ג"yrvL`Zf{+g?gflv+ΐx; Y'6udUO,N9jj|D Vr+Xmeʫ}J)ށ qúYva8e%{2/jʗ?Hd(©&tvQߌ+bZClm1oY3i߉߯nJ0z=uʌ2ɆL<+"Bv#7] &]vq3j?kA8Uۥx +ͰrZ>\4tb& 0:QIךGLVӔ3ES v|_Iѝ<(>鮤4Ѝ*# QuOj"" x=8XH*nhep;eiXbAWnҋ]be%+H$e}]}~1Bb:9v9iJ&oA?5, sKr}cpY<8@ Iz[Onhu7W8h|1ӐfS: n>N]'wCox=̇/qf8P' 0e5z̢yB#sj1~V_vli%O_ яt}rT0LG5HKΙ2A:ZYAIoCC(!*9FޘH*t-I) AF˖D-/8A.XhWu'G:-}͔i'gn 5&Q1fѻ&NSu(2vo{~Gٝ=/PX$O=qG/&+O ,У/:Y/:{Dwy-I)-.9cH-[אFY23Mᓂ^>t_%.뎸9Nv 1l*$j0&/zI*c̝զ|r!i-]ޠ"uo7ӖM49ǜڤbqNvOǶ9sdlfVÔOA첣FzRX/c^)zf 2pIp"DIM5"06ִS-{#vIoćI"99RRi|߾ߢR,9A 7Ɗgʃ&VdsT1n@עJi&uK+LVWjƒlktU * tR^6VT xi!*&^б"ŃmU,A@i4VDF؀^yaTnG5?w03 +ݶ1Om=%wtjMpuygge2I&{Az;F Sq, Ozĵ=(7U_vRx.HOn[WGE{ wFeSݫs#,bgXcpkD2S˲v-?z "l+%xK]zZ,KLLZmFHd G)o \ܑAgv SSֳs7RMXpBno3RRNa5-)AqrƤH4. t9u9pI4X64nܥBݪf ?V.i|7VK\R.̐"B24JJTu۩ `<':g5jL1Yʿ/6j% $\@G?v˵BDc&$kۣ&uR^3Kzqۆ꺜ś*u擓̪GS `ƈ&]"O tsL-R/G|LқG~y~O +wʶk~&tSjp6p,=@wD=^|7jU. |ҊU]٨9(>ikfV?p~kKw}FA/ؒڮW{r$nY S|_OZۅ5_SKH [J6@}ςj֌w>@呔s"*qOUW{ЄY TFm~ab0z+\w cG Xh3\1.;<|Pet^8=sX%ॠ[I .7I\ޘd+_X-+MX4M^}k=*^ Za8?# eD=}^7Cg?) 3",j+ꭕ QGC\9OUo0Ui(rj5SṠq{'8Ҿ^Z>~zVYI`l7򥞡,ZD-͈tx@Ǒ}@;9^ȣw7-v/ָӕw9G|8$^axAAG>Cb=9ZęԋonM&<7NUM9^/.ח\pP)5dv~=>9pݻqa\v`!"am?dDeV =Sq~w+M6ʖF)KHG{ZdyV*Qsn"~AǷ?tnx%SmISGtr7b"܅[όsO8_<{~la&݆Y#-xkjv8OvSal>T9hZuؘOV1n.F^ ,:q}_-20`$'dn0.w!-l1:?mUTjT>xH@ShbЦn qܙ5~R5[M6Ǣ^;y#2Nʩ!zg6Զ~Qkzmfcf&#&? ?? (Mw}.=X4_$";\OhkϾ.m!/-@%X */t6z} H S~Kz/tUdtwxa:oeIDLYt+f׮9X[܂wh(7Ҥ,vE6mL8¯VpA,Ք֔Wo8fr3Q$ڀЕ\}ڏ-~k\u+ڳQw+-NgɅb#hɦ0>uuROg)\kip"]'fz[ʣYaM?0xg5}*La~X~хuџ-Sz$ :݄V'fiUY%nug5̱i)ݲ8U!Tm4J"#aTO aYzšł~(KU!\Hys6y4:'%M<+?񪨙7|(ϒJW{c /2.R"kun}J7}H7Zh¥Лz|IL \yl'S,br_kYw&#T2og]ts@}MPA}6%.2x3_w~0I i,XR`_I ~_zJ w^{AcB:%Uȷ\&)jrTCrug/ކ[m:(2K /OWB#߭."&Aī3)71sT[G$M'ܾqwRf_фqS[@ɵmeo)YTGgL2G"VLCnmMo%LQX@:'9˵ ܦ͉w7u,j2H6|뚂!?]N/  &/2pwi{wqܗ-z]Y>ba*7κuȬb?ėvd)f/<@jjלV5#*B <&80? 
|ErG} kxq;Uuiibٻud]θ3l]c<,cwWyMejb*?/N2Z9*k} M pꉬ'^l Z˸W[)rc7{K^Ry}6rUqS[q.3^!$8=l zиT>h 4aIjj)#:Umx7[q lshIl<;~bcR zG)%׏Vqq\(pLt٧}U 2Vu,"\E~Rfh . ?x@isG/c}j| #}uy&ȈYN:+i&UԨۅGY?η'E]@ {EDwrTpBCd>D/zUC*oժ&&fc=lEyq b̼œşU .sY'}ϝ>*lE7Z?.4"0)|fRbǪq³)QJv51 /\,}֞U;G\2L~>g2h*" WV%Op75 Se4$ s,j븪2KW=,+]SN "\6y~p~Կc )rb*ŎC퐈[Se*)gc炳O$8DZKͼ3=o,.,N btP9nG/LP;ޓTKw_0Pds gI4^'|96`ފIbTlZa7̧ɟ\a^YLd\Ou1a:O8b'514>İ,68>aGn¼UA'&Mt0+*ܶ"U:o&);kjT-_(:2h@![~ǯB:_bY?E|$Jؿh?2 endstream endobj 709 0 obj << /Length1 1891 /Length2 8330 /Length3 0 /Length 9464 /Filter /FlateDecode >> stream xڍTJt "0H)t030 !Jww4 ҂sߵ]ga|!m + '@VM adԁ 0T_p0q/!`P37G psx@k' va9{!v8{0[‚p P!N@0+ f1;Yà J< ;6 w[~ P9*cqxAx`bޛAp}tseU3 쀿rv/G ++3l `*'A)]a wdyG t__Z!WNW~o?3uG+UVVU0`A'#, y~;AJ_P@|"_ݗ:잺`?L7綺&o4HO ^iS q?jaWU쪁!nNEq:Hl AXɘ?庿fk\! ~7=-{Hyנ @p8 ODZ= 2 Cܛ8಄k!d>oo7-~ ;X1w0п qOi-h quvy~A[0/?P_o_?ȿ OYY!o tH>9W7'?^RLj_of/YDsk B~c>I^?v: ™Yׇ_JSyplf3 pR ?N|G&|*5G{҈RFy]>j:R7.zA-]s]܄5I.<+z6 ڬPyp]>kT2ɘg5EAb%>$<=L 68")Jn'FԨǟl>{ŧh`.NQL'O 1欖Iٓq'}ԃa*٥:UV|PLJiЎ!/ت远kW.ۗE.M=TfA;f⯑(ϐ4XdQ^zWf,2psH[2~at:RMJ_oeyfz}`J3SE$UyCRo_̥{IEf oI;_&#nɀMwp8V>W +zH5Ui?"ěAy:*-h@\}<ӝc{ ˵N+u_ǵ_K-=HQu$V?w4{=inn)S/7N6Kts9`E!="H{;N4_AJRnDB֎s W[UF-,p0EV6^q|WvM5"$c?E 3(T3!/x ^iV:'ߢZt@Иy)mJ0|p9tYP}qRhV!lfL xcSjܹE5콸퓻N(;SexlUp f0JVGv̦5TPAa'ʢK٥zKX◔ bsRs|vQb%o>gWO;af^w<վ _hbU P%X:P̕amޒ<.{4-I '[m?TSYL 4?2}ZFD6*Y/óyvZ.q5R)KB"B[{Ul|2ǬHS\Esa(۞~M)_:2Vؿ8;4cd'Þ62ֹ!K}\9Y`' m6Xsl/7* *̓LEu˻^CEŃ.^!9hۤ_Tk]w(lĆ乱ڔS^P(Epg ߳ӮaUS 2OSede)ͼ(gO}1LN19{'TS]bw.j1嶻l+I4.6Uì`Gl՟vj&::{DlV,3#0=coX[(YXw'dC۴]Gv$etmcMwߣ~8VM:I`:\' ckiU覄n{fA<琭W0ۇюn"0\Tj/&tQ=+Q6RgVF?P7 )"=z 9DjiEE.xs]_gZ %=F {^#swtVb|>Nl׷Ikz)Vm/bT |,b=#D4>j "iߖO3d˳SMįT"O{ FE4&AlzkŖNY֖h4Zӡ¶ O1#|Q8K5m4:{?jQW@YL0acdOptރbFHxN倌MC7+Ck'*#}Mpe *ꌇ>O-gmb(O@6ډ/É<`rs $ooD nTS2jGӯi,֓;8ɲX#2#b D#vӐOJP^+[BaA=GQÎp|.R74h< ËGݔ")k'7?|􍐞߿6/% ՙ"k)bܢef]MA1%qJ z`"4L?k֖&l,zίwGO8ֱWsS"G4NV 'Niqs'/9ZhS e QJ騙l=7͆R翇c|`-jNM ́1{٬!"ZrodYp՜+a=FQP>M \n~ov|:N=eVIF]޽L7mXTd㻱֑ H֝)C $Ӕ_5;n_[5 wKV ]~ hvs 'g*z;xh krMA#D͙$<2LwfT7\u@E/z޽!ީJ75$f͌h>NlWՄi?Ci;ǣdD[.Vl˴z 
qϤvaXa&Fd<_ygMM?+}L&nNM'u(`k!`@evyK].20~,pJP2k3>w(f'@().dyySHd{U&KaF\֡&) $cֲU K}2^mv|t])*_QGOR+ >|g|ʃ鈣hɬ`(4Hsn ޴T>R`4-2HAXXjA@ⷮ> jja(Nn&džcfmIƇzbO,Mѩe͋l,Zt7DF!+4:){J=|Y_*x AYlKKD- 6%uM Dgc|+>PԱ0}>:FG=YUy׽TzΖ8 LHf?ۨj-}轿V^ Mڨˎ!{j/(q!m)Ghyݙ-HBRbMt {^9vէ*Ɓ$]{o Ilkr Όuݘ$lÏ, 1%\? Gw&uW27H"O~f\7{goñ9d -zihNJ'$vz*Ah%~\x+cr9LzZL1CugRpf8t=~uG\6Tf o_dʉ,jbVU-;8yۏBo0Z7eN9I SafR4.ﴑ*TKM&][4p ^}*N _5ܱNJkCj-6L#cU!邟 A'?zNZɛ̭^St 7f_gwQUK<ڻGxEO<nNL&1:5<@ވklѳR.NלtK&uX1#2_ܖFI'[T1 mm"AU.lA(*v%| Ha౺BJ@DAh])sQFH/iO;ɢIv!ݴ7g -r^*owfjs.FW։\=Yc nW'dg.UZveʗbJ: 5 'o! qQў)#BQK2ޟB؜Y-3kU a=Ͼ9ΩiQ=2sν94~i q|=O}HGWm1M?|={U*c+e7K5V{.šr oD \gX o;7Gf:n׈.Z$^$fY _D:#{iݪaZ:6y!xodA*M$V+yn ganѐUpޛw$&Qr[bF9~0m)Z;R~x1- _l`3i%c ~7O{k iGX(}I@~j#͹G)kɋǟ}ꠤSsb;O\/߿{C 1*Ǹ6^4 ao5 #*6nKNf}u増^OC]+N2@k"O"ڼ& ut[4[WxjSDUVCAg^4̔ :4GvɗI*K jǷ+GZmH)}ʽvs`W*(`i*J1EƘHqǁd1! ڻO|`-$o/-f$9k A*LUJ y6J~0>ajw>Xv=Y,& slw]4rw E, ު}O[J)6*L$)xmÕƵF1N{2k)ܕfd&~#o9NI>AŦ>uj4d3arrTzZEehTE}1DRhw@dxvU Mb +2* EwIZۓrLS^،pD2&,ZI/%ouӾAZohԎuFTX@fkK21‘#z8շ[Z2Uz-tH^-=ѱ;'ĦhE~Y !Î; ÌNj-tS7TA,>;,O- zu,w:Y>6E!K؇`F݁QNGvebu!\1Wi;V|;SkR$n`> qO"'1w6jgr6FT]|$"|QGNJbW[)bV/1.h/&B2?j>Iɦ"2Aͱ ^9-- ρ*~k 1DtzMsWki'qBHj42RJwo40-Z N%iS|.K`!;t'h0\&Bۍei.?F k#dEL֡hc?(.c]]eIR`JO7i^$CWH{l,2.\"SI{7As ˹A8d;r˩>RC/;ųV]>:z0r-fHN@舏vp>5u@I+i.cށjS*=Z`SzUH)a ig>9E/ ^?tqtm_"Jt"h{ַeL[!4ڀYkt/!g/ⵄO짢\=!o&QH[MuлKւ0G*Om~#4F\ >gJ dBcOs/⋪~~6\E0elXU}oЁi+/Kor%q`Ps3,pmi9MEQ$EuF>LqY(xT^s r \DӰ@>ϭ VޗDobiuS Y47=ˬj>n=>ZZ}Hh/Cnt\CU5s0P{:L{&NKي󴉌iґUĜTD7f~5;)FQtD(&,go罏~blԳu7wnjex:2o;`$ATʭqw<^~~('ux)qV3y9Jɛ伮=L1lmIS~݀St Ri/gfBoԬTXt/% q`G6 iݭ2mUD3XEE,oJ.( MUAo?A.GO_Fwn}ļ1~i@E| XjI8%U,gs\\@䧭r+#7I x䖵nb`O"Y2#u$J,3}$G,BDY`$jd'8\m&)P!h-*;YL6)= "c)CjNKM;8sܸ2y>.;yI&d^Qc m4Yt[[7r?j#l=fޝ)u{qyZnUiɐ7gU/f@7;0oR߭9qZoe,ʎpR|6,fU厂&&C{OZTOK,=h5-v6>R2RK(NbU{(T4=Śh"E(,=]]f"Mg?wz,ąh: endstream endobj 711 0 obj << /Length1 2083 /Length2 15074 /Length3 0 /Length 16352 /Filter /FlateDecode >> stream xڍP = epgpw'!xp'Hp 8r9}UTV{3CA b`t102dXl̬V [@g+{y9A6qcл=@`eef23 nfeP`:])=,,A@mJ`3 bt25(,v;L y\-iV K*4!hl[#<@5s3n2ڻڛdJ@r] 
#`cSS;Gc{O+{ -$)p4uqx7v356yw3uc ]\LA..Vhdfbvv@{ [9M;{[ٛ!ՑI(#ϻ `b@SK?6Pt]#]b]^_02LVw2 ;?INEQD]o]ux3X9,Q6;؛; #B'g /{t3~3_7#IW[?שrY7u]Ac> U *ͬ\ }D-[/L djWecl.V\1Qg}Lmޯk}@>T 0vv6?wfyS3ǟ `bw58hNNI쿈$b0IL2 sq(XL*w7I`/ygz}Иm-3ޙL8@wʿρ.~9س]{?99-xoLY _]!F[c;m_]yk{Zv;?z:SwG%.9XYyK6{J}K|gW{z 3z gL]'}JN=ˋ|! !Dk2L%f rWH.HXw+:XJ~@,yhAlva>t@+UyL0NbPŇG;%f$lhbWO]y<UPɈ> Uގ4LR9DZԭ᱑5@Y 08;LZ!s^I&`&M+˽}.Kd_^hkEQf-ÂI.V};6r+3[Ϯri2C! U{|(:cQեIaeQkr[v | FccCrJ@fj9 SsZx%JTmu'i4>FnΧc[_~ΡķګÎtk -.Ɓ@[q-zuw ▨ < \1&krȡ ޭ<0<E#lp 2ۥ4Yv;U6Z.Bo{k2:r$颔@\Lqnz^='-x+{{0qXb&آŤ-[Qel?MN.%uE6o xtK\BJ(_tWcEӢ! -V7a,P+-OjBc MpFhZܦ 61&^m솢лPkOCS,t3+hsP~i559:d?eWFBty/I}%WTc{B:dryi%:6eHhޯ54,s>a}][Sd.Vt4v2gylC ~_O86ok濰{#i۠0#GeǶ1[{HWVfi+m9as_)?{B?ó;Vxִ)(רcL=sn>( vt퐐7$?t]+ eۃ~0LS :≍Οjdб Fη-oX 1;})*UlޖXQQ0@3cc+3-&!XGD+6>@6B-" iс!<賺Ì@1-Fgq2#4yqi@C@ '6^~oo2uԆ #Af_OXMد '3OKz%vh`]2p'opfJKjbTSٺ,ͣA3Bϳ[[al3.4qh.ڄyz}zڒ 9: EeE jӗf%jn=O$1D'4*wM*wE8&LS6pT ^8#yS 5 䕅ǰjVSц#/hɴsďɯ|rIZ$6&Z_V TReețPSu:ĸ/I3_DOmX4@h? Pb, p(2?iK+mԞO q~Nx)^'~U DKɂa%u7NXVϫiv(DZ;%e"X>ESeSK+`z7gSl-jv)P̶ң(qfh%'0#+g2!,_d;nny LkА&S!UY TakRl[8:auC^|[$=eP*,@>4|U:5h>ae1{j-LE RrGl#lѵnDK˯[@5P([%_b7(sgX\9]oR|2%QcmQ]60tڔl<E oP,|6׃KO ׅuD^KyZ i֥G=XqWL9 I{^]@Gd~~h˚ oz?EVǎc!zAtVV5Ų_s[%.:rC} @b0cDL18o+.bDA-~Owi]o渢21tIVOKT2Fr/ұv`4lPUM},[rf  S 6~3?Oƥ *-fYq%v1!Ƙrt)̺vWS};=y0jݣyrcQ=SzMȀ^phz~ʰmtHN澠ѷ'U[7g3lG_5 bS+۴m FDQpPP1 gRmק{ ܔxqHzn0ۨE9~,svA0Lzh< (ت7J&IwO2VOW!Bꮥ£ cMrhW7Yثm~dM%_5j*#:3D\4!p \-`k o5`pEyb<_᥂_;#&m)/#ܘ~VaL$&]ɈD4۳ K=)#wDř-W#lNbPonѭ֯h`Byk"s-.]|Ҭ5 Bݍ {`%7$@آcK&<'}M裙d|ϓCJ(":FOZ.yR_MN9H$~cNY%Im#C/j|v$38(Lh \z8es5`+c)!0\Q POEv&tź[CTz< Wh٦hi,r2;,ħC[y>_skcۭ3_D7qi &Q?``9f'(+k,8E~ڀBD)&eԼzqJ|U*1 Aܯ.dDUZ(9 DuX &aaHETҤp%gtư]81:Lu+l[jjV E ~a2W_Be,WȘьވ߄$(]G9둳‘.0y,:uhT9C5т;*E4i] $JlmLU'Tbs#Y=jB}~iUrm8Jv`I#$W E]&kӨzo$C xq0X~=[f7mmÒ-A k=,u[o|ysUٲ 7qRJ\{.2`r1\g[EauÜi3I\YL]Ny@HR~>xۆl=&QX-GFZӗ>yMCH&`<\8,"jgH0ZoXK:ݶg4X&7;O]K7Y'8T cfȪ!%;nNRԪ7*#eXyޤ.lSizB@D_*6&X)Ƥv($=SϜ63=wܥDD}U2^$Pm*zG[ ]Z~N¶ aLoTN%pkѕ]nPP` q70vZd&o>4h2F[(V>C. 
9ꎤl#"”X;sALС=LwA3x)׷tX\ӆUopX\ڂ?|]YH3vyL6rwT&@av|i;2͘f\ܤLղ<%TM"+VYFA@[#V gZK'Q@P W$Gh[aO_="Ï*ݭIHz+{nv/BC<*dM:$Zޙx#^|Iݧ<јͨcVXZʉ@uJn?x٭n۔>n O-ֱo&X>|z$fAfۖI~o`U,Ĵğ%qW>{9pU~5@Oz =TV_QqaawEH , YUoRjh٘k)^3*y{$zD݊_5n-'ix$pƫT+bb܊wZ 2MkG: p. N+$J^-ŗ&y3pp9,$'35E^",* ȣMX yUC(N݁a?t8DʊamwI&mrj .9ΆleBVmo.­|jکG_#|d9\{Th.+TTaٓ8pRoa*l:DrH MJ*#m.LX;lCuq U% {:54 SFqhlO?J|li;M$K:噵~A:ѪM2Wǖru@Ul$ D, ,g7K#edP>Q 7|U˃7vGeZFH7\q۸*a紿NSZ +_>@ ؎}$wwH/Xۄ} |8?4XKdI-#(1|th " jd U(q&;52OIΌs.ɨi+֯bҼ`~W4:>VC"ohJǓb^UW5 lkS*5=OҜG{yH$5^QZNMc.ii㽇' żBb߸SYx.%SefS2Tnad.c ʜ\+ckѸBۆK*h6C:3Y٥[6nC%ݵKlv$}ĿaOheuYH-,~`xឫ#]K tFojg{\nIR0luCbaTe2?FP}_8q4(ؕdodqߌ<9957"?H92VLUcܕ}Is"-~{> 0NCxcYl)[U[4_\jq@~xlV)m} 1ݍb)puEvQ  7y9^EW @4 q9@ao Y)19C5@PZJVtCO1#({B㛐tKK`fJ`Yb%4K@!-|5_J42}OU 2~֭P^u}0/3#UI40=ɐ;0} }k1pNzCyQj2̳Tq nNe8^ܟ+sIYN ~ASAj?ޜ'v -Wў>0]3Ih`Ngg~q ZW5Q0TkDR$ҼъhJ~Kn-yU@V c&^ۤ+d(lΠ 7SOӠB7nU c#dI&ٟQa*ԑ &\OJ(W"U%@g{2﫿yHHc5/ȫmgB]*LP.Ք߹U]~J*-a(*y}8(r(:^ hξ:R88я%w:X?-!ӎp!v~g!7bp%YdI=㆜ܫ]qN R= Rٷ{@E1=ީǣRKa(DEY&Kk )dD.[zFv"aNA6/I*84$_Cܵ/O߈ٞ*M䱔qs?6-/7if(gfE*ڔyN"a ]-%աc\N A;\=BT1G>4:L28ieW ?ʬӳ, z؀"T!mblx] LmF:NsC};uvaz,8~>6ư NMIД0_qqTzT$_$,^QoKN[k|1y"b.j[ !~.u}i`kY+iטe78 p*ٕ>#gR"Acv"hQ@q`  5BYB-A:y1 q9tf]`UO SFT?&.* W>3WnQ0ʨSL(Ͱfs@> +A`a"j| b0 ߎrL3.G4??[ƚGefEԶ_q&O3ITUXi[~ݯ1hkY,=~ 'k9|e|w"0z4'(Yn傖 jPc;9֡5i. 8⾩J]>シa2}ؓ(jnz9LUp#(_CY}}s k$ ނ~3CQXC|ʳ+z*~,vUyL乗3z~ +'>iȏ1umEd)j^A^ß<[9MMH>^ -v I7NKl3i IxY;dcDL6eNIia ΢(YacV}Hn=6؛/ߡ!@׍l uDuA`Pthj̐ VI^x䱓Q[mG KZmK< 6b4, _gDZ/ۉYpsk˷-|a#F˯J+6ƾ龮gIQ=WNmJqLi58? pY*Tx0R.!]T͕JOO ˳~L%7b5):|ا`_E)j8lnAX0 >M#iWWdQ[dfl03J0y\CS!eEx#KXv>*ס6b O& rG@!ğ8Ra-n<##Uo, rKb,q?>$>tE3}7KsE.{Tފi lCW7.w22$9Me g{17s?q~?mcG=E0vtJtʇ7[wnV3d372Ȍ0-a?q#(rR ќP$;_ 2t&sW.5%Rq꜁Ew?F2H7`%{sWqE˴.nF S o_,k%To2)w(F/\eMSH 8)4T/w!Tho(6/DSxxLɱҘ. cxlqB5JQ7Y~/Q0e S*,0d.-N{y(p%@V2@ڽٹBT\#O%E-ysx{e2M`f}Cs0Dj݀A2. WaHC-,Joo"_ɏńE=-*q^1g7YCۏӈ~t% D|6~aiþ< al" ҦC>jT>kA2<oF5:_Qݽg8'_yzOkW%e^sis:j`!e{ifW' E=ܮx*0'{,M"j3CU_0:%>vƘs[maeXE̡jq^3ُ>."B d[Fp&'P5(n?pm;٩g340t3mZXV-qZQ@֏_Q ^cr\)V[(F1 ##VFJ|= D Քrah==O! 
n d㎱kg .gQ#)q .D%sĐ]x{"ΈɕZ ~4IV32ϋj$k\?l-f1E 5Q0c7J 4p]XnS$: O%cC04o+y,rg247 kʹ(z9Oc̦0 W#nc7yUO@!5Nߨnf/ *YхSs{H|L,.yC2[OM.\3." nU چ5hm_p;՞yVyN5桶I}0lp:fphJ$_Q&7YNTmOYg[bYѪ"`I_gLs ;".TG]Z\nVV‘Ͽ5?C7I/6]ջm=9)HX+Ձr:ْk)!_ƈpX`-V`&6@!JTY,3sus Jښo uR_mB8  @s"}5:=dnǚH/(|:jDH7iuZ] 6ʼyq,DPP:\ju1޳\9)V\tY V$IJ9WP|H\#[= z4?9Oc^bp~wFVr%O cS#Yl@VB>:K2-14Ɩ6JxdP,^FL~9K79UQ ~BX's*Z4V>uWKOk^CXfv+E%zj +7GryirANNtKJo QxptYw HVPw(O{Sꠘksku NLt;>xxt.Dݶ[q3X3'(5-(fF]b8 0$,M\!LY8*\_--읱uғɨI-ktyC">ցV^N=*u#P.Üm y}E8)Qm T3=L/a®p^T^;]1<5K|~t)`kSϊp0cZdl䒛8/ W1ʠ)ǣӯ_Cd܊UILuvCQ4 CyyLr򊇲'_cJk2Oˊ(J+ sɡeLtAAM9`=Ïqhw먕u)%"AQ wܰw^SWZ$nj{@[s`Wșs#RW3x%T,*`47|-X446( h-Öݚ߂ zL0&}K-xB>uTlV&t݆teĘ~@A?,||unJ7<#a'!3\N-`L/g3mb{^ wrfIvF[&Ĩ͹M=hgi:#hwDrP攳Uy\EHf<\fOXd[hח9JYa˯2&Z qbIL`+OS07gƫ1J9]0ePy5ŝ?X[r|{O2$d蜑RI+OQM2+4}NvPlzU>?l﷥9h pJcfðK40PM3fp)ZȩO&0ʀ J%mbV֓!Q6h' `m 0%&kkINh5|5CG*G&XzOӦ[^DAc{"VPDUG NZAU M"Ig f${]^vU| zD3鲧- FERGNJr_%E)Ԁ,QH2+X=4؂2۴]=,'6۝NEsV܈%-C^pp^ U/L`g_КCuD^ w1?V&*U*nQmL$K`@͉ĿKܺlsck8un>6?Kyr<(iw= ]f X)61T ;YF(x-/gFG)~0swzZm\6f'r26JfCt%ڧWV61k~ ~%lg2A P]=:,U&;k'L{KĚ*Qڒg,jy>jސ -O b7)Zs~4׍ء,Ka[˨8&vh-#b}ځYJ;57,552".~D>.jr }ʓK O'HdS,br08ΗmTs Q牾1דּ1^N@\t'=cl˨]͘DSYF@zW-讃&(њ)m:EwETɐЫ)!],]_!yqyRn}d1:/mܞN) @lǍ +<(>!QI2! 
!(i%5tZ3'HqAs; 7dțvAhmZYE+`3aq ]Z`,`0" ,c/ޛ5}ޕE(a>ĕa"R&A\yaDXzDuucf\Z Su%ЍC+[\gbѾ~玖OC.<"1T4rhEF5ncݷ>u05~eq9~z)Odkpya⋥I|=CS5*wuf (n K7z̛ |NF''Pres!Mrw)vEUZBeoN/#3I JE =|kɌG ~en3{Q5k96-7 mpS?^tP~ȇe-@Z-g8р@!7-;+_+>Hy l tv+KdemxױEb+_#.0꽘&&եa!!'"Adآ+udbb^toʠW~آ&$dY0O͝a B|B=Wp3.t0!M*' Rq>\gHªYrD+q^Xhr-涠/nA9e}&S) i t2Qx!ȘYD4vGNwb3Nu 6aۉ܂ZcRKVJyPj) QNei1nQu <=ǒgUZ ũёCYR8݅4wAL?jS;cH&cj]dUfΒË)xPX$½Lg7a32eAvNB 䎱g2@JN Չܠ>]wyx܉%p#N\ʐCz+Cyҿ (!5vMaZCFa@.-45E &<d !xrǮ d`hR.o./$J ,9#2˻':.ޤ4k 3Cj2CgeJI {7.I9r?я(/ ("򬎗'{u3HteX*]Ojzg㥉up yv~>z=$+ˋRԮ˚Rۢg:jò-fFX$NG6٘4ILr5oP_#սtio EZc endstream endobj 713 0 obj << /Length1 1450 /Length2 6957 /Length3 0 /Length 7939 /Filter /FlateDecode >> stream xڍt4־$z7GG!NB0f(G'z]Q"J !zoZ߷fߜ9,z| P% DR%MM51( Q0g8!%g)Qh&Pw Ťť@(/ Y vCu$Btt[۠k p%%y`pl`!`{ [;@ :0zs8Q..p_# *>eT C\~ wA)fHw_5Qwr)AYPQ y@l~tv'vD:C|V0 @9|3O@PCPK5AwufF_3`DsO^P$oTSP63cH7zO>!Q @PHD ^6WSVH OcWn/qpYL f- 7M@#UH \~0nMZWZH  5& wu Mf>A~?A(=F-, bh}A B`P C} H/ `Oգ-Q4<~3 @)>+3k|b0v6%A?"v1o>u<`i$D:ض6;̭sc!B3*m7R\JO%. R~R+mVM". i{~f ?BX;H"5PoH׿S쭥5{·ZXT:}iVyo&Kv$޹Zƒ7 !q2=?u~j,'zso'M_#aY'$D?s;szÓb/AG̚wL ;V7Wyq 1\?"\ܜkxȟ}E;J,rd4L;U=Lz8? ^fp"1ˠ} kXU%+Ys'62ruЬ8d԰^耋Sxڃi FڋDWsEz3(Ev^Y[ink:!uNp]pȪ`k|PJkBě3_y]pxXHڃ*KbQGyiUGN>P;lI*.FNZ}@F]gSE=(+5pMկ"J6cpKp#,eu1k(v5%lHd38JW4nTs4{|~л,?hh݉+!8@BKlq%(l~},+4߮E[ f6!Aw!8ZC\MX'G}0hd%qԝ 71}sg6*"K 9Z3NBrjmX ZL3MHdĀ,? ?b6 :3ًr[rLul|9; mY?s)&br.&ƹ͚dgٴcqZ2{Rڒ6 mL[dPƑøڇ78q7'ۖ8Y6NԵl}ZfCvR~Uԣ}zlG=>(zK}oGbj<3ӱT%/.u>`0Z_= \7F0s P~o v\0o\w7kE %:S ՎOFJVOYIYQ?zjwi| Iާ9sm űqQ(N)ފ}ΤuŒWށ[|یϕX& xDM^2`4'MbO? 
20zy(WJj$ё0`R0tuYK=GG7kQ#\J!5vmScXLOWx3izC>2ݡ B.s{9;jꗾjgҠd=Z'b>Zj|K[Tn+NtQ}x2Td+g羓<ۋ;Ecʶ#R]f^l!fknwhFLt.x{B_tmhwQS0gt$1X)klnÜeeʱ\Ĭ[V`{Z H wCy8o4/H=4Zx>4yQٱ23:Av&Gak8h6 d-i;+UhQíB2aaSDg_d@ϞfQIdy(!MLoP&P[byFC=i);nc*}s%x띧&Ns"8ձ/" K-t.Ci;F\' 0~귞c0}.͉m0Ї]f<nuJkEeRR-ƳJ{K?.53^-"è0̟]`F33=Ť((0G~if cՇk@ǚ?žx +PZRV9ҩ8E*[}rBҝXɴ RAHL璾x[V5nUE y+:)|N!ypQ+!XAtٝ=ҭ%)CԔ*.^l~hY]ya_zH g=uWR = y”A Rzil/~PjJ90`e "s΢vKH؏zwTճ\liJ`EOt)_y~ hPyLJ)t MyV^6{Rj8"oB8q~:'6=5+Vؽ["U}λR,;FerơŇK.E 66MMvM<5YD#zԘ9ߖ櫢w]:Y]njoy^5iA&o*(ۃn=98çZ؛l J?]|YJ=8q/󇶴OXJE6c=E9Bge&͓vɛ5Oޮ$l6; Qs6y43x;5C*y o0#J\Ux,}̾MJHḩ/z2ٳc.) (chI<,+ Fqj̝;A$UEG"S#Ձi9 ߲`7bA0YJ/uHˎP39vԇ-'ArRgmagc_$ ȨrM32OOFw%3y&icHd}&YJCGE&lX!^>1s~2:G-/]M7ZY^\%16+7{woFJ".륩۠s?u5( vx=}gH}JkFf .%EY+Z02,2XʻאI{O%DϺ3VeȘMsLbj09. X@AV':7i?&n timJE0e=ڟK @4z@1{+"Zwq3Ŵ,(z^w8iDʻojU:ζDldeQb`W.T3 WzN~[wl9i5mBDa:S㬺Wjټ7 4#nDk{O؞(5p*ph?䘌G\D#Y˱`{>6xKaO ~S2γ'tN *13SzoYb7TIf( ~̃'fZNr xpWڻڊɕ7ؤa_x}ALWm^rxȴuRPI,KaO b gG@km 0yj%M'T_vIJ^zgcbүn&mMeV6;6$IUN< I\ hMH Oo,'C U3ϠqXDm+!O1$d.ߡ98 d]+A>0Bv-]=])5dr~!ij7e723z@Ne;_Xvv!'#^<:?oM\] 77Q =⒝yJp=/f~:?K«N/-#v-5n ɏ 6cV;YZ/f6n]Ik`4fQXd:rU &ArlvL~KKqyQt:`F&nkf'jk߈(v bw;z[W/G1`/䭮5i>ތϷǀrD\ *FIzYO$Ӳ~)-%rFBu %Lj5EFf%;bm]04t$B_(K W6)(P.7e~2KL$ Þ4ik{+)]RnW[Y< _!8< Xz*K00MUY8=|t^룼w*  ;?~'2<ʽW8Yq3#r}q>,ǜ5zU9|sꁢ\aAζRH`44PhY!vW딵o?>d+p.~"&^bE'CJ@UuiȺx&}ADP@? 
L#Qe͜c"TbgƢ_41hSDU3S# "Ll\\лP+u(4{fEa&}o?Əi9p%Y/fJ>{x[)0wL[#wl>U,Bϗ{O"oZ?_bɈyQ-106G.;15K}וq7*td%Wm)҉&Nӯ@]k6tRM/Gac9B|퉖x۷n"t/:ؔ[Tٜ_FP #\ {MZ3IZP(~O6dObuF<ڣ_VR]tlTiC Fk'~ĞlɱYOm5q'~(6 Zy_96qV,|NH&]8 sCWp7nU.1*8n[2aua9l=52`e8uHtz]'M~^OvC*x~Г{r T]43a#J`+{ѓmQ& .>;JjE̗3Io)MHwy{9{Mˋ̝h%E?|m]1^b6xm<ı!K@iF]ďǂFhtp:kHJlSy߄j)jݸy@{Wحɱrj4:RęcUjk*w{Ŏ}N-S0.M~Bdףq> stream xڍP[-Ip= 2www-~_uoQ5<{uջ7%:)PWT3!RRjX6#Rj ĝ&.o6 7 j`errXXx0q6(2@@0"1 1r3:[&.V@Lf@JA#`dbfrpe[XԀ`G%{_1!R4,\M7~poeʎ@_d tGg d \<\& ?&v`x7k;7ŸMR.llf[Q"inYd.`o'a 4{vO:k rpy ,AaȬ vrJMy3!c8Yxy@'̊?*vtpXC.ή@_;!̭\@Kk?@[=,o `?_oeg$ԵUT?>117#~4*&`'Vdx)G@v;4L '7dM$jgOmbom7mj]]6@m@ kk֮+b ˷ifd`bn XY5355;kPl|o ff/ m\lOCd`qrLM<Z8ެojsL [; g?`'zf ^n,39Zc0qEl|Kniտ '_MͿs`? 7977-R vۙ%Mҿİ/vןh?|F@ꒃMcH}(;W{6Ư]$猶s+VY>(9~_m"qH"uhIbz }GHQ&FW<Bڋ ln9NC8NY.-ک[_Mȫs[ͥtTc:o>>x' 2])L^1ɽusd|tm/:P-hE $(\z!2LtoSi qluR5\Q”s\p:6ّVɈ'(]%V,c򒀑er͉MEHvu &ɝɒ$t^EꁎgFTyrha$8, V窃 7[/m== h ZvdkG 8J@"8-\ +ʓ|GL,݌N{}߷ RWQFVZ˲rStynw ;{ $x.S5K_ypjמ]Ҍ%VWC1ʔChYSwFףfndm~i9@QYZCڳ8uWV%d* "Lܢ}~ +`—T$ʞgjbst p# _/;wdpx2OqulJW,eTZ,r^r %;Z,.CQTU]z[]X)N9' =!9j9C*Z?oh{1ZĊO`n'6=WQӡlBn im> +-f3`5{D;LYs?.`HQ|a`x9nUdx۫ VtLp G?E,W}P_-9GgA;`D؈TmƟcQ]5)ְ;8EL2a5:㳂G6 êX"95{XBp>@.̰Қ;ޙ0,4巶/ܳjUic=T?! 
AEA؈Pwlt4 c#<|9IG3A;9:ʢo,8A0)aF4;^ %+?VH}y *5]oEO!3ĈN-0`i$g{7yC(hP.y{3_e:wZ厷9rE_ ;pox 1O,Pk'C &X$!!p=dp)[8a<.gdg4R iy۪D@>EͲ~Դ5=s ]!ՍS2ҹd|.rj莱 ?x^K%O˫fNrP|J)$]3_3*07H6aA |;wN*2۵:_>j?mhZSOa@(qn<&>fo|%;242l U7ɹy.a<~_BzJއgM4K_;BX?>}XF[0"N~}-\, HӘ@=Ӈl]-&+_óդ5ߛU& $[SU6^\>Bzd4"K%0ɦz>vmCnB,lxQP6exvPT6!xT_ m:z!ӿ b0s;IUHbYY\us;%y\n&"h~C䩲ꕿGȈu\ј7cmċ=MLY]Og؆e]gYO[U^ CӹZԊ 3SEDtJ-68zF_V&|J2ў:9.h4]B2{J XχvC0 A9!̙!g79dym]>/L91 p4ԝ>164ғ:{\p1睱/SA' Y~ė:>X!ge(nP]۪kft'HM㦕;G p\KSdU?觕Yky!J3V#/;uoik͠R"(˅[ GL[ |Ldx!M%Qz +FRre*vU(끛'kxh?B˞+ &xqcޡ*Ђep?8b{x7 D QR`E(NbQ\]?b 'qpƽBVMcӠ0rm0[ )g^/sh/%R9?rL-eMs`Z-z-Y-: wKgjRn.\+ #!u-o%YJPT]A ƵaK{̾NMU2Bo'E»(X0Qv)bmOW9 ]FNY# zҔ$`oBble6BUg?5`=LH1g"p!sV iAs21>;\ TphLR IϯJ' =v(WܕO)``F̋bv /+ۍ7 *I`F\+/#cݐ?xWNf oMCY絧<7VEF#M1yHf;v9ҍ\E޻e8e7_W>AYB~&d:r+Rw#ҋ)Yb1I>"t %r.,tWGPT|Ɓi^5qoFĭ{$‘tG=q.3yƥ-(?>OSETRԣt7Q7QsC){.= RN"C1TG;LO `K-!'r\(3_CAnj.EE`OX#mnkq*oSŽ]VL[d&N0^Lo3Y,,JG~񜳺dzZft)4 Fp)}tS?[J( & = I1L]<>5L #EDr(CGNY: ޝ܂=OzZ QE=mseøJ&9o+~4 يRe EeSeNv0MN@<,%Tc?:fM S9pV]{᎚8`8r4+e'Q2Q}%}k|W_”<;﹂LKJ3q=㜍;9A;tӢ qGHSق3߂ePji&x E3}|裁IAQG͡}8,Naaѷ̰kA{9NrruO-T\3 Sw$ Ɏ~N,xQ2Q5W)"-OK#)65GMXJ;41ev?X+k$p?nuz_)pԳ~DwB!L%|EZO.8S)4m/6 yy&=gz-9z?֡UhI:,#q(}R$֌8u{jf_"ǀ!glSj28L<>z%~ e3+QJOܓ cU}I:Gj˂wץ_8gV!%?c <"ėf>WL͑ O'"$ .yn~m̪uO"SrG>0cf5fDLB;/X(݊/,?ͽ: ;gM|6*'g55QiN[W gH/S1ĉmhg0nYs2z^[ HktE 1`r ~_&XyYjs r^hw}ch="ew_rt~sa#g1al$C{۵E[fuW_Iˊ)М1ZUkh &½ Bm~911`@93f.[t=FqfwG$7<%_l$4iUDC}]obGBI L* CƞO ]_ȡ@>gJvf۹ϤHwOUdĎ)+ͤDRw-I>.$INn+ NJH낫\PIB}i4BkB4r'-ڠ9U2*3pYxNVi>@/ AHt\`-7 @ }!JgU͑P#ޯ apVY^l`0X18mĀGqcs5ɘ_J<@lmHp ǭFe'FRn&rA Jە;]RJ@]TXaJԣKĶ XQ ţ:}zF_0g;׏ɊHmf<4ʖ=q-Hk2JNaZH$1E< kŲEu̫e!_eRދkKr֋؉tUlIi+?tZlJ=ӌ[/ {=q-*egT3qԽ UQ={(X`w7͗",/qo>dy1b𬽈y}xZBNHUTݨc(2p~͠mfT!t*x5.DjGLh_VOVҥqEڅ!"Z/!Ɂ?4s֍??eٌC$٩/c( EE~c Y] ҽ꾔1m[QqV(p{q^"l)TlaC{̲,^+xN,ꍔYz[zf[v! 
Ùi^^>v`TuWOhq4qN)Aahe8ľ̳jU.oR-ڇUٔkKwjW M׸@)څO/ą܁g ̰`>]S@Hp r~Z448䲂8>!7z'c";A'vA@^،Nn` җ>%]O _<(ƀ6璌2f,-rR&C{Y畅һA]oq:!K"Tʆ!%{i${:.b4Ulh7!UrE|-fVؙ0- XfaV$V2d~];?iM7}Gi Ed׋2SG wT;] g=~)$'r{Oe[JvGw\W18 QJ/|OCd*rm]siIu`kҕ] ha ^nԏ=[&ZqRjcqHh0)Abh_cm-gi+aeX +Iݍ Z2Pou˒.r|z 1k$ݳi?k~:H%\hQ`{ZNǬyς?wUi2uќiES}afVVbAϝzLmVD^Nw#RH鲴J-ѿwE)BciۛO%g? G<Cv^:L҆g'{y8NZo(i<CnRũYFygqX0&4vw61)* dX!@Ӷ`WR#,W+U5̆'+{I_V%\7,1;0+)>N264p<¿q6&UBKjn5};Bn(ׯ7 ɧn VfxFL?zϐ`*ܡSqsƫ!)-sktv`;5WxЗ Zii2\wS'JhB K=ɍ>\¡V|Kw:n6f@1ad y];=6$Ot6,y%Cɺd{ U_+}0of@-;X%cpecA$B͓7y膀q aVg4{WFxɚ~u7mFq9"Û^STd=W/4ވ|!F2Pcx~;b#p/$IgtdV]*J9F&raw[C8IgwL~@XY,\Ka:,<´"℧yyG0̛TBImGQخcJnW%U~Eg􃤂1< C(O(ڂpKu4qȁByبjaN(! .@|\* h1ȴ4٦nc~URxU!&Fv 6?E{#-O+=]NւkZ6 jaqKp㠔'"a;?NOi|z .;~ ou >ޗ=Bݲ1bVc\nADDt7[*:9hdNYͷ:w&-t H6H;3UَoeZ; B&&&Owb'4\#6$Fdm:SԚ+."Z# s\5fӊdJ ?W뭽2)1ꊄ(H؇a\ld6a |]UFJKfF܀DPl%>YMPm0oX[M^ W73mfe;洯BZ<ɅRB D4x|bhNM`|/ O%MĬ}k)|y%@`] g;W(ׅHC^f葔>/yژnYboSTm4뮶P$k8&Cu5)5'TYgE n/dhZIK)7"qFp)#Mw+zǝT`%%9뿵?B޳2 7{Y+VPYn&9.dݺ1&e,=I"D|3]t1U5VV}#mbd 1b$[' ͫC턉mg@zz̍?dgC*+tXK srYsN 2׍E]}:J©yΦB#(e28A(ev:Dsep;>aYhA > C6OCPH_jáh`wwhh}SC.%^]ZV>"*spXU9 np :/0SkX\|:NHQmxQJH5Rc } a s9ާK 4%d%W5OM"=qĕ0.tjyDKpbUS:;wPe7 ^%+' ]:y$F9C"X͇ѲگDfs1HXRkv]%^t+u5,|.vέ܄ԅ:ﵹFxgGwM>/wn|~[ џHy -"\M@r:6 13U0qeL$&8 oH7c٫+-s[1,)R֟S?0ڬiܑ+2ΨH0&^'=JC:]{c ͂s;ZDk(3 ԉͰ!<)A6\?M1H磹ok/񐽔ҍ9p)X_.?38gHZfQ5R0ΈژU;Ĩ?HC⻬>1Q%8_~'>B|@Tc^ҧfO^ x3KHAӜhY ޫO5KŠs1ۓ.zMk{F( 0F3*%OZ+dJ{J'د,%bst3vq-tO*\O|(էRD@c i endstream endobj 717 0 obj << /Length1 1429 /Length2 7132 /Length3 0 /Length 8101 /Filter /FlateDecode >> stream xڍwT]6)! M !H0  Rb) Rҍ -!% <}k֚9gs=k.6}Ak\v%MM5EE`ab..+ ĠAQrC]0eSHH0,, (Cݑրps)a6Wxa|DJJBw8wF h@j wE2 El#'x ]m= n jЂ:M 0E \=p @!apU \miڎpd?@ 3W"$w088B^H @E["BQ.x;Z]~TtU6sF: QJsu*hk%B>e3vu^@{m"hkįFAH'7+? KJH p'  _uqW} :}E H+`A #$W _WW"ƠQ^eUSE[WQ x BAa10W 'ҁ",jhޫf?eΥR//YCJB&P$Oʕ|\FAs5`E }jЫP@\Z"*G"=:HW\M eï U0AP/KoY= 1W!UL !utqp G[6D]?s ?*|o\u/È'``CBW`\ѾOhBᄁ$sCݓq's¹ƴ`{Y'Ҵo>'.nꍍD65zw>p]M~WvjC?  
NHBZCb瑏j.{w7_0~?Nn[hF'3pTO[{"Πh̵آBw_kҫP2~E7LCT2߮ XuFd8i+bF}j["?_߲OOftǵoR~Lćs8Tj5MMXF05R.PZn>Mny Yx9]yx7qobOҩ+/;Xdp jrOsBjmR .^l|6MSI2xP5Z."v7_2KSRw/SuGҽt?4/knjzjߒ<يj(%@à LX摇/W,{ -\pF^u[9:&>Ԕ4SQ<oU;xZq'TzrKa m*hoc G"^}Rx}üg5Ro5 w웷/.V>K=_l`|g8OҨƖƮOa7Cm GTtku_/)34-W.l^I{_Ô%T;ճǒg=0G$,uN}e/):0˵) Wα}Q9"sG+pF 75؆KdRcM0=/gd=QX*7zNw~u#--ZU nAS3Nvojŋ5v #wr-XkEᅣX霿 [po^xNMCz^Hb&?,WL@{TM4_rb" 9E^\6u6$ç3. K{?> 4 : k+ʛ76 Mp@e|hVKofKYiYA!϶.!=*su)7f1ex; Gh!\#$&L!a02g*JC=h-_ ݛ5,4)3f[ԥj`cZrk4V+ȨD^1Z E[򞡢b>Z@rb(<=_n6ypZ T&׾.llӶ[zӏdg2s?rI>#Dt(x+|L١~R"q[S9d<Ä*=t1Uo}yxG}&ݽgS+ZT"[θVAQ[gu?- ז pY!P⧃A7c+Xo 8)1s|]M| %o!t ׈8k jeTd$wY_m[ƨCKV\6&r ktܷNmgZMN ''`Og~x~\1f>0̰8/A.DB2_—?I&V\y`m1ܤl0 JV]A;[ mvgXb\ j!7yjF{7d:3(Z/azI;3fqζbˏ?dM辯 bLw>3exy79jd7nSlv ~YKˠ>/X؍"Nl7i׵;.M\-X@7;=QPˑe+y{#ҀMjDǬM+cS}7C-45_y6g nz0eOc],H"=WK:lT(]+kqÌR;= q s f>t_xVM  9Qϕe,y|v0%^@8wFa\ +ZZBSM!nQrDO;eYv -}b\e ̬Dw~mx,Pb:ZMs_ 3I3"f U)0Un]e?J_p"RȇYM4pVǟifo[:)kTͦ\muFk-Q6IGDԋXZZ)ձ!2ҞSCO_礮|qn۩^y_b0}XK/vGLJnsuPz`7 JlkK Z̟b.m4K;[c/n߸ͽi;ɐ]3ӱ؞a]{MPm2ruiАǣl-^=׭?RXsٽx~`CgL~V8t$c&>_̓>2}@J?hOPQլǁO%n_uYRaHceQ_b)8ad %0$B& 76;N8N; Dq:Kd&ddAnRdtqd-[*Alg"x9*J朄/3Ś'9-q" fרy=F ՙi ,< mXW.ފS>}&fR墳vztؚy5nA{bFaۛgzk?#rlI쌈ixʛYU|2pů~{ "+po kݯ9IB׎D ߅ջt/GꆝQ(r$BH?Փ{V^Y tG_RJcFMb|~."/udK,Oh1)cY&Dth-`(&Tj\_~PbzRngV`@i=FY[;kX6 3<5^#ܲ->8%5V빧BQͫ/(i-MA;"NW6G.}0k7C7xj.IXuc@*AÆ +s^^",=%{ޚXK"'5s{d*k5_,\?5_g ntQ*wA1D6Uj`O?x?ˆ &T}  LϺ-3)m_\ĽT9R xQ<L,@_!oTv3aQy kKƻ.s{]lJk ׬z,eWK6 ':܍}YviTﲭV덩NXgҷ2*]/B<EL5TZ̦&v7\8$C雐FlY,*[ 2ԨfO^2Щ~GG=BL6g_NKW `{&TS BH.rfnRu3l-ɈlnAPmSJikn3n9b^L*xwN$ ¯YPyY;4ޱ7uWr2zMH7/ɎFF vi,Y?S1w lP G*YvWr"Q㑶{cg|塂$#"nH =Dk'~\zwqXw2}@'.u!gWh&'_G>˼SʑQ7e q3)VUnjم2RI(X4lN[T#OϓNE-]˸=D'ҚsGX&Ǻeѽ.OG_;j+\_8HpĉpDi!ܚ7rUCQ꾿} ~ԇh+N!y[̓E1S%|OM/ tnI F*{KA&.TE+,%o"G GCGOp^pO~JeR(,)c'yCυ;aFq[}'-;4X&;cE pdꠄLte"uf̖[o%%^$?Oݯx+N9ZY5:GR.|U{e^#t%hJՆCIȼ*N>MW YQJ7^}URK*V6%V%d5^s2a6߽d99n$~{k!4^i>,e6v{BbDD]3M+GjbJK:loKpNM]K02I[1kr͗~okĕ b[m*ښ=!>)ע.Z*/ւEVݍMjRU FNW+[$^CUoJ!,R䅌V^&o;̲b?!\=t=Z r4 N"tM|e790$ݷ.*LYnJ Q SA|SǫGj$iӒ:(ΠV}Inس~a5 I/?`((y"b]kN`Uq3Za.x B5zvi5czm&@Bj6#BOcn*$\~: 
ݕɔqJҭڛF_кu Yrq?(էRoǍʡ$3r9/aڏo8Ҟ^LP[?{JC[qqz]i!KJll:U\T4#0>'&x S+ϳu%ԯo!dlr3l Uê9*wěyGSc'_[nM%;o/qB{H1!*cn>dQ#3eI%ͫ^*>ذZ+uE9x?3DSFGliHXf//.yU^d0_)rmU6?hgB2c>mLpS!AB>_wpea r,UƭO-%-OŸ6]{NhYU"DfhЗRYg̳&ϹՉ3;2xc,x.=54)$pcՕx@)z+q \ÿgMDy{tn+ŀO;Bs^ cJS"S]shjQxsCu~>cL{ޒsꩤ/DuKVp7j,A9GkjL0ö|,'u5k0TSzv8[1 ]d~]q/@W=XɺW &9([Nռ&NDގ;7t4eqhhZ졬r Qǵ,qқ]ð5%fCY7eTEԶE(x` iӦ< endstream endobj 719 0 obj << /Length1 1396 /Length2 5916 /Length3 0 /Length 6872 /Filter /FlateDecode >> stream xڍT8FQjF+1kQ+" i)JWkU[*ZVڣjt||9uΕz~-E`! @uDxy Xg_v2޻w o%w )ð84 2`I IDaH[!*]}ܑX\^|p ,--)+pGa(Ⴋ9 p$|X#v X>pD~ І &D 0t@b~; vX/;38#⁲EpZW7X7@r`! .0eC:#:P-!7VCœ1h\<tZzn?aHW,Ft94kVA*]\(,gHww>B{:!Qv?ǰp6B!<086{ n7AgCW/'7C+`C00O`- (gݑހ{ ,p E}Z]?# ~`q8$q/̣ Cw: ~zC? L.7A 8WFYO#/?o\>8z`q2Ɖo1v!hgԱ0Q8B ń@H 鍰EbYnSoHBA@p";"5`ⰿiꟍh۟a>dN?0N_ X\7rNs`)0n%XO3?R=qQW/#8G4VcuHq"{1ʮ݈}EJD#'G#n\;0,?G0Nֶ,ʝl`:2(AHbVkJqd:fWf";t{zg|3|tuJWuփR2ukz ̪ț3}U%{oz%= 0"I}| I/M Wkd 2 -wsDdfs BnJ$$䘜? o |:I4|:.]5Lז/pk/βQ{ٳrKa WK+)WsvJf/ ﻵS )vz5LP${h3E.r(34dF =v!j|mJz-[+K"oXÆ--mu6݉o~͎fq#c淲ZnQƌOj7C ؤN\fl;I8 lT{Zޞ*'TTB|ֿ%b o̳ 4; q g轎ۺφ ;-˞m0c^PK%wyQ$:RZgybnH);l_5%Rm%fSiN 5x |Ҡ]3=K/3|1Q(Zs8sZ6T`W(14AָHq \['B> 62]7itsv;!:ہ8CސVVnUgTrشHō~ԆP׃hhiB^Yq:/kU*U\OKRQ0q ,-8m&sYs\ŋu#3{" |h 83d9\*1io/C"2]}i0Bpi_lScqOUZo.WDvч|m ͕ʐ6oӨR!U6] (˕IQkS;3 8,9eմq.nuA@# \oF;bf6s]U/6GqX Bp-:HcX`\.Zж{i6-ץ=њ|O(E<_0ȃ=%I繐}nj&3{Ӯ =H)rV#*[lNnv%g-ۺ|/8z+$:K VsPGJk2oꀨ& W=q/G|9*n5]>^أtVXa:r4"Z@Ŗ\zx"AYjx6|=Ms$yqBn䫥ˉloIZUmj߸lUݡut鷨-)&N#ގ0h;[z,ݛ1p0h2FuzX^9ڊvuf>jhE}xH^_ZC~DQ}!QSf7C^:ev-J%[qJP)x 1ʬ+c_%&?(ktXw7B,$bLy/)dytk*iv,ZM%+$Tup`-$~jNph:!О'2-mN{-%[f)BD!_ݐP:jDZa 7+E=( 䠜Dzec^i*/owCEƏsoʷ᭡E{_!oo9\$zT~A墜oh:UI:PI]Icjfko>} $RE5 Wk >f:>X)dʜ쌅zfi֭e2qv&RvŕӋ1/0w~ۧ՗.}!LNOD=#M끶Bm;(YЏxi3䕅KzO\P8y*T/Wճ 95Zu_ ϒxLL)9A?Q2zi#6T3-Oj60rt$c8={G4 !Bq5@YweM9X*t ğs7lK|۝9U)f*:^2}If+@c|jǁoB̴ĮP.Sl%lID+3kB2}ec WYq/#VME^ EsC|ۅ śے 5Pѳ$Hedn,AC <=ʬ[*/52l S;knC 4wkEYjϹC,coJB`Fzeؚ=#9VXϣa\7I8?{" &jVy\c>"{}3m};-Qv}cC#{Hhp0}BD㊕%@8V+: &ԁ3&vCr^Qc[U ;jvwRnvJ yk*@^l8õA'Po<[њI[ { }L&x nQL@ Qy1b%آ*Ԙ> 
d0<-4xwcF/p:s)}첬6ʆ}68)^r=_3}GXKG#~M<{zw%FCFxq)M4@NgAǗZIH^{ T8ڋ>'Z{bG˫I3 P906nWu^;J3bls "O/זܖb10yKXVz%z܎&,2)2Yz^zp>#3rQYggl{4䦵l"8$9&CR uف_.v /^*zMkO=`qunO2ʡJx3jmTr8f's7!jBAVTaa'vW6戱3SL={-^`Yn%_5z7N}J3-:k#*"mU/ !Eq!))m XQ7‡"o&ЛM^Fᜫr&M bo[b=\M:$:1.%Fδy>rVB㴇p($;cg}ip{+NˈKg] :cxZfekZem!8D9wf9X{ʵ^^^#4T=Y㴌3bcZs5%#uB < e^aˤk' CǩH"QNl(:)P` :gOslþoπZNۄ4X{$)9/Q9)t 3!t.Mvd#Dp**;AKw= 2GpQSHFA~_!B ;E; Ds*nhL$<>q~B8tݍR [, 7Zb*w>8V>S4U<2r>&?X))/YolSˈj{U /F^{Z3k1uQ3yKъؾCv endstream endobj 721 0 obj << /Length1 2884 /Length2 25091 /Length3 0 /Length 26702 /Filter /FlateDecode >> stream xڌP]i c!hzp wwwI̼C{+U~WzMFL+hbguec*12T,ԀNv\; A6CgP-@ `dbdb`010p/Α bjaHȄ=-̝Ae@iL`d8@halh 5t6ڀ*Z-IAclEOFghDghGEpp6(@r6;#[8mW3uv3t@k c脋 *Pm;ltD64675ZXb2t4C[߁Nv󆮆ֆFiىwӀ,jk"lgcuvOh 7kekf051݄= PR ft200s29*͠|&>@p^N@ O##`4{2MƠwph3`_ yZ{uʂ Z_;@`-2vQ0g%mM]?Ao.9;jW020_G)w7_Bb.) m,= v5VhbbΆE5wNb@ gc]Y[,~?V?>j[N I6%EmL~+t b /lAG|vpo@/7b "v+Ћ"N迈@/"K+bK"+~E r2DNɽ"9+sW@Ԕ^k `aG *U^+QxE "NWW:Zih fP&C{6~{ d5zE כ_+T4p4v1)fVP c;kjgah=3+-F&P7&@GHad%ژ:aŸ6y\@Ԡ4}? 
ohGN~̿+3߆䬿\ 0chGG}P@дlh 4̬ǫN@l]l~?`{$vA98{$nP {зd"bQMG(_6 WMffo~U} .v@#efyuʜxkf}#h$eYA64w Jd,6#MFPeA/,ZdxY ;oݺt, No)nWI?Q ($1ʘ"vŇF=uG@}!sÜ掠nsLɩ [  uhKh7Q sƫ(gֽ'0 &in)B)ʶuhq3 f|Uq# Q}ZT[9 X.spk/g}SL6fCqOlatoYh=Z%1d?r{- +2c쪩q4OCD\i(&: 1] [U1zHmQ hMimVў1oo_qyW5UdC1$;La3O&̞mbcj$gfnQ<Ԋq 0('@uxq.B%RN]64{ *H=ˎk(.%!=|%!C3\Ye +VN"߾TTW@o=Clp| BW9@ -R4,W 7E7w&0k^ꘈyh",(V61*2~ȂZ\o&F'ocGEBa˧lEz8]VEPү~Tmw51o)6,KR2p4/(:@|,MRh8Dk;l-#`Isr5( /Wӗ4 \*(;?ŚlدEoc/*\]6H,_jdqQi,FS &j~9#*–&5?$xHWf\L4V 1/Ѥ Uv|0d*hOqpBBcjaD-SݦgtHX}EpBwDt_LۦJ&O$ٙOvLe:)fP[uCL@Y+iʼn(;No' 5U|k5jb'g,{$ZN%^`?Fq}9]wy=@wtm 9k O2~3r TòjZA?=k;ujYOЎ`u$ 3c^ O~UD[Տ9W ^ _E+OxG wf 26o9\<I.;t~`m%zZҝʸǽm;|JSMm%q"ҵ@YSkyv+s] _; I!z:F8湡:(A/^_̋Uql&wA;|o1#!9t4ɪ^ ɕ¿׆bXyCih=+մnJqV}񴷷5sHѓϫ1Cp,U"|>MK8IuNd f~5K_sB4AmJ::1 hXw,mv?9鈼/ru)fބR> ^ံ3 zaI^(d tv0Jt馛Y||:{CoP$eK%2Ladq9Ai{юnM1~>iqtu3Xp{ \n p ѧwS{p3= OsP #+G" ~ fvᴿ94$Eܶ81 D/玴}^mʖmd[w^*=/ Fpʌ>@H : /;76+=,LD_#j$ZPf6IP rל‡ֆ&yTޫD-R68*($ X2I/,#;i`ׇ7w~b&j24v0ğV;ͣŽ}=*}}XOɹq >o"''HFbUމz~\c5)櫟EW,.:& Cxկ7UI-1QC5s¬:s)n 94rR4@%9)Q5?ލ} $ת%I~C"z[^f';D88iHqOފca%[sO'@.j\%S6ͅ'Oᙺxc?J, (;0>CtndIq|Dhxr&=s:\cbp2jTk*#E!eV۠1%ĝ]?vU6ջa(Sݠnw#7B;5=yC1M$q |h-l:HCO,8Ҵ3K[ Q%A]]?~ 0͎I(cVin~GMX"E=c"Ql^݆4[#:K2eR`6fqoOh?e9æ{qQhVTW̗S.{Rg+3Y+ ڙIéܑX&s^i^Q}jDBhW^c A_ \/fM;nYp: fE-!Mw䈰Y`Yj畠(5it]hR< ˓5SCHЉQ*y֊ ЂuZ?~)`JvgGX 5|I:^CBX ۼNʣE@-+MkF6$@z1 .xxsIH)A!hFPb K ڸS*7DO[px!2bj.}O&b^3*7YUvwes hc'ZAM֦ ɕK-':GǾj䋸rֿ3OkBDmnsﱻb.@N=*jfjmv"7nCirzij:V]P0/a q_ԫp Rn%\tN8$]d ~`w5F$VdIwbvr$J,w#fcTmJq&.[; yN_3<(>\u1z|`S1}!0t,;օ8Ԍ1āYq^]/ʴaø؍ (!Sqwr]f7i%_P#uϏ7;:M a&xhJ"LEL7HPF-GB顽Wk,dyZcJ3*8suHe2aNe,fro(@ ;$ggJ#LtR R3G/wKC`2K{}x6x_ .WK§2TFcY7Oз3#֥T.m<;>]OrtT."T…X]%Yfr+U\tPg *J4۳LC4n}ߦD?)v A_1NtY*NulUjTsS}+GF<' k?6](1թwG'#IP[nNq:)++RܙuNwqȅpN\Q韫8?QmB)kx=*F.|d[50gdFbk4Lrɉ 'w0Smрel9 ݍle;5 BzOGbˢK1Vpk'h}ǕSF~7o⡙g#c;7u3]|k{íqٞۗQlE zaٹ4}d8h܌3k* .w7Y,+0dkQ7҅: Oו̥tf(G۟-eb" ,ڎi=x`[+Ħ󁚵6p=sZŏ3mJ+ mi/)Q(WTU>; L;e5۹ 5>nڼG vE8v0,œ$\cҌ`+HfT=4W mbj$1!Ω Z8_d3w#s L7؊0* ?z _f=8^TQ/rU뫹=,}c,=S\gE%g3¯ZX"upyA.yTLBh c .`Df [@f`g~Iƻk0X!(Hkw_yCh< 
ǪVT≊MߨinzqO(>UMzLJjWbטEռ;*ܩXM̺DxhqUYćdkm p?pt"p󭶦cO' Wz@by]'b O^Jt44Zڬ{n!o )?)ŻgM'GΊ52 הuH|gjW C~[ћKdC=8'i R]䶩_ȫ+Om[ vpv[B0{ WRQT3v sW̙L|`wy6PzlP~׉$ dy*M䙀Ljtګfn6L {n65uYoJZOYX0&I#~KV_(yHYE9uokqd y'Jt5/»C _TW}wL9Nŕײ$`}0JKqnbtBQP=D?Շ{L&;&ʇxa? &k{6:^5fvDs9@q'&xuCf4;7[Rr͂fEcM R&볓p^5r28_]z:]uJdxAi'%ƥMk[5.ho=br6"m1 1.dωѻG:$)H$% fFx)F;@_~ׇZeɁ?ԕxK'Ѣ7 \Bi^ȵ>G<֯`fTRVE,*hg XL) wR#~CZ\$ta2}Lz]w{rAM i#_+4!\t(zF?pB 2(]Uœpu<=(s-okԠڡsеuHh.q{Hsd7V*N z[CTd sd "-DMsW+"3T>٘}^߯S^u?PW v,^pWPIcaݽWʅ M HLqą>XfJp]V/od6k qr P` qΪxזEk `!u= thy4qOS12x_t݄W!=?t8'3;TJ%sƇIyӷ*LZLR8(p8;`RFp,cf;O6̉ݯPk(z|p#ۘ럏+0&:@f'Ӧ aALeNiΣѷv]~~~{Cb;Pz4lwu;!JG ASLfL]NLI ~9B2<\K!:xA!~|\zs#;w|ܸn)嬜}W W.'m/&6{qĽm6ߌ-!-,]դRjRFKE Ļ8g!y/1mE즐nPw7RXX/(dĐUFIo|bZ{৪PHz)LRwa"$k&ėHVץxRL+9e&T^H-ss){j!zt<dL=G;I u me9(ETJ]cα oE8B[!.cwF |d2<֚tQXSK:|QqRF{A-X$B*^#Tdޏ/xh|}dvܣ]6I&(bkmL+C6 av\96ǞuU9nͱ\Oy&VoOScYŖsҟi٠$/ RX`8 *ּG(>͍(BS4jՊ3GhbP$k ڥT˞_Y{9z{Ġ sm9}K uF1 ^>r [{&Q"2@y%a牒C`W}%^xӒqX{ۄD9'ALA=/lcɱnto$7œ0R6c2=*5]Jg֜}3Ѳ).~SDQ3[#,VXPqjwe3ťNV-Q9qJvHU7h]`^,4Gxs 7I: Eu^+= +&i%?Xdyo%r*drO3(:ִWTAr?ˇ {FvӴfj@/L+!Ἦf$d3k>>?D;4^*|)?Jo'LSP;n 6.~aZ,^>R9~ ^}}eL+_ᤀ_+I@vYqO;Я^~ENHN͌=ҘSc:A*iRtKԴ+US0%Wq|cnQt+i,+b*peVl[ɤcEnYǩ|{4_1!KQiemVμlC'y qƎb:"p̷L)ސyk]*jK sI{-jٺl »sev_>GZgHI`_Jh÷PjLNH5I 긟6ޝ<<kѮʙ'z¥{nj 4\,ỆoB^2w c ny<`)tqͫ:|"9jѲ)Z8$gG GX44 YόdmɟLhWXKI}`rOj;F|6w!0GɜUKacPL`z KM8F(P3$ZX&0i㣪yvQf X8#h#E^I V3[`ASE߆ 'd|Dqպi|@jO6w80dT?q3[BF|tD+wj:TCJV+N6M6+h]p*!ﻙLRt^: sw; ;GMWgڒBIV櫾 pRul IGG9 xEx(F3lVL}]OIJqEg\SG Y=dP"›RɡŤu.ݛcz;4ƣ9 <߻9iLfl Ԥpil"u3Z;Q7J,'=%lǟl3y=WȜ&o]f:왶ScV2JiGSm6Ϊ4,<_%0KsMQaL{$ ')rDbݱE*ncZZe=]+}GH;Sw9 :qT!3QP :(~tK&s19 `dukHgy.I RP2t Qޅɪn* a~aCyPRsHv &3ƁgS%E^ :f]kqZ46<ΔWzSg6ߛۆ ,X%EV¹Tڵ/+,}7<^d P[ƕ;-<N>,酫 oT~jt:3ۍؖ}wn#,PU'+򅩗1A}`IhZH*@zqmWF5]n{[sbD>;׳kCj@x76dAR\_< >nǧ@?Q&v '$,AXP~wh[MwSѢvs"2mZ)z6_īq5*OF71}Xl'j8;U߽߯{sI\5?u D2_)/W߉@ԣm)U,&3VXGM@3p?jX@u ـh !fl8B畈 .p= .d*ɎX?ԯ@^@X&[ճ.؅_=^wD+!B9z#*认Tk(4?~ ^bA :(qK0o"b_h} fSү΀7RY.(0J g/w@ t95ټX2[91ntt+|WI-b˛B3x1X%-j/& 6<"KH6eLp:դ[} HYw8f;2}pG@x٨yzx[ +Vlq ] Z~˻LƐXx9qn8< ,dgCI,kB!}lcTU 
R3/]Ts0$4Ӣpqq:jdI3LtMF%5Rx;V [2XzSf9٠Z kY 9S~|E@.>Z%CU[ ',S< $ CD^R}NTF/ua5@oI2L[{]myweP"rkk28E\]܏V)$aE2&\Qr]t3,yqc*V@qb_11 ^6_3t \+pKI_ zS8M ";jDhf)̆Vڙwҷn|B݌'r~+J}x T4it%_"|\JA42*euޣ_hnh=иۜv =Q:ZA\]xKaWM~iyQ`%2 ā1zc`Z!MQ70b#8 OJcpr$DHʫޖLt;D[Y:D r\JӅ۫5ui)j{Y Z{$j @Fg]u\PEw`rwQx35Xj|}%ȞѬ[CsX0'yE6]y6R;q Qk#k.*C6NxiWN G)嬠r㭚v0]$~hk8 .͟aG@ؾS^,y+kWqͳDQPwv_ͬЇmCIxekG"DK' !=I7MHAulGEmTgR6F&3ݖEc`ilTl+WLU;b<?]xHf;y4^R9УMVf5mYC c+U,i]7p HL#:*_d6+yXs0hJl١56nS&"ѓ|bfJA Y"mCGn84E4Ch(8%߅N4]:yNk[̸(njTf@طy=KzLjgy;g]pʋ(KvvH`؀?-;AG9Aס`)EtHTA$X#n- R.C-WDf@Iu/2%;wo/]$g6mǣ=W; \ٯ 4VژSIyzڳT07ѐ,B>vw{.yRl%wHF{ϟRA(f{T4g$y)mt(>@љFd L'4icD0Déz@60rөOw 2/$Xfsرl&m5>|Uudw[{M@,nRl $ d \ H磠n36LylHEc6ߠ!|Ħ5#zvzKL Ku3V F~ڬ]d Ԝ#B} [7=. A4޿Z(#Z Fl w!;X^]/v#5ݩQ9) ClYc@W9rj ”E_ۏ=kJW%FWɿdRM3)GiS,""#ARŦGBny%?.GƼ V=Vq(JWO.tEwZiS:6 V>qGjz[o3O`GC k>d7fJD7=@o9wXd̳=Kzj-j%}bn\C4h }RaD/r$^:';ð+AOMCIgjz6hx')܅D$`|=qlgx)[%G-:rDb0R]JԧK2.e|%KARWvk9gՔ(1j<>“)rpƮ"_HL戥3xݷZrN m* A-kJ &0nl/-4^ ι3{;jM;wDڳvL1n?G-!Pb}\4υPt+!o􆆝ekӸ>*w#t\zL/oHb$Y8u%}1{Q.ok6Eڲc 5$ =WmW]u5(2 3N&(:Je*bT" 0W&_&q2\4 [ѝZT2ԺأRkug``auM+(jR^}SǶOƠ|RF=x$jbޕLKHtޕH4o=Er2$'88ҏ2oJ|)$@x;f?IVķ^g |sd6W?C|TaKA}B4gf\\>F#泉Xk:G{;{~V@9_7Mu$jLl= bpj<.ZWH~$EGL7ۣuxj ^-KVXԙ"a2i7 )k^}TOf(%bD-ȧm0A7:rب"&ct[6^v#BS;s&A3ݚePWH0XWph&Hörl\ea{`mO4lעc`.k$*G\3zqṛ.Jh}˅2ӘN〝@n@slrx^;=^S63|I)C/ oYvpi-f|כ=g];nI(rhmS'-Dni*EJ1_mi(Y u#Ƀq0e 7aѦYEu{hu5|;^p>i9W4l ..DȃLd)XπE\c.SL"`P2C#^^j=z7Jɒ٢X`;D+-Rl޸CV:sCZ~C3`rg߁.tpM2#K Xvm/ALWMaJϲY=Y #H=I# F :9ىB>&/%4 pG؇1 1+gTF'6Z>W6K{ZxȺ8g|n״1T8{OBն`_`>L">Vf53dOMib0"jJϹ񘮃\{`e'\$}{"md_XstJ?LAi(q*n]W tpc 2aiג=O.N鉵s$<*^l*$mZx@jC7KQ&4ٴxᵣ(UfDn8Kk{;dsFe,ل՜[-ABJHW0N&1vtrTc8EL\[Tگڦ uc\,~Kx1EvYG4<冁!$/yvEIeHtcjT_̤5(C:Ȁ-(8[Z@9^NlQY\ gp+u]UTvEN5(st6Xݩ!Ilj}X|#-S,70 I:^T{058pkI=Xу?Ku إMfG;헟&x:"{q rW2|y>.cs("B޵kXӞF/ ATFXsr \km|DX4>M @VI~y&Onښzզi_jSENamdWhM9("[HMFe$$D/#?}vFx9 ʰds '8TD5oq@ QӅm{L!0^Mf;JR|Ȱc"ISIgɢ 84oiѻKd$N:o jxn[TU+]u,kB<$z)o񄽹2"Vp,xd)]պCGs牞gz~uH᧵*<Ì4DGt314Z pYD#T J& <8p!^uWezdh=E'}xϜz ɀ=OӰ+EGa4>j ȇ 39'\j%QڵMkAN6p g58J Uݞj$$b_7=5NX Ɋ}V%N%*3I_-t4'j1R0>>FG4ĆiP`=j 
+䛺0,)Qy12 xI?C@O^9g2b2do[^iVn &K>7dn -WFFB|\lnݹbJ!< &O~ϴA.'O.=߽'5d+"ݬyGwzPCY5'>JkѶt1)!PbTh/'%.Km`c׷BL3g>)͗eole'tܸ V3j"PwPm^&24l <3lK }|VIkx NL P7F%Z*;%8;@aBn9w p+f,$lP`<3T $|i-"8* GyBeE;f #ev.tI >}♅8'U?LckC '^r;p_T~Iy!d0pRw.%%W皌Wu6_'‚lDdC_=&%U9^3\V@˗uo1z{RY^AGw:2wݒ Ҟ9Yh+y?\JF쵢>JIezǓ×HԱR 0m; jq{uxL@e(i`LR)J.ёYmDN\3L"*|=;:(U=VpJ켚vf)9_ ٸoij& J<iR8w#2wf\p( 6A3;[9CP[ ҉wџU'a\B02/3XdV hBTA7Pj2,p:'?;ؖ6%0KbXe_AF1DwŸ:RKjU734I<L0r%Şi` fB[9Hr[,Z8H<;|G7h&=qGW$%1oM876o"(Tq %ՃJ~b338#njS[8OJLᱽx_\6)u+2(\z?a)i`*H3*3hoAς4.MTTD-a^)6]gj%:7>h4mA}3-9q"Z(cK>%h.Is&zcЈ8Jr\<Թ`waNð{. ^ly jsrGR4cb]!AQ9g9T H?GTTWw"^mX3a[;**+$=bZ$CP#cabFdS&T^`}CaZݠlpB%̈́/TSj| $ydXU0]8*5-XQ粜 -u O<233v'BrFIWc Y()Kuxj;;.zy9<┠& 9M3ߡBRĐLJ: 1=qĚ?ij,FOh::j$+?^ozQ\,4=^[[@X[ a݃C1yyZk6+$lO.\GD%FH5% P_`f cG7 =]]^J$E<ș((j"㳌KNEBczUx3+@9r@G|;+-h^QG;uM&Jܬ,> stream xڍwTl7 &]%!HKHHHҡ >y;;g:g@V#Seg#DC  d&HT!͠(hAzC0%WEB( Fa0 , " H ao*Ꮔ0vKKK6({BP'0 FA<1/:=p'(/rn(BFHW-G*|(7 8~ 0{B~'&HAp/ `1@Ü!HmQ4aAeDaP+j P0"`G ec_y;!7WB`sV{zB`(o_A'L~9@a.RpF#aP/4D[/ D AA 49 rn揀 1" &H0! @($ N(# #;c:l@ @>0ruۗXۂAN/o6; o)/!{B=cFaLU-!U E{T̀2"B5~g#(U  @@/f1K"fh: kD%`$Oi1&ƌ37B08 c p#I~SL YK?w@Y0 T/O/E1@ A Ę~Wq"Y;FoѠ|W`}T~ nG p}NNS9'u,;-:?taf2d~tG 3K pw`ZܨkM7Ubf׍7%tI/&m+Ŏ3l(&"^C?STc,:i|$Iek"g35f"޽  L'T&8U6tn>-[E˞{13Cc;`r=N 7.vBjVgk:XΪWGo~t赡d^T9#͝}{;- (YMDuY#W? 
M3o|05T|%U4a-G"?+ ~]饰P^Z PuM~ ԆOJk5;ɷ˶M5N /3rcR ]l*t*t]TTPǍTEzÒТ#kFx)fT*ř_L{OOE3{jUZ4sykͱy|X;.}_wP6.,CgHxJiJ?gfG˂SwC?8HURFV8As4jtWquփG UЧ17jj|K:d%m[QY\PT=Fxe Q^&=?[qLUЗjm@VU22>tq96QdY8rR  h-fǫÁs H{(کc`74I+9yR>7B"5n$ }n':AuKX}wiCxeg{UJO+{:]H)o kuRjrt;״mji+KoMcp6ApG{Z] !X9 ,K ܷ57&u%cE]kU MZOabD8,/;\k-!< ͩ)ڋ 8|lu zM+z]چ-ҀSom"_,MoG%="SՀ^H רܜiNxr; #e5qKLTs ผ:^ %|V<1μٽpͷ\ e%>y5g *$BzBSzalZ TAwC_X:,u%fIy[AOkՓ!X{ ټYfM"| 2Cs DyNI "|-}f$^е˷$[ ݱnQvv5 /W9bG]L7fwG xm(dݘybNI|S|^ِנ&bl t!<h=9ݶ ,ew 'uA7$s̖Ul$++I-}yp8K^Upx+]y-ېT@jC@m},@KjbA卢=9loٙp"@7rMgxk$7?fS_qunf]cO(—qs8Anny׷ߛΦ_ 7s.Uޟꢞ.~a?s Ι!wO՗^2g~a$g+)@҅ؔcDS{z|h'(L&G'haSotӓ#TyLbO>m}9V!'/?o3g6^[`Z%Yɓ.s=5dȯJ 6>]I-Gm{k gJ7d!:w䁔2kh<;q#DĠԈGߡ]Bvf(-sYLi&caFGk"fFFDMl9$.XHm`㉲ePܩAf뙚o]\XH4bih]K*Xs==P~zw `QlM2y s|ɄLO=˼-e~ZI= ֊70Uίۗw ;G{>TvOWblMٴ6|C ҽycFvA׸AHqaNN[* oIAsE;1b+ U}!)_?l?.؝kYry!`y_â1{[Fmø+o$=oBGw |slN W9xO, ޿gA`5su%nwP':K5/;H}Z056lOI#e;+6ۓ|Fc8TfFŞvP$7\ ~7OfiN*qگAٚ)4Lw'8e'7XL`Elk ZԂ$+g>f͙Hb_4-5wѢAp\Y7MQ4R:>]]fpe Z_['?ںV` d@̼T6[GF ]z*Zv6IZf?ZnW9v|$"fw|s#t܋·uqDJh>2:Wd'7A;qrW}-US6n^? v9B,)JD ~jROows6?.>j &"[:Kw>ynn3GU=%U,S7ҖCQH>q/Xji%yk\}AD5w6\3K"G$#i 񑙿^:0I@&@<]B>U! W1h^L. 
zt8i46>@Fl&#t֙- Iv)]"pk+g'4Vf,B HN,Mm =Eiӌ<)2L.+vc4 FAL90|^LuAQ2i:V!oNN{[ThΙLt4TWQQ{'TO)EGk L{j׿Rw}',Ӫ- ڂ0L{ʻ\vm8h՞ЬpΤ&~ԍ #˅RAℼ:pԐ!Vy0_9CmLӳ2|x~ڴ$2EsbUVg;s;/J=D:gGzȥ>!sTd.OHa' &k ^snIkYו;n[>}|{̽~KѢ ꐥYOM0eqڌ_\K\=_d5H/Ip埒OȍH~AϨ;aMlq/r6A`P~b1ٸXP:FWøD{bZ\oU (ZZn$^wo䱨V;HXo>DJibNG*ʄD;v@^uy+x "} (eZ?δ!涤+]D^u$g*UJ}rKMZ ?c8X ^I#ϲ >aslPF>q c9a d,tEDnpF4^z1{l9mHdޟOQ%8)d*Z@+4+K+J}vORh5Sԛ$ڃ8w ;?sE!6yN,,Fϧr$T[ :]df$Ft=Hmw-M ـr{$w S|Λ_ᒑ]c9ߑs\ZyGV+`yd(/DlLu\4ʦ (,ut_pޘ8򚖥 LBO HX2f7(eST QZ˃|֤dѨ)]>k7Ƹ&Qfr֡ۇZ2 S]D"jmյS̺|wA^8vK RNSɟ͛bI%Hkš[C؝N 鉈Ӑ〴gMc`˂麯OPNw~Jȃp՗a cT*#1x6-c:r9MFU,snO(rպtbiStb`T>Hqm 9x["9f~HS }([8–kA(R=[f'efico0@3DGI1/uQjSjb[/+Œ~ Л2 /\U V_l1!U'cHGX)?G(E['0ܞ-&QI|}n+Z k ɿ~G\7:x4-ʮA$W+i3bECʵ+)wZ SP`!<`ovf>fYWol"\irS6H%vĽٍP$SnsD Wz-?LW1+E֐/ljAѫab81*wr5jHS`_.*s!tudCo+ETr̽k~CB*̌̕uExwr֦zXZݤ!@alYMb"#GEț ,m!l{Xj47>ʖq_'G A }L#4:\\u%&kK <5㨵b ~Wؼh|PlfW۪VQ#'OOk]nNc> d v t\ro߸Y34Gv)tI]+\|VtPu@S?@L\OuŚՃ/#g`^zȡ()(ڼiaQbsCydz莪r4)(!:Ls )pOe\M~BEfLqa^#wJyjr`;w>:H4PbZF@YIѝQ6}"ǟ +> stream xڍP7 (H7H7KJItI .HwKw )J{73=Qj[@203S ɅIG vB1AN`T? Sg8&e SA . rr89%sH-JI' p[Y;ý `4gY@`sS(@dhn h g2(ll/njsz p;[AN GWweS;П1`` # `s 9{/aſXdڿ MavP0 ` *2άSoAS oj n W;;'sG;CfEZH@Pg'IA{pi-m ZXNŞC vpK-0@^NNN~ [s6a,)| +OS@ l 0YX ˿hx}N?~gx#: oY$o`99[Q5?PK@`UWw`-e|bA܀<Tˀo<2..okjx͇ϫ3|` [_/WP+ȿv,T_ UA99*s[p4Rj]\|SGGSLx/ _C pCap<=%w?_8T?hbg + 態 ?H o7?\ r)?0<_)(d k C\1svC=g7?*=AUbsG?+? rc.̅mj[oضDߦ0y-8FKbxx%4ۦ4"սaSZHsZ˭qv C^J 6M{om[&Wتy7n=eK#sj;U|oޕMEiEeΒDufDg;u>sy5=Hǂs]襷ssB˩TodKbWIH,Q]"蹭~fDO7.HJD ,oWLʓyMa]̉U%69MisI엙\X^)ݣu4Tnzy+]Y=U*n܆#DHCp=NCu6Q14JFl%o29j} aJO}/4ƍ$q?@¨"!iXt\Fː:W f貨QzI fkXF#X0)*)ge=@ "ⱊ{K3RNV}uD#$X.W`T@${6K3Q|MnVi9;!%j-+k@/+:^UEY7w"*M$V7\> {"y_vUI6; " iz{pQk#ި'LH0RegPd0Uq+}T,zc/T,Gyg(S|U&j @ ˱4+yX >g^`ݲףHx:xz~GW+@kHP.&=/` ꚴ}=VwTȀ~4=i_gE}W>CBn\9ֺ}y 0 Qf_IيQ1|":Ä|,**?,Z!ۮs#~~B>RECgI E*2ƪ_9h%Wxc0?c-6KBA:ZFT2pV3NlNQF,.`X -qۘUl=I姗%`-wi*WgV4Z? 
gMp1* {/Lvf$g ,Niyktma|Sk7TKD:GҮ;ƺ&<B.'.jі_O{^ Z2Tm﷨ rzYon .s[}]Nkd6#o!$dK[б/5tԪnWCz2YbG)Dұd3g+Aj2( Su H.= *~iELkqeD "_;&HdTh¥RtR*]="l À],D 2iľ,yZrorx򋯹15~$(}QkDs%x%) FƮ(КTg )bv"$U}@?eMn |maj?Dz Wg 7PrCsEn-ޫ'K56$<ڭ)[Kp==VvT0('ޓ㷽?] uOByC|5O]l$͔z֏C0zU c; e_TxOz]񌲱xj$svT[y({F(>@ '1/}N5{2n_osgHj2"ib˳1=m:}]‹iv]šb/E )sMޓu_v B”tN?Qan Ie繲I%"R1j~*Դw 2J%AL[ºpM|b r# j&?u$D Y[4|JI ,ZFЅEl"UaAts@tYu+#>1z5iK:/{v/%29:: QZ/ $U ~R^P'@l=M]͖ћpF>+t EחZ\GŶּj1eL;IV 'T~3ìkK6C#A4p$LR9?AI lG"7 cyIS79@ڣ5H˘_,WBPܼyTˀRHyګ8bSkAZi{#[JB[H De}UHNHpdD~lDS 1eDt"AטRG󔙟%x; ;]O UNxX Qy-8gGcm r>Koc6Y!С? M"+7e@£(whzh6 HKMpU!Z%-fg۹2Uh [+VК{$\o?J5{Ik"gP[ɺؾ^m',(yI2rk1rA&WiI"t;oc*=$~B4\l=\_~ZĜ+FC6x:E?W<9ЬNbmK?ݺn \k~€!e>B@TӋٹiLJl-.cL 7z~x I*A^C!4j8ovcö_+ cf.pGmNڇn&5LgX Cliv7eUsX._bx>N: ?C{+:c>ʎ|/gy)]|':i_.Lqء("v G ޜpݝ1z{%]DZaĒQMJ?v T4yzB $A33 lV.a,^|%'  kNT8Lr4ůp@C/ wgu:j+dv[cwޙO^F~C9weQ>8f:)`nb$|8z|Ey,of>\~'clUDkޡBG/I|4ջب%^F-'wkU|qtYz[" }iMߥzJQoF!M 1.kHpF(Q13Hzy k(kTBX WN$nO *4l$}źzyr+RgNMy(A<է;;KLiVT㣆J8TW0<*J63Jݛxod y0*3fuߖ!1⢶M|x- dL+YˌE'0su yb֡OT_L%`>vrVW"V`S4ei8~5rx(]4d VJ8~WM+aSb$em:[P !/AM]$Cзevnyϔ~xǷ%=e0|Ⳗ4J(Yd) 'l30[tCtБw*AX㳁֠>XOS3A#gJnVy;Vtfl`)rAmJP)yVgwjr< oCN]Q}pъHH 1+WŸXfj '| |:A*<ɫxwe-u槸ԂH}`7l|F‘qz<Ѹa6gJ[쨕)y.7Fm&!J^G~me۞-Oya\#Sv yA~zx ;e){ӷ.nГ5 -QrG ܐ:W|WCI_HsPe_Y;ܼdVk-yֹ_t)bZ˾o7tMNi敾+axv-c+հ2f{To^x\ cߋ*{ gNy$<0.w3y>eC6"k&biԖ |c,˛n9i,:c;.|T}kV%ͦ!D"?qvDD7},uC\0?Z}o7~ށ1~^Yg.bǎ_mi3c!ʅ[V7RʖA9TU[|}~<4-| <G.G6ud?h=jTif1DҜjiG^u9")38 锍\du xQG]@k)3GI%Ti7C"p}`NDD=͙45e2։p5U`58cC{P&tAwBqGsU%AVww:*OŊ2Rk-{rTmMKRҸ1}ީfj=LD4 DhKE r";+o i'yqN_nVtKxi|'_!֊߂! P]kvY&E+ []GRz@aFp8& f}OCD )k+N'"J&XL@\~m Hz%W-/Zex23OV'oYyS܄CpYlmr9ӵI:ܐpHpںe$ X%J=6ÀBbpCel6RƁ, T9!H\Y9\xF}UvTxߒv,BW X_mVE٭uB5kX+[]L>]/ iFSZD%J=Ա.ZI{#rrgpKf^^bӋ} PM2]Mg FN{G*MQsزo {O#X=99;[ H. 
2 a+ tQ a Xpv!$?y:Dyv .qp)k:5c(>Q1V,(e'qy* ޫ&?Tb*ä' tYpwQ\=d ΰF$!%d=3gϸ^U%n zh ZiڰaxTjOd<VZʌ?]eMϼ?}%m\7΅wTJ8K$Z؆)yiJɁ)E -X,##6j@VùbJ]\F<-]^̠4Ḥq3`¡t_;I2Fg%D&=1TڟJ7~Mdr\LcqxW-s_%j$ߎH+_]raef¢qPC+NDn:Y(֡JFJuX %{V }&:x|u}>Cݯ+eII&' 4`Ba6i.aO;ᰏzVg*z-?#b=V +v1mP޵^zeHb& P05=DU{ᢤGdB5O{ޜq}b 84Bg}0z7ŃYKy落헭9쎳{T۫i (11jh3qk0VSq/g υBD|:DA0 J\? د%^DwਸFgnU*t:>:Cɩv>)˥ۯn|E%aMٙ޵Mw/ĤiYV Kr 7)-(R}kRTwӝevmKߦ*jv!mɦK$Q% +Qccw YTɁtx"idt &nT= S]nyչF~~Ũ.!PL/Y#TE,RƧ).MiW>^~yݴ8qDEʕc&,*WgLw[/jJDENq15- b7YIC"tyM %'QW?&k:jscM1SQ ƄXS25EC1E-93(HY H|^yd6OvcǛOU47 m`iC?f -vye/kΌh6MVs$ˏ/y?/uDT0E(2xj/no]Qmh]>r3WUf2{B#Gr;׷l)+ye E)I;ckBCyJA A}> stream xڍP\ր\kKp@#ݸK@p 5'hpw.-}7|9ƪ\EIb ؘY lVVfVVvd** -?rd*-#U&ij\ll6n~6~VV;++ !IW9@ $  K+I#=X\0 -hŞE rpJm*BGf tp 4<*eXY_L\gG׿Klls3h #U _:~l?>}2x0s?EGJIRAL\66vQ1]ǿ|e_從Jv{h^:RN.@Ϡra<?HO=_?z;]_Sm_4_6-_'/1I4W9Y545X7[q0뎙ټD^'Ou7 bǮsqLM<_^lKits,` E{!x,/E`^-K:XtK|~&%Wׅ??΀_` _Y 9,pVB. __ڝLuX2 zh WlR\쯹j3Gޟ+:?߱@; yab&d]z[%΍i{" Q{KuW5@B#7XN!{K/p~V8 MF ,C'=c%wA+i:|F+nʭNhQ#xy9P*īŦge;]ǓHߥsm/0;8@J?N&7LSmuK<\Ӛ:P dnM\\daLF)0bZtq:9bmOopd8Ԍga5u#DWC/ a9PN{E4+hTƧˋ@a,s,D0hl?uˇk@[{3#./ʘ~h<Bh",dɻF5>g<j :P~}N5Sa5N:%@lYzA5Uor֖^6 un.ׄjQ.'t##Z3@l@tl囒v^]j*Ͷ᪥~Sh\<6NS>~ObSDt#%065_b"î~&ol{^x<Q⃏o=r=U] 6g4>zKHiϕ'ZJ>iG}G@Xu1=܏+t݌Ti(i,{QOjVJ=/' Qn!1'GU{:=Ńolw^? 
7{.r); K|~iłk'V{%qCx1\j/Z,GD ƴEӐM׃4SI_Y#ԐI=7?/Z#t9X>I)p"23 /z^d?W MɺߩDK0V>F<ݢbB vB5|w:ZN%^dpE/uĐ8|6q U:D&0HzܐI` 7e*ߺ1L+EqJر9XoΙ\ *]tɆ|[Mڡ̹+ e5~j' f~Zq4bɕs¦(nݍ o[l:% \x*cc0ҚrCCZyM-VY-/s=iDJU!L{#(!yv=IQsˍH\~1G_';7}q V/?/>z.6QC8b؛Dj0Sg+ӽfqN\g~r4-"HAF hXSJC2aB/ƃB(+f>X80ObU`w=yhB:~|%ِsh3k՞Šwt+{Dڇ-Vd!Ir!B̝`ۜQ;V ['cK`Gx *)[n%7^nVͤ_M'߄ ׇIIBݜR 49!ON$G0%VC;+|z3Ic)Nt &yOc8*6#˂/0xХd& aneDLSe4f7w 'n1:g׌}^%S@IN{T2\bC%a8?"Yo Abb?|ӝI&+2-SZGb~/LF['t<)9,Ҹ=a3z1=Dm~0 7[: ux Of:1h_n;=/%!x#˞j 0GZ6H,%g pPM?HElJR~HsRA?􋶏K/ME@7]~z{6} Jwl+6˅ hqgwX $-qOE/Fxj frﷁtkE#8J=|TɄLC+D* ä\;D<0I$g%(.}O&|JAڸxN(R"fFһlt}oQ r1 ռ_m{[ ZT߭zQG]us|YcijV<9p"3|V ms7_B䯔PUdNtܾN"0$xljbIhG| VӳMTMĔ-As4QڍuRsCn?&ǑG_J10*n(-mǹG_}̚s ~Qa EgP$æ ;";4vw|75g5f!^Gzng*pr+<ڄM(ȷXޟ2-8S jCkoOZ&QjD0ÓC"t@l.|& LT_5iHc/?ulHxĀ}jT»8iH_g˜*KĚAs [Lߦ`Ybe;G;ܙ;&+vVO:3,HŔkVO;ޅ)D5Yozclu+]HC5,|R8<VSpļ:|ʣڇE= j\t\&^W0aTd G` [l%E*K5{a ֥a7=l΢?<㧓?w^>:*vRq!Иf㦬%uC?^wșFQ.Ho0&g3wM\ޘw$;{\Lдh0Oz1lz=JYe/2SFڑYfQ|ؔ<+T-Gu^{s'ަ {Dx-CWqatk0ӓeW$&R&gRоxZ\d㎹_:R;Й~nݐR 2yu?(8>˹GƑp\CO(]'{tIEt6G2el n͋=4G feD3<:)/P dqO{=dӷ>z#]mmZ@f#> e6t006" g0ygNTVq,M-˻+I,<(C)b6W.fX^h*bY"r"o.L/.!-Azq;߀MVĊ>aڇy5Mt&Te@0Rš;A iDMvP4\vNDF-Uzy`{ʨԶNˮ齙5jk]+cl[. 
k͖V VbOdoDؕNO;#(|WcvNؑ^"bI>m1miyKVLoWmc, qa:23^FŤV[Osh]X.CG.TVC<_>PU$kz?Ԧ_vr-I,9s9IY-P[c+ d+K27V<b'K4x,՟qC9pSqN}ۋIߦE1?!-kl"ɏT(p|J[=@(yikeECkNH "8YO^vE:1܌\5%B pfWn#5J{KϷ] 螺h)΅?eDrh1Հ:2)SrPH PzGnYHX#݋ukaX԰['v$8~m;%$w8Š5W4'3ǭwS[~E:|iǚ}gϒAWXZ?Dtj$*`a3ndއXR~yd3@Bo"Ye uDw>-Q>>f{/uŊC)b-x?=Pmec3 C7j/oF'.w_.s:ԕ;>m 8 ʱ"$"[zߟm/$Ny×=:1LK?Ϝ(R~ `$=pZ~kDBZuߙjoas"9 .MJkxX'϶n1 UީF6S lnxW;Xh̩sX˓#*ea;|VO6ElЫA؅-fb|PBI2-WnI!+'*< t_'x%BL%MAsv/DVDE  *ŮcQxd09 |sCfGf*EZ6Y6C}Wu?ox5xQS*̲|DV]?Lq`yAbp4ςioш/dhgR?U$~Y 쵈s>[MjWQ"?I'jYGѻ6 }kvWk~oiNҠ"!$&tRfV1>W)N_J}6)Ur7Mdvl#Uel؊4\+V\+skqiMy*_ vGc͵p4?crHpy~5VsW%_,{kr)Iyg)wo/8j'?C^Wg':Uv%;-u"㞔B4.;"L2!5;Lo b2~x(E#;~Gqk[B o&1`zAz6"ƼOӖhV5Ҍ| _|xCuSW.TE4kKy@bJ07>e49K\ F#N^0[Fd 6[T "Q7UQ"~Z|{1geFzOU#&'Ռw P@`\_̙O)R o©&ђ(RcrM"j f "msP$#*<yтEze\)!'TH@zdC*yd$ jU;m"riJjT,S+lx/?Im H䅢E*X}S3X"H8xo} !xЊk916 މOAwBqNf!9)Uٰ ]4itIxs6H~[FFZFC jL4# 9,wkXg wuu+֗q?_OrEb)cutdLXӱA 6zk{Q֗#"˺>&^z7*lA TlKN>TV$K{'؛Hm嫘fJ~k-NG-ێQM/j*LdJU M>sp80XFQnbmNMܹ#izm'/f:mAd%$'0L/+G$)]wAHn7=jJc~#t(bhTi덜}HP>ݪbtb. #+% \2o=x v)DtARV|3DP]+?kdWmKѣbƕatQ鄞{;Lk|‡T#o[#ϯ|s/DDePImLمom=¢aY旞"ХٌTeԵٻ-IRgEՃ!HiR1콀(Zo8UMWYP$/ވPO'Y{ X- slǏ=D:Ol8oͿ1AŇw{ CՉc aj?oQ9 yHq$0-P|L`9S 53MFީ )WTmF\Y ua3:vӛa,OV{<9%e$#'yv)+6oDj16AvW+&}`[.AK<2lM",J7iZVܯ 7."xd\GJ*Fu9 奈 ߌLDU*v9"9Zu;#ypJ]N" \wKV88yc4iFۚN*.v?-H jae+f]zN$Ff yѨh$6˹4D}Dmgg{G=D`DzLR,΢h~9jG:8ݑ~h3@xKdjkO6~CϏ Y꩑YF%jvԆA ǛtEmcߌ%,<6)\΄#du r4s]B2ki!O.Pi߀4шKl!Mz|r]glpDkF+bipٰYPܗz@bH2v??pKV:9KH%,b֎kzE^bsb` J%YtEYFs31%e^UM0ͦCJP.]m,,aJuYJ>v,V?&%_:6%MG<]a^m'۽\4D[*p>bud;KZ9,7S&ƜAL߯6؊T&lj:4 o=ɣLS8?!kB 5< ZXun߼6D?4v*,91܏I6fޱz/\PWvgƿ:) bMG/㙑7u叉WA?{s̭m m6(Z܈0,l֑ mxn;%%M%{irmޜʥNLN{|96^jPܰ`}o24$goޏ'ּ O m*?Cg! 
~D#%jݟ3?Nnu_\L^a]K4~E߱X/^_maytCaIAgSэ;H"p'!Zמ(uQEY.&lA_R6 U# x0tr پ'NyЉuaLؒ=y* LHAU"nrXK n0BپPAP̉ɪsLF{JJK'q 9T|͝1iTy\[8D|+z k拡.z8H2]u=^nѵ~ސV.aG r׶ Mhw/(\mj-gU4;o읩t1b~^1uxOL,!<_Ͻ4m^v|qW^X4;Wꗔ_v{a=[@P i[ iΔب |SQ~;ڏOdžJhU f^)Z|mB g.Mj\i[ 7j,=/(5͏c ѣ-#'VHj6Bئ5@n.]14)*Or%߿8;s7NDc ()[L5m v(0EzBRyl*5T$L?VR꒽>+x]*w#F^R#o' S#7 ׋XxVP64`YLA6K qI̢O2LO^.>tX|s;^WhVQ BπnC.)%er>#f%ؔct FEsu><Qq b]+MF{2ƕJE=ڒQ{BLDtycTFF?د{ \{ ʪΛᜆxǣ zm>eME0Hg{Wp=5`4^^pLٱxGD6QiDI;&m\l U0@dNj;8t 2h[@HG`|0}(|S::JO/ J2͍m.&a swR7>}t!%\t/iX_ b Zm7紉ÚG+iYi&͑Ddx _`Pǽ$fҥW);eq$qD ꙼l:>[)c&`ZbW^j5}L^) ٸj.4e>/he;Z"SP oEq\&Se %Z&[{RRZ}tkR.8i=t=2a"K$ẅ́hc>hjJD=T>yG0TkCwIEI!iyΊ|`:bZ_0 %|nC>&b|$[=EIs)y~t\Fm$.6(/k HueIr(2{Tӕ9;-335JG>K.,wİZ)CeEY>} P ֚ ~=Cy 1bD* 4 |u̅k6#Xejpp8^ů'걺 6n1і11(-pSg+d% @yۋ'/]6 \q>!FsRwVŨ<֭͐8E@('rXj%+hGqa$c#$\Q,tَF4T}3'}uʱyu\x77,WV<#$~}rӓ{TQf{NFgM~,,ra.}3{",04_ݚ@D0BwA̔9UۼU.q)26s㷘8q?rX|_kΪ  e؀c 1q:3W&$(F> stream xڌeT\ӸCpw4 =l:W~3?fZt?Oծ@E$jh rtpcbcf++hhXY9YY44T.֎وM@2 7@`ggeх aamPd9:]]-@@kF`a;@bmfP4qڃ2ͬn? '+# tx() aeo Y\AN@(?@]Vt¿ ip_&ffN& k; @YJˍ``oabmgb 2y*T*t5svrsev]%0FK:;\~v::8z:K1wwbtvvJ $B#XYYyA t̬X~'vK[ `*om}!xn.@߿fnSŸ 14.^=VXe3sG;?:hEQ%m QG-&ebg0spx| bb":X8Ԩr gOK4@ygb5a=rQ$ng/ ,Lcaw7:(:jŊ@skwu3%h9-vXYkL-vv@GWW @ff N\A/JUαsqL\\LXACe-9_ `avpt@,]~-7E߈"X ^`X 6`@ P?AeP2A?AhA4 P>8A1M\AojDdbbZ%S3ۿŶ@G@%@@E{? ?]sG;;,@,,\@kaWjGsL,D-?[1+o'+_ _/:,ۿ 9s`{MAA#˟TL3˿h?A!@ԠA!@S9[NHdAh<'gNvq^8! 
jaI*?@];5\m  v&VCtYPn9bA1ka@^APx >ȁ"]>3wP]zfK fa6 auL{߸5I9>؁0C)ٖr~M˕PуTH$i' !MpNvAR^Ƨ#qA {*!q G@݃CgeBjx:>_!2v0s#]6!r_itĬ0_C Fpk4%)fe)/XOspXKvh$6a2<%Of ?bZȋϓ8Up(xа3~duU^MLd4D+Q~M{h|h((#*Sў8n,[19qbO13 q$|Sae %} ˳b|c1XC-]SosTPkd z4X f熙ԙ\(39QX_/xH*J"06},19.:-9 5 ]7eBr״>HhR5WdG(Rd9u ƶUNࡷQMpfëAӜOlozb!?w 235NҸX<}ECd?Dkuz4GōN͔+?l~8(d*eޯ}I\ d[) kTJeN(ERbsVZvA#嵢 $*Ε"E3EwXYϨߣ0o4X?1ҟʩ8] f,ARi3Xܫ]O߃8t3_n$g64)W=˲YzB- f <ʪJxPa"K))-Z7LZMܰXU 7HBuz=0ش5 ?+pnlZ (=<g_fڇ *5'I<4z/*>lʪ\m~P$^~)^EgC w#0c>/[ \^z4g}OPH7}~vN]DQxJ!+>i(cf6R,c YpRzz!4Ά_i0LK^ƍ$=DT1JW3iFeXaaUūd<6Yp?Whu'iYE|a+2n2ZLY/J*Zŗr||VZ}~hV$K7I-#&iMnX#8SaK,;mVqpfg?'Q*},)-D*f4x`H~(Dc1U/^G t_ ]P/<)G>~ˮg(OF(bnhʏmT,|L鑤bO\MU>sFʦ(Q ,n&ms3"k ϝ?kM8vǼx"e~G{VןBSS0'}kQ,q~Qq5Z :Ccdou Bv\i_pU4krs˕vϴOgE)L!  CS>ME`J+C"d̖yk2v|?=֍gtWcɭ/Ѵ˂H R,֢!rb(㴸?r}|d -rhN\ 3)DqCz!vn1*ŘELh6EvB#E? N#վh,IV%硕8G1j[`~ŭ1nԑie@ݍѼk1qcG<#զ֎\q$/-AɑUbas hT^~$ҧt82B^=~\z|/v؈32ij(ctNrCwQe8+;!Ǽ$Mpb iL+ۤRowG&9`ɐ]w+ULBNr+{[s9I>̯顳ETq@e(5m膘)dIőj7zPzpm>+(iT%:;;`s!ԱK[bQ+Fjk]47&ϩ.nt%BS9; ;AW(j`WR7DPpuD Ud]>`wtP%ө9i{ !]H9twoB0q䭒`Dvpf3u!Vk2 d$VʐDb0 lp W@rLjkNos@zc+Cە :* 1-^ s{ ?"<}Rڽi3A5׷ְB|JiB4tt9L ;CGQj*(ȹ#Q+qlp$nwME9l9-%e B(E_{ S[||wN Kա>~?{ov%^%;؁莆x1\9{qB@WBz> j\lI) +$ڪi-G?ñbE*z% n\^L>B9FNOrZE9{ (Shbf1VqU>s\{:˞Yf V$aT<8N M^a@̘${ԢBM:K4xh}e;x0Ǡ@O^&?_vQ'Ὼ\aݴ&P8?[/Oֶ`p|謜9$ L&ڴ+džνֺ:pc|[ɤnu5]ꙹIlj㞴􌯧fk!@=)o6:'%0˔ϬubTq ̧<}J2C&C};d:!^~(J;8/\M^kT2v_vAq#?!]^BxGY!YfcCj#MxT?⾹*ަzzucÂ#.mF7d*OB}|_}z+>Hlc g9o Oz߳A3ۅL|%y)y]?[8+``K&C{s+cv%n.;_M$3{\0*CxxsϪH$oL6|vI$Se Ud Gft=hGh&Ƭw,M 3 sƄwߌEtcZu_y ?ptO@5b M} Pqrʛ"#fF86ЬVZStz`WPQEm|BMƷ S{$+ݲ ]v9 vq}tvtT.`+ 7lD-Q]}QS }nSwKd"*s6K+o,qoú\Kcj8A' _+QYڱEK*=>&!+9bYOu¢,`P*CZz~y&/_Px!ibZr">,騚+ qeF#.E/r-~sxfirYjf$enzRroAP(p՚ W+cy3 Gjr,GsVOu>)PFI.W AW]w|%۱s&wu^,ŗGjI]`w䅂@I$`#F&{OU]xhxw%Q ߓef.jzANwL9ͮزXAM׷ٛv 0LfF\BX~i&N9'ӰWw>ǖAP J,ݤX-~ڕQlC\1g0wB4Б">yR?ٕMk -K5(a`7Q/qP-~~cul q'+߼% %|\'禒t/)H%pxV'`TlQ;Oן9X!cE%hQ}RIb$0dYz#|y &P!pD{ o+;k~]V y^m0tcEv4xҽPRpqDU{bo2Bv.%b%$xWUCtz/I{xP0fS1 uƞLhn@UNz߾T]/sy4 } *]W8W:J|tfmwۡr+z+NS8xxg8ݼZ Y.86m7 j_׷Dtv(Po[ OTuQs`KX d޴w 
.1.|$)j1[d{ڬ/kiR L쌹 s;x"Pv{{HɷBZ_2l},V}&_S_1XPĻ2Y? ]$0" \EfK u Ҟz|WK;(Ų9tH#]33oW ^[aqq# ? ͯ!ߨ"&GC\0p4&s[B$T/\1"NzT.jz RqEbDs*rͳӱ=wMq_íq9OS(;vC&NpD;Y4O-¦dU#d_͌+}꿆y˧2>eMlooo{Q -a~Лrj:JZm#%*w>di r;;FݸFMM*#Su޸jYRkCXlɜ&qDz Ȋyw>aMk^XiDAa %|8kmQS/ uYĐʥ R,$ȕ|`5fuS~<eN8ʣtW3|5:Bz놻ᾐ TIAK1 ,嗟Ov1[ѠpXjNk6>\Y-4 =M?a[WάvV%CW%cx)yrR]e댯O.HxjΧ,sXaJư7$!bͿgco: cEʔGkSUc=d0"uCHVrS3] Bd$+G*G4mR)|jԝT[%dۻG37oh[+E~y1酜fSȏ}ၥRPCQRL ݛ-Yf7T+%7jƍu#-+C;?ayWn"6[KˬK?mp ؽ_dNlG{V);Ą1 h=<() \:ؾO);9f\Lq[#L bl=B JP3xur72*Ij@6;MTvfK#T#&ysC4BɩH,gs}jԣadSɯ\ȥ_`m˕gllkO|O(Uޘ*F"?ĜGB#A*=)q߉Gm)K8iRFY"Qu,貂,Rg՟ 4]y_i*"ΉNXnꙡ"fvr=?:B%R`DqgyP:]X8}{W _XܹP4^RC|*6DugXEa!'\6km:sAj3$KH"P+vY2I{g/~^"(Na'^իbG]kG~'eCdŨ \Eqwx8 2oU'?Lo'8kQP!sLpZ5iR6ƴr/=Ҽރ A#*j1Ͷ.I>5rph >&|4z8ck.r'.ۧ`)M oU6,wPh+0GSVkS2=QXMEwt9IuUv|v$+e?]I1Ќ}bA{sj楃 ;Mū}҆1`V KBauʗŭ'g~6?m9U5)SU.f+|үvOxE? `0B^ T4]Uh;I2Op7]XoA5!0w*?@MW@sg?bPW#PiM:`.b cC.Z3B⒟;mcj ޅ%)陙X%!j&q!ED(I3h7C~=Uۍ 3vL$!JP}[75+~g'ܸza%4";%PJ-}]xt8h6::GYPw%UqyzndtwPV`n|]JwlN 868Sw]de0sz\3=}r~^ja4#a^KXk5"1\9=jRQfۣ՜ʑI$ۣ_tx\&hĥs-{\,p.>lx5 5ܿJ{8Q*@HS-ܑ}rC6 5(/Q۠Yl< M$HjJBhD!<@ 9-OՅ? ~g(૽uJ{ܜl:&Ww# Ts5k\˜Em(l`iut o&Usr|S ;Tx)nwWe0oT.%&˴1)xˌ@Y47F՘q(fX(UAU 8%٨D" UI細!ُ_<0ƷK%Wk/Ϸ;^| ο>Jt~^ɕAQE A,).Ch:gso.Jx`ϨEh)~Aq_dHǍ!V%]~3Ow3J|t:\s1%lО lUUC_ES!dEA?e`h RN 佋 DJyܸdtv{tc+EFU4yb9m8)#͂Ƕhdg4:?W“nz Wirjua_9Ιp4 KUL&)?V>r"\gD ~ ]Uzv@z2iX. 
f'Rκk}?t6}>saidNZ'%+<8N4n2bvyC`xyw߰fڵyxee3<@.j8hepдtt6ټY&o}6}D=#fbz.Q?Dɶ՗~f ַ)hsEcq -{nפXo&v8!}KͭYb5l|E.8l:wDD+PU}$L#l2J(#]qzJu zmY %OnE^ K|]Έ0V\>#&TPfQ!X~]'( ^Zm@o(b>K{ZN#$ѐLJrlFMb(oC0ILFCgw)fAQ:$~HTX,|+C>I-x;UK[vbJ:wR]\\wz@IzD3;uK]jRCAj=;uDx|cj̈ЅoLc|*3?rz%y} ӟR/=9vMe_N5$qTBke.I[d>]jxffފ?fw#a ;|Mp/D]sa1}ݑ7; =S:_l0-mwۖ O4?VB O+큝޽Њ=ʪ+b#rb؎+{;`e`)ϙYVv,HZG[KC@ ${}d ¡zhtF* *0'C cɣ%p%9!-hy EcI2HwcJI/J;|HN8P1{׏Zi8L^Q'~i2(m+@ C\1 _~L/H*F| |'gG6Vb!$hNwiwt!‹]}I}1}})fl)2Cҍ̤9(ꈘҩ4J IRФڬk ^׋!ZK_^J{{+-i48j Nd"Kha!SY\Voc@gxޣhvZ]m>&PB4+M8=8^KĹ=L 7MLt]$Z7^I־wDUSfB] k{8|چt<..LmKm#| (5РPrShH@Rw26k nj͍Vi, *jO?$p+HJkyPGӒ"Jḿ f;%¼#B4\ 0 kym^/t-IZ 3AL :GO2tdk?cSY# WEopnb/_`E| B(C%p ?S&]!N )|φwâ802=3~{ s?|Q(VLKB+-{ #u.Z~O.)DУo?f08w}],j7f^x8u*cF|sU+TcD  A:r1iњG玈Oϟ 2HEq /. tU=~.ǩ ӺĽMs*#W}'CῌOXr9(e4PiU!zdk8VDj{2V'Qs|:0YnDh^rThX:kj}B)4vvႎͫ]|ehSH'yR~C-LI J BHeP0nq̎Q0'K VȈYyv:P#I//ƻjb__ln۩y]iH8}=GJ!P6݂8_;ѻ_ jDNb;y* I}|zz9suͿ5^uT*Ng4ˢ!dby 7 ߂Etͺl0VՠٖRᶋtP&Qq&⁄w'AȎ1M㡢hHv98x3tϚ*C{ 2Nۮ#tV"z0tNr()>Å0gźX~8[NSJ[-=J/9F!&{_R)V:( [EacZK*^m _D")H[Z J0ѡ1U(HuVUD}?rܯ/tjaI7D׷hd{׀6eu+j%hu[foغ*Tra֤FoDcl%+1\ocPOq?Y,eRk!|Zf?Dd66@'HJRxiF9E.=T J@,g9|M烄`S}CK`Vk2Z=D4*5}fƷxZMu /\ brZJ *5|:FdvC22ݽH'r9XӜ7[:R&|uugx7Aŷo>ۢ--N;Q_ގǵsQ ^%VEjULG5Z8G%|K6U|}8q{\ꨅ &# e~b!]3Xk=Y@ ;^2~Dy> aMdduM,*iẕ5S28p~z EPۚQ%Y֕7iM.LuB)a"Tݦc%xl|:6Hm!-7J PSJ V| _5k8Vir u8f: V_,q!oGdޟS@ÏR6-qƯwڅR|1 Ȋqt@"/W۷f[\־I0'ڳxBBs~LK -L6"+"EnWtGw&Jo(˦8cf@̈w-c4D,39+{xLbrK_Zƨ6X9kŖ{~B?ɔ ֦ǻTcDVcLcPW43>y 4ש8ۈ<{`wohȦt G[wuUU^~X,.E%pc^~楃dǕK G.Vx-ꪩ \/j%x |c(( q rN'nhQ L$**o-Fx(Vv2\1%=~'z2BsMQ#*sF LOčy8).I*臠kMe#_xo[.V߭L龳\P*I'@)P 4q~%Ot;}Ù7̠O9TFh\rśNX2"ޖS(ѐK-:x9⨫$TS&.&L{/.)~7*odI)_ aD6Em7!x%SD"٨p4JK)~L80. 
Ծ1}ൡW:_ v )=]f1e>9\ /&\>GRG/RV|a{[|t<:dYa2H+oi!"#'2ǨslT*-@:AF\W|_ɤpTS$9M00 }c>>I~h;0q󌰁fW^,XSj$RܥN'qK/W"5L+X3;' rA..dلBb#ħ…|#|ۀ܃!UYϰmY2=|*n*(YA#R3y瘔.P$ :|LlGś+wUU{N f q |ng탼sUFWGE{W=2!S\-](rC[s˝P,v=:jXLsB#C0Re YT>Ik0rMKʫG7u}b+zZ5guFQB3/CyFZCxyJmj,c/N>oVl^|7]hOۍ]8Y:J };'巍.U&Ep9e%(Qk郪6AԻu1mρ5+~/7cۮ(\xO>gO,0D$..08?4TrkU2^ar??s}Wznh;5Iu9*fϮ]0(a܌ iIWQqˍA{ =ك(P"oCKED~ӻ^iQ͠djoJ[foxo m$xGHs; ]wAlLN|@|$Tc;VkSvPf2D>ͺw'%*(3w(6eҫ?ߩo抱61 ?0 aiAF4bY{bޯ"{G’,;:7jxZ8#ڒh²b67I Fb;B:M["øCZ9bғ">6JC!KūټDV^u޶!I=Xw]+?PpDgsKL,Q5' K % 03+k0JƭU$% Q?LڍL{zkuʚ! EK9k\mx0[r`|;'zpK8b2䩒ޥy tO˚;b}̮09.g849 ۯՏ$gD2;m̽?-)>>_)9lj}) 5Õn8rMi'Z'<8QpPk9fq3zzOa|}Ea~ޠ7?wz1@>VjpVwM]ڳ+H&%74d@yqH4Nbq}z^|P9j'zE ĒuzlqJ٢Ms-F<8dPTܤifɌda9.Q?c97$y w;*CۊR[Fw#þ'Њ% Q[zu޿T9V1](I/V'%.FzRt]q*y(WS*SLM?!⣚[d@yZىؓk'|^ŹQd8["zs3632fg2S. JBfIM݈usx"`eqzele;Ua,M_֦sA\,,E

    lkǁ.QvFo0 raVC*q]^[;::rT\;tN/p>fAM}BQV&A¡{ /Yuts$ً)[|=T[0 a7Ho M UޔҮ&ؕV{pǝkV"N~"\-34`ُB#8]@/t@"J)τuvд;^7ukβ$% [vNl}E A[˹鳮O邩͌( %ad+/v5ɠMwwG7%O_tBߥXι>`!֤\5L(US"7)^f` PTd¹ 7.RجF=5P -ݐeXo Q?ZLs4_HZeZ"\t 4 Fc ?AE.H9K3IN-lzeN`"BgS|KfSu^!V1pdF 2lp=t7+{Ԉ֣Q^) ]jP繷qiY&&čkUm)$S,< endstream endobj 731 0 obj << /Length1 1398 /Length2 6146 /Length3 0 /Length 7097 /Filter /FlateDecode >> stream xڍw4ֶ$ZD 3z QF eѢE'D;Q-B^Dk䓼y9oZ3Ykؘt6Pe$'5A$ 1пlPDHB`|JPܝAa@PLJP\ @ ɿHE)"]Qpߏ- ());w .Wm!΀>x q䇸(Y.^zP4~ hA\V'd `p_}=Wg-JqGAQUw@_Uv"k /r#~'Clm.7Ý? @vg4*;ClGj?mQpW  w2W F)"]\ |Jpܽ\۲#a*ws*\sbQ$n&𫁁+wPj?W+`n!AC<?-BAAnlpΎ_W{f+ _=Y\1p7 hx DA$ .* gHUE#ɿ:'C?YK yE\(oDAW_gNW=8_'q;{A\s%M 5\M忣ȕ:H8ZӁcla/_Js#:H4׻+v%/[C+%a%3!Q1BA AW\|hMc@\WH;\-  oKm; uߴo̡P/-(V:ı2BΓo3|cdIG(},q#])uOxq/IyKzLJ ܱ×}eC:'E.mғXq^ /-hD/J.@)S"|f:v <擉9i 9,@a(x̱BC4ͩYLʧ_;;rJnP˙IeD#x{Sͧe|Z#Ł՚شkD5ؠ} h"rlnQXX2}勛w4IlxIs0} }oJ ֜/c^|)uiJU]]jׂG/ F jO;t%8&<\D邋(A&n1Nʃ.^'z7t,Z- ; /((_U'XR3%rҹ hvxsHAh +t*psT4{fYmb,Ed+y7p20!K:[ݍ!>@ah\PbPW BHFݪU'ZM >tfn!^J|/O%b^&JNyY鷘j Nδk*yVG)uG<0b0`~͈grM dx7b}iTc_â, VA=WW2{-w ˘X:-pߕaJM[GUZ&-N>S93o`V6.~xn 4ͤ:[Fbj)-g#Z[uLd2!(,zHZcĉ&xߏ5zhЁ.g%,%utriMĸroV)<]!w# :AN#.'Fmd&pgcg ^Au(~>qFɾ<3}/#~uu lT?}FDw(Z_zg@ Jpʬ9õorOqFĢLL^z;$_Q)kگҎq™O`R-,y͘{wWn25G| Ɵ[c9ҐČ[0XES&t 6Bՠx0Fu_$2 9w h)|V(qBgc'oU$ +,` 5VL<Ök.&buu-џ&18.ܴQwOM©@ u?Uv H3 ~&ٸ=E DQiV }7jQU6L)> f@(624'UZ2vѾ` .Lv+6W{zneΌodlRSuFoMjWԂ҂fqeh% KMv A8*\}a2=!r̎cHRECH ÷k o`/Qhf=gqA>ɷSN;<"s7ts&k9gG/:Ɵ~`·qxK^~GA5=mԎLEB{̎ᬄ]Wnbh/~c ; O]yj6u7uoϜ$Kѐ4ȉ1/yIfVAk-,eCbs x@H7Vc!n僖8#6lZ$wdɓF't'hI Fpd>}#ens3u>((s N1ce"c`N7Op"e-5C\ NG>-#_=VM"Z"2 +U o@Iw iT 3l䗟ܚv_S$l%?9' <_UAe᜵Z4KG  =XPt܆ISk\rFyOO0))>TJZ9ιXZ?eoYV(n3:ΪwZE s{Ax'78!Jaڳ{IH+UZlEեQ8 tFΧfNj!ES/dPEqK.j,X7AќgJ|+_@M UG#7^dQPV'4L`#)5)!Ob[];%oLE 8!ܡp0 yـʷ29ۑAk'$OCjc!O.n~WQAMK>Nk{vt]ĽگN:S5BÐw׻pL0]WX1TcÌ[6:4bg/ncŷ1V?ep_$o+Wj8-ˮ 0M7pa%\DvHZuA)|۳U:csXHX칼 ?|lW "FxJ_Hl+u# G+{I5JGmKxV]lIۨ9b\^}CS*ɮm!,<ʚҪkvBL[9YWQƮ;Fm`)AYe@MĨ~؏I˧k~|,U΂O{2+&$>Ԑu͎Q]:mjJ?M>rfcx |чjKFg9tEMUv[ivx77V{cʞWVQiSZ17RMEٱ/ۤ8Bv4F̊PM;?N,xF5a@^ :қQpQXh90.dL 
;UnJi)SseVI0ivi?Cl2' |M:ψ>4X۠Oe\|T|BK͹],^K.jSwXGF$*@ yV#oa_J[{K;_Z5m_9Qe'~0%"]pr2j€i&՘t4!Y6nǨ"i]wԈWrh|F ݑ(gY/ʨɜ_dy Mp[^Ɲ{n)F(;m<T׾yϦGe"X温}yd(57lx`ac2ASYTd27f/gJwe *qJmpTuC;׌=8u# .+'`?TyT kSZFJկXo;8D05s`+~UK͝RaMXcۙk8s7<‛ˋ_\TFѢ/YӷqQ 'RX?vn~wn.囕Is67ӓv!7s*{Ҫ"ܩI\-6(F?| S#iZՉ#)<^-ߤ酪ݦz`b㮔aGm?Am0M.?JPFj"Zs,GՕ\jg|Hȍ)k3g]B~Ax_+ 5( > endstream endobj 733 0 obj << /Length1 1385 /Length2 5961 /Length3 0 /Length 6893 /Filter /FlateDecode >> stream xڍTuXIHO$6fñ9BZA;iDDAIIAJnз~dz纮>χ!=Bbx| 0 @66#8'`3(`0ʧ\AQHE$ P BK p{6PQ>h_@N(,..;(Cá$Pq\uB@C*qqCex^pC{쁿@\`/`90^4 x@0U^ku]a?Zxܟٿ !P(#p Ň!H_@u+!@y} j?7tw>w8ז\2^CbSaЫ"Q^H?ik{W~c$'A Hs¼N~W{c]Q@U`p1h??`0`p$Wn po芄` 7+٣/_GP[S+(X^Q0W@Łb +ANHPNꯡ=dB0 ? WS/ Ϥ@Fp_ '+=hTo)+S@T!tD}pw7^:A?ƿ$#az(w  WJgЇW+fdH(E4+K_ @~>$ sZB~ݪ P4Jf/_oM`0(`r u m<]"N=~ ;Ph}өjCp iE#4R;gQ(h]g0~6B '@&kn;~ў7\/CyM3U֨2oLn ]_:e8R5LG`9c%1?¿ѵ '8S9u}6YwWqׄi쁵RT?mO?c5~;3Y ,Ÿ/+t<-yT(ÇibK xWŚe-lx٢ mequpu}  t};dZ[0Nm۵RзqNO^2PJmJԧޯק{3];l{-<$aO@g6jz.YA Pbە!]hWW.D`Se "&B-Y7 ˬ:|,?~[bqA'ld yi;BnV<1bmp XDg_4X\?)xTM@!&>S U2TKx.'n*.jDHUT1=yzm][־_c=.4$y 'RUHjNܙJY&-P@!i4AA'=paVhzJ˔xWTtF6wrl|#qLhnRٜ躰Iyq -Y[\ ! u%dz˽] "b̧^MӸ K!Htqgw3%K3L@zSYPMB^DsGpLYC:4#DeJE%KOTq 7:wx^2]|$މLrqzn YqB έ7Rig.\cP]Hb(נW45{>Ln!;N yl9'˺5řL &1XgWqc3,xgO !3[NE bDOiaƤ)~r~+v{zkLNfJ%vBSu%u0chd \`vPr}')~֑e6L袂 WF#eu|`'"xtzPp"3lTd^v }tb{b_cۖ/gэ!~hT@zo>TWQ8uC2%TlEI^R<6咎–?.] L,ܓ,~deGS2Ikg Na׈u"oHỌ83ZX9Ckn'~ `,g0#&Y9st{ŪDQBvLekxn>V3]8樃iZ}s 9mK3hSSӍ'n܏RoP<>4۽gE=kQb+cCv >IOJZje Сsqo Y4=H_.`[FkE|י8\\1@tay`۹{!hR-8oZa|ǣ:Qw܊:XL+&yG͵R3l ^[+7ޝKMV,# *ួqv: س-o&:;߯s UW6ͦ\hfH/]D&-zmώ(r$qiKC$.tX+_M^ɅvE}⭜V3kvb̺.6d$9f;wFo}EGfJ,?$xVZH./Մ."N$6h$܅68cM* ҁ.gmULZ -r8%M)=zk`cϾ=PLe]nMtVY-Uɭc6\V/V֝.fg9:y6F<_@CN)&CCiumr$~ lSA~ "b6[2N4#kMO~'eE:ӵlınu];bl( ^IJ\KgH^*;u菉Xxws/YG [a'3=Ƨ'_^y&u gH;1} "EaC/79m[23ueaxBZϨ/ܧnԕX[ :B-3 3h΍o)SOiE_^|(-\n3b_'>wMtĩpl\kLeS*z>!!"K5Mޢ2xDc̰y慯&'yOTW,/0uic^/W 4FA3+ft$o=Qj OҰ&\S;]Z w`ePYcWU {46CVUå.?T lLͨsrx;< I>z]Bw;41)l_sx($XmD aM1.g|asn'D|Ne›_S»0>Q{W&Oū԰x?GmI4w.8xP>?%. 
?R&"(|SEFQkFejōGppk&2[Ѱq "Tjح 5rdҠ|> stream xڍwX>!C 0J;G+1ƀQ҈ Ғ]M8q<{:|E'o h>A~$PQ VA a~HnG;v؍a(w8!E ؔ h Rjx8%$A $*A<@m~s+"]}Pp{4_@N(PPBBw8PC!6sTB`$C+*) qqGex^pCyl@\``:vh/ Pば@PNgDp`tq |{UG{y/ xB w>ݡ(+ڝkH_i0UDhw(>{iGUw+ ˜ah(D܀0o>N_fHWf?Ce ecmECf۫IDA@APLTDzG: _Fճ4S$\Ab rCG Qs?Sw鿲dT<8D? OhA jC0[{$Z aazp4a78g8tz`B@adu113č\a%3Tc$+0IڰHl$~e-c^( U444fhQ3Ho-kl: Epd/>Y~Ϊ)p H*!1E{7 M,$rxEvf:*ŃM۶wc/ _sąΒ|5S5Kmu~ƌ=t` M͉4D zTs8a.GÄO!tHxd)B3gNOkJijH'&lF 嫡 /ҙ-X-?@@ 0$ ~LJˀ_XN)\JB훗,ݥy%Zb`6 _K T@%׳YFFf^9a?Es4RrJ]|0,~gyDpL XmgvW5jQ:&^QPO鄲wmN~ԧ),xϤˬ>JۨGZMTxطWEŢ7kh"Ljp_=xxI Ȫ]&e.~@ieI^8MƔ&LK>a+SIiheGO蛐jAvMOM1Q7aͬr8#o 58)b²83[] b$ʶ y9u}iy]3Pa)$JeXطqwdP'[M2/+KB)L^P",euPZO^煩OwayzIvb`oq_uߨOZ$($eJyj8%3pQXc6~v ټEh6 &ZsE)5_LG}*4>/Z 7Zdpuze1Mُw'oUn>).ZEв,%m=I@Hϊ7 Yd(O(w QOMO[Ac]7=|}<(dDSP7WUJ1@h7]$zT#wiT/Mpj޶oy#wTDiT$?L 󢂚y]a=2;ѧJԍU9Օ+L[@by g1V@#Ƀ2S%Jo,YgڭRrjvLE(aKL]7=[Fl.D4qÉ!P2QvMVg ~2yl=W=CH¸KkT`Z*akguDibA̋F-_83XXNHo6߭Y|Wdi.⑒RDcQ*PkIDU6 z5Sij.zjji_s~{qg~*qaA\>msy㵠 0ᚄķecl8ʃW(U2,8>XK'1~8sȸCRE꣠Wc @O"1Ss1jc5a R O+捖I +.m21)J}u{]4+fKnp}6(aNE,w2FSNvׂ/srX9Uf_hn0]|;qQ=]9}{]ijA5ys-́k0q93ȝ穂,A/8<³VdĴ2`5~-ާJ?X>dP$D q+M--LhY2)H- :W[9b Ӓ {\l~:sd~+£O^AuHAF#y=$ fzs2lWQo64.=Un&3GoUh, V.۷]dxmed4iO<ܩAMz+^^ |Ѫ4W7eu1;<2<&݌9|şp 3U{Vⷌ'RxIkxfZ<56=I!*k }84'=UcX"L<"-n Y[#3ɗz3' hAɳn$/k4eΪ6.IgE@ԺTKš~~8 0E-2X?Nyw[hea%3ntpոΏm\PE)kwlxWMEэPE9SBq+'F 'T}ȳdH.kq^Ys vByÌ6%qd>imܵBؽίVRG ,4w(Kd1$Tv|#cpR7',d,r 'gLO4\xžLyZʩIe  nGb&j!.z}ƛU(,h_--$0fDfocfaY)kMQ>JһOAɚ:/&iTGdSUn (6HVi>EkD {$UpYLgӄMȥ^;cc:ptA؍Kw/dݲ4C*Y͓ 󪓱TFz3 V26m*c0O➒@R'OH1} EVv_>n!,bUm͠0!ҾSksKSiRۀ/f dо5EFh@m7;ŰݼB_fIOAZ#|̈fY|$J<ߙa`6HV$els|2|g)mvMVˋ 2(ARIǟ ^*epm.;dB?_X^?㪍 QЦϹfJm ` FДM#On>ۢs?8Rng/'WI/I cv7;?7 /ް8F$Yn=Ͳ)="14\xt}ON~)?Sm&ueyR ̍R !\W4jZ97_IEN[ J~ -i|onQLYgCI|ѳBcŸ7X)9;VthvUfnUohMGUe5#/WmOr2 㟅h $i 'x;!ZK.l(ΰL\wNWi6ξ[!GS<ѐdG|E,[%Q:;GxjK]tх'w}6RY?/Rx~8Ǣ9JAdfv,ٽk@*'k40  * &o6EjLٶ#1hZabjc/ 7T3v5}L̅BR x2`0RPv%$,cםk[BRN Eh|YB@[xBHH{]yl.w2*mz\Kþ&ϭE? =eBUPz9u;D'm:/o-gbZ-8rۨbb?M<_ƖJ?Zg >:D尢hS`GbDMAb&*K˓4TKt*]]dXф5nߧ"R:ZZXDCZܔk}fkWJڼ1_ʎi=S$AJK7 /OoP'np◛z!_ukzÁ7_! 
Տ,Y,̈́!o(fytwt O_2Q } . -JY 5KfQ&Lwa!qe$.hlb7v٦';IjYàw)?$e3)vNKVw{RӗfS[OB-F&'_2?o472p8*r K:ؖ0G`2%itq` F:qE}N!~oZ,umо낵 {S׾ $H@dr"fK2HNWS SHEUKJ鿀f}urDv:V9 rny.[gD]| endstream endobj 737 0 obj << /Length1 1739 /Length2 8850 /Length3 0 /Length 9994 /Filter /FlateDecode >> stream xڍT]-LkqwP Kq-+C)xbEn~kݻV̞=3gsF:m=NkHsr4yy<<\<<|X`87hrqàbŐsY%AԀAn/?WHWX#7"t[40(Q#`xEE98\@K(@nrDTZBz0 W vp7+VtA w5wMKG_qa1:`6pK@@z*-'O_O'CaNP/0`Z\pO8jh q!--K+Ꮵ[etvr!{ Pk9# w>y w/ mـ6۰vs6@*q?- O^N?|}`ND _ j]@> +-Ov F `ƒ/?OYà1_-)+ p8ExBaaAh[ZŪ@m`?اXVsiX+A YwwEn~? #\78b 4`Y/Y׫DL hN^.?q"d T͟y m+ bȀ[!?\ =l|BKK/,Y#,A/b*A!D py܊X"nX"nXnD5ǟ7˃ge_0>b A@6ֹ3vm n0q S;h BP7"v Q t5N]_ 5mqw@ , (l1Zʃsk }u#"θ?\4NҔu%H|ik*qWݙ+)9p$ ܡn[5vB85b8^Մ9mp·킻םG@] ᜾Rcci<"RB%$Ey,}>y$sKPsfgr͇i΢XY.n؏ݕ5 8]ɑ73'*y*&Wϝ#𗐈) Q1i]zۙwoʅdgӀ R[$7axIX{trK[uxR(]'/XgћTgMyƱ3yF 'w||ؕͥ0d,^2) =67׭0ܫOit)$7N#bJH7~R,$wxVPaL_ȔT-|Ǖk&2զ.d$H&=OʬY(.xE_~ݐlR]W.y5SGoZڜ#{t$ i dGFJ 6cQʐ ?WmPްJx+eјqU[!{wIny&uF\t'WcTGg8 5kf$y]}7 4N#jElZB^:EaAZu瓞.<7{5g,w浲^m>a)V$J)DH-VAK\zP SxB% Odq|~w5l)jԔPnN yhsi@}xn^iR;t)]2=f>dȔ}'|r`H:g:-;0ޣRthinیOfi ұulRܚfiV`Pe;]S+x.$1͎@(A+ܽǼ]hh ENqwee[xa˔΁MyZQ/z/'qJ @=!og`ӊVJrR􌁯A_ =vkRƄG uwڿ(<-flnćM`I5L8R/OOd|!8rV?#^@PW.Ң|~֐͚9}n h?Z\c4΂S!yDR^P#T" 㠐K]we}kS;.b,L3 8"Ev M:(j! "z1ھ$z`̗ ( sܕ4[eлmG;JB8EG9٦2dq]QIwY6 g~F*T@l^{JRa. 5t&ׄSxs/6,xO ~!v@zGЩ!MәZXfx5&ͤv8f\.I?u\#&D;=T[dSԁFN{*Y {C3PWk(|$lGdugzC5 Q[awDZVoZBъa4lQ.3*osaUjD-\9Ǯحxg?UɀȫDlNۙ6n_Eqr_ED)>KQI1+dGQ|mK9-ւ4fSWXX%T$_CkS&^ ldxx~ aěOWq\SF."R:`j?Ma dA/.Xꩱ?s.yrG#aRAyʔ>Ь=uhݷ,};x\cQe^?\ŋ# uYDI g _K~֬hM[CS(6AlUTFb?;v$މ E&0Y&؋=f(`[ƶ`փdlTf1bvF-7=nI}|Ou<ΤDyr_H)Я m& "|/o^KAB@ጟ38 )U~\߹KGxRນk-\ ;JNhUGӝY黛2%Oڝ?N?]ZLj +|Aۆ3Y64.&_-&;Tbxȡ .ܛmMj,![PKOV׼ 춡#{m {/BWEza;o\`St&cj|I#OɠRSrʻWY"_fў{]ejZ&as #5 !| dTLX4S3L%lf14̷@i$u[ơW R/;h')<-'*bvhFpQ[\Ԟ4?2oozMW2{auZڐS;E5/]"!%Ag80ĕ||.*ʼn>;<֚曌zl `\wK-~Mn>m-Wo9*Ks ަ(4ҦoU e/o2 I$_`ީ׉:`v?a1VጋX|y$n1C-SY5ce+VA&A]3wxXK(/RJ9bxM6k0*J Ȇ_,\*kվѬ3iBU6 <'^ytJRm# ķ*sW _~J 5l,`nY~{uk{~jK/|25WbK5NM^P? 
!HC3ZG }8؈(K{ME\*N Jzra`!(Ab(.A+_.%Qʶ`% O%R an/J={r>gZ=y?(W;IGpSWX<otc^+~>rrSMTjP+w"W5:9t0t~+A0Uӵ2Q[!\o#~'y#]4‘>̖xl %bD *خ2ĆF1|Uls^$;u7 ]}kŞ ^ 1qa:nگ<2+*?䱮I4sgۘ́;}mDx]OOB^iegYA5R!D/hmPKw zJw2+3̖j-t|}8uL=c庛-nݱt)tK 3ADύ%?Cm(I Ɓx|r&ʵ%^8敶#U1}<)"'ynPqNsIDfm:O&G$(ٚD]ADs3:Jħ4ʱ9;{Av\ I5;[cPt㔋0't }YŠR0>{m|JnDȬsqY3XDN6Yl 7h+ CC: 4U@_NQ-Z|Tu#ZM"w[O^ {ʪM&2k)TsK 5UW‹уBg^Q6s[mLj* >;lI-m|k8燇5?ڗ/>?2!VA1~BR:v YO|臡 skarsE5ͰyCbBC9V&8_|2JX}9,Q졹Uk )`\ 5Iylk^FaE܉^~8Ǖz8#7XǾœab4!2;9n,Gba/ t~Nʩ7zyxS!Qr,u*~?/ƫU&Yt@r$R}j9zW sFZ'9G}kIJ|+nPyH7f! !rjZa6,6E#=&{~oxӚW<\pn{䎃}ߴr3³E# oToŒÌ`́KO ΓMɛ/f?RU<.1yƶI}9NE]jw{3Mwv4NrO]4Yo[̇$r֙tNFj>ݒ:|OOu1=dm?ǯݵfgϕA2N.śkTf^)̡uE88h4JK'Az#z}4NW+ ]e|nXi +j◧̔5?ӾX|Hd.`ә=0ܘvuy~~(`Ob_I1yzէ=Aʪbd2whd+W&L+2lN9DIDc%GtY=g˥[j1)ʝ/FeMwŪTr 1w#ӷ~OF, s5Uhs8x6cӽUꭧ&zVpCtSSls&gehmم8W]J%ѫV&7E ŘnW_> (.9nE))8R*g<,&7HzBRrZ-(qW&uxnQ~tJ畐z$(mӧH.an/1qw*["O`V YiVZF=lʷc_{"'VF98ȶhrX; Q[!/',+QMC/,'e nfȧuLưp%c$= m׽xpQrÔf=1kFXR ZDPyS-#mWB>#0$Ə>xYs >91pw@zt>#c%դT,pYG2'[rȴ5y&]馒:7VvOC"_Jj\əYWqtsz,6M^Ұ9)CIӱR$__p^IQTy /5=cx ]+;.9y o˽(ye@RS9r#y6.-;hּk;+/ xQ#LVҁV{1c;IopMF+ $_|7hDž-m5oI Mp vOhCT?*ENU,$ rϟ݋S K֦XpvqwMsL%Ķ.e33 %Xul _kZLg_};.69?+;n0Os R?q,TW_|~Q+1&c^ZtMPR8oAV#NH(@=XƁ!I{1\Vj k|۶>4Ih'!)_"V?qT;8<2<^+ߢ~pxw;Mi+ϸ͊Gؖ&{,6NjwjEAMEOSg8/gSt )XL#Qk6OS^sS<Zdg;e둆׷y*2 R{o/7TLrLݞډw(A\b[ s@ODv$S]? Mt(atJ 89&MSU'1)`Z:Kxͩ' iWQK}|: ]L e-<(㈲!e dfbUWUY'14R=vK8Uv7{ti!M_5N21-+~M%>Ec1ٷ_-ݳ G^MO7̾>:"b;se1y6ƒozB ʯK]L1=Z1C0:K5 b3)}`T/y Dp'=έrͽpq}51Ng_ rl]8]RdX3y? 
DT;?ߊ>^6&)Čti1"ё  /MߐG3 &-Khޛ1~ȯ˩cųj nۻzآǢT:oUwAH=A1SdܛHm9xK-TM; f `\vvny i|'Sv~ls*V1tta#EV3=i>1{>$VƍyS55*.#LUbWu0 w:VbApjdg-WO*7[O~`|z qsjs:[f_u~}KK|z N}+kQJ0_\0^W4S篼x#4AeH˻n\m54}--eѮ!i7#piɆKԧ,RmQܱi`-ywKbˑ}ϥoD_nSҒdᓁ$Z/1g- m Df 7aq+|.%_`A'+gpe-oNiX^-@ڹ~S J.˻S't㿅J{j' endstream endobj 739 0 obj << /Length1 1371 /Length2 5926 /Length3 0 /Length 6873 /Filter /FlateDecode >> stream xڍvP-ҫ4Ԑ{E!$"$:JҋtPRW JSH)_,ߛyo2k9k&\"p=B Ɖ Y$@b&H /Lkp"1hEPvG@qxLt1h ,KʂdA $/"]D@R*c\}ܑN86z`)E Puc`H%p8WYQQ/// bo 8'pD(e@ ^78༠pAh,> G5u0@U 0(W(v8 ]}5 ' ῈP,"]xΡ5EC/<,H_E*AhW*Hw >nB]8 p_"hBS/QsD@xÜD7qEx~W^"Pa\Q8#hT?k# ЯϿǠ]|C}ZƦB;HDd$vHIKY?h @OSWÞlYK7-ۀ  )77;;(_޳8u1)@7gfup꿣8(~юx/% ?8F 8MM 0`^-,b9_X-羪hk ;ԇwmd(Sx;ůkB)QqF 0i L.~uHq"0R[dew.I`acl_Gm:ΧjT?+tD\c9JL-ΚfiTjӳ6쪕&(>Je*mF[rX X]qN04}{F;Rja:.c`n/;M|@udy+W`b/+SBV=O r<%7O߈{ BJ*"#.%k)\ߺ_"0~>z6# vVwAzpqۚ,}ɬVط< sb/7sO?'h $!Zu];4ؿ4F̗L]zS1}%aSBI -Fy+cv`>T:%ɿf| <7|pdO%fDP2'Kڕ4Xk>->al8|j{)ԔƼ֒mǴՈ~Ϡn9miI^Y˯Ez_6fJq_ZoK0s:ptl캣6wwWZd~%Z/JTT F\/SmWZdpOxl%>xx {ꥌpqB"Xl"cd5 M+E^?t ]XKUM0{`QA~Nӻ/M\/E%ڧ- Փ4Gl3X:1_bxmtCeŬfI|1ʯ2}`().Y2Cw^籐Nsfm Bonc.8 wƚ5m?Q?rNQ[;ٔrIa] XOͯ*ٹ!\c]YeF}:U4N+QBJgējviEli(!n-&Js+x Y5ZKl '!ڠܩ-*0Gc*YdA![Xތt|e oƓ9 _snHtI=փi?WѐCtL)Xj_#ؑF^毮jLzN6vbi;Xۀ˩E}4%JedkdپOO Qܸ]ݑ1%8!{V:{8y#$c)$E6EY.\f'yT0S*UfZuIl*A"/Mn'{=yP]_Pt}kp|ͣB2{C]=%ʗI _Ωv nEkO:\W#+:;!1Pv14BYbq3%zjXbLjk^7{ysB@x-_ޠoFFWVVXs:"/Z| /PU%DvB1"^Ʊc]Ǟ:Vـ;7roY|zuT܁iYg'/XF9kCdဥ8xջ6v1N/?sf$pئjcL1-(9FR1gQdz4C°F~Αj^^n=q5޳ks v*s g ,#L=.j zu2n"bF(QEF]yY`en*};8yH\S/bYSj¬Rߊm=rŗ@E'mhV*ҍ[b@5)Out`@xZFwUN.h`T+#64Bv=oS(=uyr^{R9 oTl>Br\}VSi_Q.).HeiZc2 -Uذċt#%и6)|QtH(9۪ $3%q Tx _Sq/Ԍexn,4MZ6A>^7p/#E-{ށ^T_ԟMf2#eЎy "VB1Y}ą>2,og3DcN-mơf YCm:ߨ< <%5gaSImmFGQߕT&fVS2Agg_S'"[N.%80 Z߻fϨ+-UA>er#ė쓇'rϒ[H$)#EJOZC">%Q<-'z#;f>8IӋ q̕uĀ(sS Jrc}#몑VnLIdu |bm1zʟ>g.(M5(`/>?aq|U%'5krMɛɫޭL$oy?s %k784PGm}CQH1՜t)-eȖBjA#O[WJ鋳/)Gf/]7n.DY͇{w"%rOT"e|kQK8i/P% 8W !ϦR+׿sxg31M*ȜD\Y%:2q9[!~ I[Q?5_>T|w{])M{v2e~(fŧqeOT廏r⑧l^c* ,oEțE׸@V˵zw{erlae생&K>,ъ"4*%~o_tZ2 >]wE.47M.yE[sM,v5/i(yrC/zvd*H6:f\m Dx~)=>mR }Ȕ6at=Hp6 
b,;H-i/W(mdUa+e+}j0)=O6OImtc,MSZy9m^ڎ7o=<&4y[Km~RZ]`@וIbB6'>#ĨIpټ jKZ6-sdڒVioa:|GrCJ&x]v#zU\4'0D/3yuGbg\&$n\32~RkqOXE~KvͪFI$h[+qzE!s!c+] .PD=BrGM fIw=/:O ZE!G%&Oɒ"b<q #&4>ܓ7̴o#1 g'VaFUw$q4~vp-b<"{pJ1zL#oBZI 3<1T In_O3f7Pm:>t%3ۙ }0 :j3$eDHݴ|@3bC(H_H M /CӘ~A.:QT3fyLف&^Qհ}2Ytk+[Ajո NǓDm~>󣇨X۟nFf1N LNW950at FR7#LG|O2] Z-QڮGܵe}0(D2ax)@4EE+U rfNS,`stc}[ZZ}G_ec?Ky ȡ+aQdF2XjƽT}yKFLS5.}Jd ^^Y¦bF;f݆eqY_ |Fj'=|]HbKnu1B!+s ubIv7Hel$GM)OjtEP!#}J2Y{u4K tQkbOI;%__:.!j(Yb/KK o]c Bkʍ9.dKW~|4G(R/wd|84GjǵL0JkB{1A9_sLǵw%&J{&ZFfG9+^ $^lT/kAk) VcƋ:y¸@w=ʨ϶IQޔ.Sal6ૅ:ovn׍~Z\|ͣb2v'OX3d?.{kDC {βڎb$CjpO];6Xw^-3͕Vo_H۱s^.O"8ưx/3d]fޯ#g?]^БtMۊi~h_ 3œ sr<0bxG3M2Ts_St8YaoCq>t=Dk+v[֏]e**Ƚpp=*'/.;n({].wC7ڧ4G}NxcDSYXG=[杦U\?7K7LܔN ~"ܱ='((rJ 3B;W|yYº$9X}'lEp`T壊cNtޞU O۝bwugdrBlRi019>xҙV?&;P =Vq> stream xڍuT[6R C] C H(!%"AZBN1{Zyu׾}{8X `(;:  IU-$BB !! 8h _*h8T!X p BB@!!Qhi*ꁀ($PAX\?Pp+B@=QPܲX<@/hўp'a]+73hƍQX/.(Ex ap4Wh w#;v90H?DL@ @(W7t#\@}u] a?!.. A@pv+!8ah \~RwjH b?@ácYg$ ga@y "p-?.8Ņ`1) : Loe ܀8=a p ߆@H?q0|4h%Ӟ0P/`(?+XL7ؔQ@?Q!8PXXR(!! 
w6BڣRw;wG\wQ8ZH\f7!?8z`q]ῇVCxU 'fa1oQGxa,d~?G0w .JHl: NLp3q0FC|B89'H $ 8Qh϶!gǟ_8 sE.cqy"bŀHïUL(F'/"P4Wp,^=po80=ʄ9U5W*1y |}OKKtEW8s򅟣g`e7 #F&x72R5cԺrNa(F!EʭdžXZq=18h])_οhXJ],LGJ-,_,)VO=LL.cGMq '>Vfg{3Dwsu:*e 4,I= 31x*) /]#8 ]VR oװ̫׫*2YLj 춻g9S 7mD@Ng׸눿*Z}bzM{bf̏{TIre>4x795+9K_}ψfU8: &U07ȥdjL+nu` #s` ]2,RWݕ7$a]8]O d3n.vQug=,,mK^m|}w\\SdfTn/c<)it?6t۾OT;1u=.ڷb}}AA,Y18=!ELuT:+QDr+g>¯~\x-4]lw|!~ss;40(1QK^VrSL`_/T>ݿSsTpδpY8Ζ>YMW[*FA ;jKe\D%q&ipwYȜ& qeK_XCXT>yB}rߪ2=T7lazǦ6IiEc qLit:L[m~tL<ڷ0`1_LK2=PD n5b<& =sFD5:EGȁ[i/ĥ#UHl/C؊K_A8`e;F7MvКCN;0tvDgz?^ iv'Pe`}IK<H~uْ.}iZ;u<?Bj`l&AO R fBW4PjZ AJe:adoܼic]zeir_#4L6 ZxQ٩$j@޼04v^?ǬσyhGtSf.^Vwn=68ߊ<`q7a+)­LW 4RYrO6o?J6qd-qYh)e!V+-ꖢ>D@D23|U}QGɪve{΅טnpv, T[opt}i>"0NEm΄nyhhKqΦ`֋U'τ F,vSwO͔g{MYFBʦ*ٹf)9c}~f{^8=`|a*"s4yN̓4?}ybKqEѪ4[D~!i4ȸa<r q_\{;Wh:%f@iqi {YKgɄLCCNR^ϟR>>\:t{%3/CҷATYIy:؅f/}~^-<ՠ}ĵ{1Mޔe\f1e\jTBD N͎"CuWUv焻_@ҫi&Wa>SSG{-Ϲo_x:Ø %N;$6M~odo92*r ߘɫ fLQuZI 1*dIw B;tX2HLڗ]B;-^PhSF-rmv'xi2<ǝTnytEWj"%@N[)i$3+Jn?[8*-1⢾ɩGoatf~^^s”}XPp% aC{]-Y Fo!vx{R;|ya0.'q7Axf16w{lRMa¡LЃej4ϏӇis$k[R;xy:7M՞KWDgKuQlV\ 4mvV˗M-ܦK8tJ&kae[=xܡ4vF@<3*eRBl}3|ތ\!jp0Vv+n7A !Ы)eE᥺jYܓU:\7XM< j=K^ϓD=bll8p\lJ6 ʉ'J /z0L5Y2I#yMLYC h4]Jϙz ϑN5x*xunH˙HE(zYӖdF?~^I];(5ad[G|s-x^' =bLk-k"gGon̔K{8 \ Ps*G-cvȝI=g)j9}ļ`8J&X6 YYXfN|[Ȼ/YqSU"@pn|:z<ܐ,Gٷ]7=@4&a{RzYdCvLN9rj] X6%k.QBlMj]ƛf䗨^Ə\amqOc9/ρQ5mFI/>[u4[N'IVdɽFz]l .Q^6^W}C&QF#*UO5] o־dӸHI`U8YsbGQ]r58iz&ASuggU9L& 7*l}ҤQ>|^dF G#LBNhӳ!N/<-oݓ~)۳˙T:dq?iC^Ũ+dQjZ=u9O+%ǺW/,wziq࿶Рg(PX'W})"%}* ڲ̩M KIeo C8AiC㣟ʪh`s^]BZQc!9!f•YԸ5#'ɍ.^~)C:̜P,8b7kE4)z iʐܽ#*x6ȊsYTS;eq(.{CՆ"@<-,Wz44/%)fWǮGw=N]?@4τx; Zs_Ƅ]ZclCMIOHIW DtTVb|HCt> U|Z3Pi&! 
5;d{\_ό k7S;ZH\u;G!Q-7)_P0eP\5V~KUN@!LkJ*αDl@wy'{Iq%f̶O'N9M*Cd~ dmW(5{*CX /Y.D3*"K`y/w:ϙZR&vH)7*/}wϽ7ПY(~%:ׁdvZusF8FN miKflbyaj%Q!I*~ dzI[_mO>|<܊Un@O _{[Ѳ$3.ΆŝkTo=!>>nج>Q.HQ`P^/6tp9!m)K +x(7&Ҹ>3+'-E&4yU,}و1N3,ܽeqAMlMg!{DTcyY)f/VN]ލX\2Fz?A(AɠHgpy_+'.J:$}}Ts FD/jpgM9](u=3NidcQ0b &/kwi>43!"`K@JvF(AutCnԤ NjTkD|'{^$Ao+j猪Еf~o NfԵe F /GV ڜOb6yQK9|l(驧{T(*[n()m`( ~kIʗ>#+8 M䖢4LǘɛޠiUݵaniAh6C}y-1iø:){W=vlwk \\p WWX Pn3GNhTJ-ACր&J C)%1ATHܙ\Ïķs')OV2g9[=0EZGhnM6AmbŲw$ۮTߏJOf/$ P<_(*t[9ܻZuݥAkO(.I"H_, ڙUMKCt{(WL0qI! #ϷnIw[$f+4`vkR^]!1^u{ͬ(gݢSRfNVjOk!o-N;<^pS>P[vǯfhy VaPmo7:b |r f)ƔxștA!Z+#6q2\_1ylޯd_ԿkX=k = Bw()'ބ^XAX~6R, ~ ۿ7Zݾcdm[O.IAxd}Soudl:knq5u-707ݏ'd sOs]e'SQΧU ZC+Bw> Sԥ 8Vb$xH"+c-q+$)Ӡ8Ԫ'6x8˪lɵa=H?BhۤYC[ǹ+r`ׯnvciz!9eN VwfZUzb~h;ϲBY%ŶBzO'0k4VMVJIb{?czK % WxbR㼆J r4tmIjzf]EKZw 9ln`-*^t܉yUSQ~fn1u6GY$Nb|?T @9sӲDd'Fv颞HV+eWA["2파ofF ޫPSW1єrexݢ٨aJ[LJHp0>luG&6}CWN# ǖw6٣ ǡə{kbrՌ&鱈ݚ©F&+Pޡu!!L^ :QRzÒB^8DKӋ#wU_n8QɹW2= Cc߶~i I_e }LƝTt)$Nƣu/,I|Ogcכ%ry(z&ZtW?|w`^@z<UK#6f$"z8խT&Fk2z!7Fn*-?Bdyu-X[MQc&jW 0}E1NV[-ZY-SSm9Ϟ a Jg m/5A慎MT{2Sf„ۂٛ1en9l}iN( Pl}Qjb;`bp\7~!u/{I1| endstream endobj 743 0 obj << /Length1 2209 /Length2 18293 /Length3 0 /Length 19619 /Filter /FlateDecode >> stream xڌP[ Ҹwwwww!\ 3gfr{}?{n2"/&Fbv/t \aYIF3,%?rX25S'gK{;X;?d"CY{; `b`@ K3u%wp4~#Ҙ N/w@hajloli <@=39-hP2u6ur55E ghkojtd Kf@7C'S ,)w0XoZHp+ݿ m <,f6y1:;`hg򗡡чJ7 * ?Й/8fQ;a{[[S;3_X:kmgfdfigb zU;KGSI|`MV&#؂*R2%`0aciftr1,##025Clj7迓;@c IcLl<b*ʂ4_; 3+ +q -]?|%qN)3@o,95PtV/SWAKOY]J ?Ac01-tt75Q[=5U7K;S{g˿n/X2c[c42ء+jgloײ1 =`?zX^[iba?\}fN5@/o^Ћ3 F XRGv?#]._Ob+A|*E1 ?b:|_Wm>FO"G#aaKbk'_]78 ?飼.:[䃀ٟ$f|XRۻ8?X>2seSbp?l??zq8)?pe(O?1~1hdf .]?_=p4u;쥱K_GF45u75]7lyew*Z}Gu__97bAcf/qZZo;9 sh{YƅiO]ˀQiF` rw6Pl,2 ,U0BL$icr0VR -f#雴#]li.Q7-(GI%w&$v"%%7{7~_lfQlqS_LPdf(*-GY< D58✐>O;V|58i 3eI+Ԋ e5o?ҍ$GF~LpL⯖6~6"i4LBA4K )~G}8B_َzƴAe ^5l]_å۫Kzj\oIkn,kI/f-*@3\1 V|ͭn"(MmE2*[eFgz5=Pی N0 p\|-{X03SfI2P2YGB1kG2:*%ᬆ+.wcE  7Un|^CBv)V0Pɞ}p*H V ~X=)`ge,+ %>Pm HJ'r<?Na3pEwbcqK_, BDw7hB.c/4겊dA~iTY2wE$ӐR"z+ݟE,kb/r:c;ASߐŽf"A{J1  Ί"ܸ2+04|_Fg7N"N>f+ 
g^,Mzԅl;bf"3:,΀TCT'[\Qp@ozh,.H\M7D[I9gGǠ*1&[xqŤ{ ' h%OD/cNBIڦnE}qfuXTa .ϊ)yP@N&|PK4-l@xT.?9;b+XXf^s 6(,KR)Z&k#r8lnn(At]$(?nT #{y8Ź>pGo7g^vQƢU'1@&N ;X/q\|D4m&m8'Y(풾kBa;9+P!NRfx+QB׬SqAagIź=ORP]LzKuVO]1d Ʌv嚃=1oba*7 frk E.~n74v3_] 5\I5:P8g2΋J̯X&h-1]vCD(/cvx:|'m@=X_BaPqiIOAЯ| 3ٲ ɨȆV9ؠb m1RE":'J7̼|+)Ubx2#\:l U4@6kRY E&QC yy'y.x[fAT U>@|uWj]4'N .P!-W1/9^wy1?׫xE]kfWr^]oGN mf{M{v|OrVȆ_)';TcpG e jG, $KI#bAURU>`J}6fMt,E$t'g^t>rYp[[¤pX=ٜVv!yxЗ>pS$S2)lOy~YMFe#ct{X0Y4l|y) 1 /ᡇ!5wk9_45 C= ׍vA*n) ))]:Gm Ӝ :EG@.o}}b4GNs@g%p,9c^][0Svr#ΡƵG9bfh2i܍~햨#dapZ SN82bضTFў(7zҪ(˞ME;f bnLCF8"<Hc3Wp|I,W؟{g7mWM]?L7b}/l%}_K0W 9NXW+lޱ? m`D\'绦Ą\G"_/rPjZ l' L<βU*t~mApaݦqld5_d9O}8YX>*vB;0H t栎ҟ[w< &/* !ٔN9CͨUO!H-ɣGmbD Jo z_7ޠZMe؏ ٺH1sm'Ͼd',,(U'6Yc}Ja/ȖL]KAal_ cUr> v:J BT.j>)¸U@fWZ~ tN1`p"Z"ї#q.7nPi7}vtۀ^˾ 'iRLįQ59wd~HPsR'(d OPˣ HUy,qt[2O6 P?/8zva:q˥֥cHk2a%#ZE>O)Xu [boI̚[YHv̲֬eA޽Tb^(8)F0#~jɲX {-yHi0~7&ld3'}ɦ!CH6r-#̡*y>b$I8LdQ"R,z/Х |3%pbы8^6ڴdQۤk;H:%Q`@޼Cdu|oXi_Pœ >pD9Qp3ܕ >Fm9LѡTTzhE+wwʼnkiJ qľ~Bsj^ %p oI3@l1xP]m;To]J,ZYPAbj9|$5jZP pWT2"K]+Sc @y0(^g?ˆz`)pAV]0 i"0:Q-4H'bwE+IޮsiXƫȵlzMk@[/գO"2['ڊދ;hddmgs7cZ+t۰SJ>yौ 9׋,ͤ{xNda8D~eŚ7uJqUa%k`gpɬסw&hd8Ce\bڻ{"*(c<  q_s/>œb]6`<dέۘ ɨ;?u) Xm(l2m1l ɗ_\d1>W$3ZJ.BH :;vƘ A %w켹3vZua!w(w_Q$*WL,QC<% Y^0LK/Քy1e.HpEO_஁4:^upNReKdXZЃxTYc:yh$8ɌSϥ!p1kFnQ^1I%i:mJ%4OT1|~ $#-.)´3NX#%'>1pQ0Jm}0v9%)_9mhw^EVU>KzlarwD6 zcowc/Sܬ--GcnyFx&ι␷$-ҴlUz m8LUPqJbFHϜdA9.[q_|j%ϙhRjFɭaڬ KtCP@sE0VӰxZTDث.y&pTd|t.IV`JDYE,uP,ήDxE4n[Ҕnwo~2 34Wic OFi񍬻#j4$,z^wCd0gwqsaG. 
|-^|nX"9Dx5Noz9$McV ]"o2|S]:"q,R״bK8IԵtL8Kf|LQ p[ +Rָ(dF.o7ﴳ|=l$ls`aI>#hk3ʋhowQh6W`؃; (jz$Z:hS+~^V9%4 vO`}RIs_]o4Nd4QXB2-yk(I=gw$OR*6hc=xg^y߰k^3vt>dek*&l۸X9$̾eE(VT^]@k涁-+5!T f`s`-3`8BSOqMc3]"9?lFnN_ީ ?,xP∁ߛV:K:!f@o_%L2"mS\iw ySK O R ]=-z/s /$i4;H zB 5is҃By+]uZaI*}^$2y:"~U;ǛI^3lbeD"QI  .)NV\HmT?|㑪Ik̆CtL;˼Z }DD;?[35(Dt֧qæx ;Axj $t\P?b Wm=\zh ;b~KZ=kn06, սX.9N!)#?H^1H}•OAeP(Qi>grmV) mAO2kI %52b0VE21$b)c(dRBćțWj:kV):y8ơ|թa2B{';+K݋^LbARSlsME4A*ɛ@][f 2Pa.݀/uᜯ/c)ь)6HB9B-s~E } IںaI}`#x^&e߁A]K`bweTs2r[*wJ~V3F:)iMն,79%|քu6˺y$dxbZh0,m⪤.ޥݭ*]:JUMH ~v6pXOW;Ņ-/ ]Z{䢀Χ3$vK-iY*9{OpsS`Vo\; ]BtjJeT8i4[j6DяaQc:qq񒔝E5| Gt\Xyp^Q$IԅDVMAVHN1UxXk,0֙o\&Lj;^X}eL+*wK[=/^ˈ@drPžǞB>%8l+Qr̓AT5ny?8 8ۍz\W44Qq/<+=uAOG' % -oMAK s%"貸I6Ӯ4mAIo 1kBXѾhľion|υmw? kUw>czf\jKoj%ORlp6P.J.Wk%daN JŞtc&$\XIbq]15"֗LNFzDMhGx0~DK{{J[#}IrHS;ScHTfaJ&Co Z#߅A*n, Hp aYW1.)ඉ9J cUbQS@wd['j5]6:w^JZ^C.{O9\5q*; nk'jJv\|:. K]ѝy۸n6ekqHЬ eޒz?KF,hۂ.A-TEXYvgxB< &u2Q+ 9 d ,UL_tv`1iVI_tзd|I[zu:<Ҙ$XUMey9wGɄ>B Ȉ=D[Q ̕5'w|֠w4E:i #af.9@@Yb_~BY|KLi^ljf'ⓘv, 疓W{a _1x0v>/r ?9XU7-wT/0hTx<e ux9-r._v&nZʕY"#>Rs 7g kSXS=۔sL'ܔ8aua٫94eU$Ps^m>"R)URf>,8nffUf5~@ni\oG=*\ ~!Up7X{@f>X39+AC L=o .SNbYLZ>oStZ{ӝCd#rsԐ3uќ-xhlf`o20zeŢ0ϫo "<..LBeD he }]a1\^U1o-jK\&փcqTorM=Lڴ21`}.g"C_\kKxCѴOأaG4Z )fpiOD'yF>ALfvFI-= $ò1{i)R,wWsu%i#g[C1-ˤ=I1ï>EV"qؐp`o((֥}gʰ0c~G!Ę7u2&#Ӯ ¼PCHI16{ųNEFR^,צЕY"c^k쟗f_23~a1LCwjf3g0;ه i*jB QƓG aU^8=dS ْI'g.䀖r@D, xaZ88zP^7 Yqm=BlKQ `){:zND{c/~8}j.+N٩7A'm4Nݿҋ2υVʓ+6{@opZ)Π!{Yj})p$'uo6捐[QxGn٢UK0QjƯ:V6mK ^/n:۫"N!wH_yT`,#CH؟<ق_1$e5N)z{,04DrvL An-ͩm9Aa~&5lQe=ዺ/*({~ ](Sl`ugsu!OT# ={K%pe[lv7'h#TS,c;YȰahpKY낌(\ yşdd>'ZJ}<%gB5q|nhDwg'\Dȉ,KxqD(AmM1Zzܹtթt W\.$/7K-^`mx#4D;-^bi RmKdwvO^tQ5{9 It6GʡIr<R#MRҎp,axe]H 0Ho]}%XWc'NJ̒-4o/Q +( . 
uj*IL7 ᙣߠV XܭJLCcbe 9@y!DIHE-|Yuq &4pLlʯSy為Ghkh  \!(JG_pS:1]Nh >--R+O#O;bEb8dm]}~ƞ)#`=VsAss@Jpb>Jb ֬`GA##4ҜgܖVW%/lgSsMd{?#E/jsPwK9&96rw;\[[;DYwp&_AT*X@3=bI<`ćV(҂Ҿ7.'g!:+P-+&N~=Q LC ]orh#PGдr5(xgvGrg#iҬGA3jXNa;iQR<1t8KK2l^ iB #wc^5 5Vqa\aBY-wPT%]zhd[/KA(kfIZ|J)qY͊~rs|kkf[VMyp})8Ī0dR q=N>z*OO|3\TP<)F4$ ȚbۇQ'rMV^;St:O9;P+7cFI!NݬbwpF H$ޤtϹdQn^y5Q$&LRa5GOxͬ(0 ps #_8 דZwbwnnCN Y˵4.7WN]ĨFGZ (ۛ*ylaGo] 'g$'bjuw3;M62l`d 6|2s҆!Et  !oP>~ylR.Y{#CuZ_4HUTg3A@w" \11E\5=MwuԲ oiXAH瓢^Gg~37vx "o06ߖ;qBW K.foKj~`Mos*='^͠|ppxm.r  mJd󘶟ZYh:nssx30r:dErJZ9 ~@t0B,H)x:*5{\ruv.WV wl);(MR٘tf|N)RBXY_q:GKnp)i d͵&\J/DlXR};KQ!2 d&ijLozA@zɥuaT YOB}?6ޞ4L~-lUs3P8ρ1"l@ |xȱd!̷b/AuD[a:)8%(9*ɑ<V6y]4OL~ \*GXP?ʯ@ .ZҥjlOq@6iOv;4ΌPM`4b|e]j{,-$3imiJ5mآZPF>lk&V&F-p>Di3E.}gWq(j3+^S BeDW4dtFC\s0{D;<1>>;|]ePΜ/ޥhL `y)D qlouLn@QyWuvVmֈgjIeC bÆwGC ÔAM/*`[Az7n^ \I~JЪzpzͷCdGmx2r[=Λρ^b,y#$ E>t[+\z@҇1 {[]w\brD ]z֓"_3bXT3dDЏssu@&Bi({=z} ϞAk%A:/\4$7h?C-.l@˶SȊX"1jZ~ҙXlC퇠>`xH%1z4P[vp4Xa&먱r=xwM+I nu~|YL[+{j ߛθYu'GXzl%XCAeCxf4w6}wX+/Q$?H%it pY:5G1ߖ 8%ezgبL6jn2KCBѼ @Vr(ũ7<_\{&<{3 ,ѓT3 JL2Ͷ4hr:vE: ?f\YSgR+?.+BwyW ͧ G^gyFiC2H.2Sf tx XRX׸Y>QԱ@}ŋ]&QXX*aTł냊eH8SuV\bPb75Q< I'i|V[AktZoqÊ*KWҥg>gKnd+9bZ_%`Hj`yэgt"=ilF[hthaY"! : 9 '! vmߔ皣rQ%bqqlzU= ޡ[b9n9zb_+d--AY) gE&{) N=ϑoaN_;#άS8DބF4ǩGT|2_\C?م73# d}\fg61|U$!74_fyfCuK}E\V 촘: "WPChY"! bTV%%/VG66HAoNܻ8N16 dDgàodL䮿;8͆T 3lch @hĂ;C_׀T~g6b? 
6]`%MS  tzΒq9xJ"Pj f>l͸.k|iQ*6*ac%wnDXv>39aWVS)ԕ/Ŭ7,%لH)`bm|aqxUD{+_Ƌ~V endstream endobj 745 0 obj << /Length1 2456 /Length2 17587 /Length3 0 /Length 19033 /Filter /FlateDecode >> stream xڌP N n.A-k`Ag-yZսw(IEMRnL,|quuV ; %rJM.@7L duXXXXl,,5ttHxX@WJqG'okK+7P~ИXyyv]L &nV@{PF3;5BX913{zz2ػ29X 2<ݬ@WWE{?1!Pԭ]Psp4q@;k3+e1XX _v613sw2qvXXJRLn^n  M\A&&v& DU& ͕ j=/~.@3P߽9\[GO" ks0wwbpvvHc!Y,,, 'o:zMͭhAMY\ʸvAFZJY{͕̬53;k_7 @ef =\A# ڝ(`hגqrL\\LX@ em9!0398\.)'Y/Y70F<f߈,/f0KFf߈ 7b0F|(oʧ)x@1U#PL߈@E d/be1q59wx&n@L#P.&f@+ـ:E`fv/@Sl/d4w3q7a0')_zgwvvu 1R;dbe+o'+ Ԉ?فdn3ߑ9AP&w{ӿ%?YfA1be[ z:9r#S ?NvY:472V􏮲v ="' 7 vW;W?Bfbݬ\%07O?@1{Amc)@^@Pxl@>@k4; hhfSVpJpP+w# l*mMNȶ˽hhƾ$͝ɋY[#ld{JǓ߳Qa tљh =y \;U^ `~$C5c'z3e5 6NccF!5Mr:de \Vz)CU4Onq` sH>BбZz>p0絲wTrβ~haɼ#C>Rp%e(`Ŷuf ['+RωoAy29ѽޭ|qKe1ɇONO$J4Ծe(jGjljY驞# RPy0goԩ U(JRMvF~Nsn)ۋ.k.l&eD ){ P=L4xPU."eXK~+}^xXxcT$F2UlP %iB "uG: 5Ld1ZT$YuǪc11X#Mj<5}#5<2Pz䎲"7h5vi6U.aƫECXowR8LV:y7Ov!&奞N%UU{Y u0FXs{Hcs)`U=!|ZΤCoߊʠ`?RX!T8;hu2&?c~i?8#~y@]JB#Uvt_!_{\ZҘ7OnIzRsAFL=/#N=;NjqMgLo<7а-uK )]O' ̐Lawu~7!^;ɁӤ]qU'2#6a`aέxzW]e|DXeBW=i~K&Ch圸EL|^T#t=?>D2Í:ī֬{ihiv;U+YB~kgwzs0di7斜o嗢t)>e(LMDŁ po;m?hRbb~` ;Tw,zwj}sp},[ZuO]1vwd) 8AKW ˱%YѾfr6|f td"&ok+ kginwWAI40bBW@qۃ|Y[$p+ҡUu&scqP+lRY; =*7z4*l6ri0r_O4YRQ_+XQHn(fׯBW!p±v*(NOEbKF;(X^Sa2 ='>51)+S=I[] Aɸ|r1wZo)awP+|NRYbSK4\zt9Hsbgu^h j)`V(Y$Y~$>jP%v H!F qA!] [PM`3Yާ%ⶳD5]( @j|eݦn f9:~66< U9m^5I6쫍O0%wın"?ĄR6:t݆ -]!lD8SrX2ƺt>{$]3/)|VRa]C47raʶهT˰hnOVp,0mE(qi6pq^ b 3oh6i첎AsL"s~ f$̪vf} wCqnL[Wu7;sѝnS`FJZ*4}9 BeE!t"'[!J!/5_äI3=.ꖈJ/ˀT%`_Xޭ-bQ.A=͕twbP0]?d+5wd#|&ZBx/Ŝۂ7UyJuv[pZ1g}Q;>庽+? 
`.jHYW%\e3v|Î" i*2Uaꡭ1 _CJϙ󿉁SIf6d?+{jl lykψDFIoۻzW:dR7mE_l B7bxHzBOST?tz\!KՍ槥K+ttg&*~ryb^K%jjRw D8@j&Cc{_dSAs"id*)NS6ɋVڬ w3ЍΨ4?{9{À&(k_Ft _Zr_m]d n8QcqmOj1]=}CM ί+`Eo7);x%ZL?zITTHT^4ve^qv[ kv}ĊRe:teCs:c;+6eX_ݍK Yؿu B2TmppM-cOx٦:a,,k>@cxh^Gq%{h~nzҿ("od]^h*, jO` q{wtu%Ɠ~P` [pem%i8~:fΡޣWqyJZKb#L\fXY!*%t[Ny"5bAS[G/Bw߱&Hpkː-= xâ)1FWSn,&: _ẈHц7&՟#(;S7X8lkvΜ~gI'lO=yk/(q?1QCL/\hnL,%nǾ(Nq'͐6`/}Z;&Lk#-{WGlk(4NӢ{\K$@9Q獂a$e v P0`x%kG^#ےW~pRI}TLF *[S j_9W/Y*Q >كWkx `CFp&u61R$pxG@¡)w,sd.YSjG}scHOz O Ŭ)[ 2B8)2`9T2uA˸f31tp* )-iULSeȖ*c3a#K[N]MҼ9}B +MgQ yh<-y+&QN0%S{'ll֐n4z"~`&.\waZ 1``J8T?[,8_` 2Lt t+ d.] rXRPvRZpKIТIM\H%vVo-H:͇/DEq(5kXp,cQ|lq^2 g oZ͉69DO:خmW.nY/GܴaT1Z* ɴU A-?'ɘz:,Qh/t{ c!|܄5w63 Iۼ\P6 3]G |y%[f !&xf7ִ\ٓЂ E_f.F:zlfg'؝(Y|[7M*R-mɚJJc?CR龐08L3ju``< H?*+X &&q^㜴mZ2A'kYzq^sk뫂g u;툞jZ-h۫5ԟ&=vC]Cy\J.;4{u$iўdSW1H'I۷<(|Z81>cKv4:+ܫ}o~ .f7@rDk̨H O R OY6|`$-aj?]9z|} ƉZ܄?ihɨ{&[6eQqpLp*4o;ft.KǴ-+M'OXSULte/0H%)02;T|-oV!I"eD8.~`zRK*ӯ#i UDn?Jf+6CRԻ*FzGd| fR :K)JM6H$iBl\(mѸ-M'!>vBmEHͻ,PM s,\"ece(+^ɽ `{Y'T-ܹ*HB֟K }Kw7Pz;}.p7Jx 7Xg^ ?rq_)6ߣ"Y;vw x!u=ݙcSg%)[eKhku@@pVpUJ)aD~?6WKcJSN$2xG7+N'wwXa4?㠊b^4̴.'h-}7"ӛ_JH ѾW O$nᠽ%jB~U7óSٟ±pt=bѢ8D^_~ܪx'%~㬉Ʀ_KdصXDhFO Qn7GFI?nvJ>oahQX& w0J;` #t/D >#P{:uV{wSB%d a: nqޗAˠ楉|Kh[5۔6*/bjUxmH{I%pW~$5X1lv a#I)d6XyP+g IE p(މ2Exob~W^}3Je,* z%MO330[A@3.5ϝZCS`>CDԮIYjg0 V KgRp}56H;m,N-{9]܋p |v_0Ù[`53Wj 陳U6YCF5м2=1x tu!pR@v&JL(mR#yWE/e徫.؟FPa.iEI5^}ƦCX,Ȉ Yu0; KRyp޳Y׳RF[ɎBY39)9Ry`N7o <~G iBcrɞ$A[uh>|n*p>$f{8$ 2b +pG.g}D\CnQ7-|3~?Yם&TXr~R?T6B3L~HZ?q~Nh8WЬQC!KjG@Q#c_nW/Y@覄]Gv,/lIIg8ӧ%w~bp^r:~-kuj.2K~+D<󂝀SUg1XlCvx Q_!VT )( ^U}Qyۭ x: ŶMl| K7];ܼ*6XbO/O.2b)SLI<=Q0=܎ iES% 3'QhdPәZJ{y]R^:2g~Қ9fg_Vn60.G+jȗcݩ빌7i'p2oݗ*0=;kjdb8FIf*&C5 2YwaV2TO/.a( cK&nlRdt>/sPրA؝L[idPD^6/sEAccdЪA솴QvC)gL 0Ci,@i隮K[ÆyL@5l4{/~xEVBz2*"K{F넅e3U{G!t8uc?\݋/sT!ޑi]lQDnbG@:TIlfuuUIHl,#*$FonT STQ8lr>/APә-G}asmT\;p"rܺ<&LFYпf`nZJZﮔQ#(d%7a@)V4VXHMV)q?JPj]F*øL2>|IîԻn}ڽz$MElgqW /ُޣOUj+v+a0ٮ`^_ Or35Ց#zO7mA 둾CB|{ +}QfrR-F֬̀uUotBJlBr5ODxaWC 7,[z[ ?bo.hVrdJ/&l//B;Iv{"(ʢۓ B1BmwB8ג91mB~I;a7&.8b 
*ӽG>7\HAy']32+GOB\mp;`f%+Yuv#rr x:]px9)˿}*sLB3H|> #![~}]YDbdz6Wߞ\ u0c9^r2v 9e%0A]5&Xȕݡ;57HGVU BFRX-9hgN43=k7]\q0ӇWnfLDDN咮ܬ,Zk F=ZeyPh2 L똮Ռj38vDģ#F#{ MG:T`n,MɔTHPt5cPS'7[Eӆ k]oVV_J |D#2h c#?V5.nhvQ>sMk7^[ߙPf],RlZ5GDD74Kq) BovX=о>8i\iRFk2ڰpJR%=GJ~ SdO֊@LM5M4$5qɐ` h)7-],d$TWhu[C-rѧ)^==PlWT|Î vz{њ"ʑY/; doq'e*{GuDskPL0 3YK* W{L!}mKNq|_3@,P~Ƹ49-7xMCl2<+Z#jTTGXO^=xZ[Mr[Dˑl$KwI0w f Z!H( t+np>/*'=K{g M DTR)|zf~sWîOn\KQ W*j+}>_m&SbѠ= ȂQ`v:.Ӏa8=\zdY.hg2Piz^m8=¡ >?m/67yǴ`jOΡ-jK&6܇Z盄 Sl %GR!M?]G*M3}KÛkз)p,s82dCѮ>~qs昅4ZV31գSHN  i~Y0ﺄ/յiZsJx)gxz]fߤb}{8|집Pyݎ6ϡJi|a0˸rvVيcAmz;0h^6 *pL̢m_܍Ew]< eM7n6H.RӔ-]SPA;lPguhu|Vٳ̚҂ U Zq*-]VC} Od<^+;ݖ"&`;%9UHӯqQKY8b6oYƺN}hhR2Xؙ[-bW F.d8i43aU kU6h ny"%yÕSV5O:(v~j{Q.9^ ƧD3m>_D_0~KVNAпiZjf&XL%/`2qܪyE~Z̤a|>kEɢY`gGx6IQ#oU0/jVɟp- p';UzuwJIL[c; *P* <3@V`P"ni)%U7ǃa;gH^3db.\* ×\hRz.kΎ60 7λeҳ,S-n6B8{"Ḧ 's_+xe{"Ks!ˇ;Ӈ$y]GoV2sAG/.p$A4T#%3t (`Yolo2. 19m&tBb֫W~ <{ĴolX~3ӧQMT`kEt7׆BoynkiPoi;[Z;ܱ08*oiU% c8&oWwqĜR{kUc0m;b)TRcwЀV0ɪآ&W(KzByz!+k13[U7¸j%<9>;\ _\iM%c) ߕb.ګ#51S|yW Y >=.QZ6p i}ǖ H3I:klnQBPKP,ĬvB'cX;7_@ P_g]AKWVM1)qT|hWKNMdR\6Vؕ E Ip,'iǡ a+8Jg1p9H{ӹ@%" 2۽k93uP~,*` ϒNAV-[eēe}:JA@=3K^բ+^5t\ڣR^h AVx I00 ak5ߋƞV?98[ﱊNnAMA[vR]D۲}I&ZmQL@nǑG+v rji6aM ]<tN*GpqKOvO9teFsr>0G.va+];Ҫ ǂPۼc>>ѿ}'V2GLp`㼲G1p[;ߢzm\KH'PtϏ(9=p; ~NgL<˴2OW#$"H*CbqzG=|ys:`/ 6/f-ui~'24s'tW]~nWGcq8|Nfa7a7RtS|n h54=]g.K Fih^a>3/qSb|ՓUc(t21N Z\k|ݯ!iAa2VrO1H^4|{kiH6Sim=~Z!]xM O\_³v- " z~9qUxܥx <ζ_yWlO(} {]ɰ2n}¤Dn䥓y(6dٷDפx ηm{\pn"82m.o< d>4w i'Gg7z0^|K8Kn:+l],ȱ9*^V*N{?c/δ<%yRV7ĨA3zefտ%ʲV?b2|ʎv&k /'gsё\ <\%=wS[ (W2;/G|HB [8N&8&*&=gׯ3sva? 
isj˶<[6 *oY*Y=g q*1,%Fc;bKo,ɒ'(W[ٝZ^䆶(Y\ v꯯xb J[1=c={ tORkS*lkp:*|If-5?SvQ%`dtpDO`p_`͡Ѻfaۃ\B[l|TK^Qjܩj mZc6gTX)d-9CSx EΠK4lQfт^tL &u2;"dE3ќ1G`: 1^FajT{`Da@5&|pM%A"XQ5:Zͱ bHFUFdW_u_TsmoNƗ{m}׭TKަ/L6}aQ&lV}o`27_}tUyt$/KfCGU~rY{Iu-}Qt/4qYr颏^d^gI4z'_$}kDrMW֛b&2x<0IrVPiK_o#%N Ͱm $jM.1g|0+^?&{)v>_?l_?N~dz]N~`wt2:Ԭ qƶ@E1L0Ղv?l{n\$[_Z TULcTУUw+!V}p꫁f;zmnNJmGWi;9U_[`kmڎq;L$ tGh93-uQfK.{g=@_R8?dʽ-_E+W_N~e:lh):ƴ'ԆEE7 `1@8tW뿔2-)`Rhb|"[˶zM|vwhZ ;\rph?B3HBa3#C@P&Gp>tП'ח;rKRo춣9n7dynԻMQnڟu An )1&=N"5pޔSZc5!]=] jDH!AS)ŝ8C|_+X>M _67w ;uwUZw\zS_@F e]_Hʙ.# ]pxSS6梾!#^eзG=Hہ i "FNn%v(sk$OFWs(sCao0XX:Fʠi\ׄ 旺֧LR2QN!0 2o-\DW S{_m~3EMR6XB|:L:!IrIL~2{W>tls?Jv !D4HCir\IEOoHIӖgdx]"J]e-;j$ |Iڞ85r˯ȃgM,r#%RF&K*.q \A'D H"PI}H=Qu*T,\략PUeE/i[H>@-^ȶ]_zZV!<:mjJpTVfJ=c)i6.Ze U-5Qe:>tG,UR-+w%ۓ 80ދy(~E]&"@&ָxDJkgKbn.+JozLQͰN6e9_1g>cj36M10rԛs=Ô=A3+ҁNl3{]_KI|~eLJjhZ5GtyR';Zv)L*-+u=0 Z~g'QfFGo9^Gг>ٰ`y^ 0yoJWu'iOq7ݨtn]v. 0cY'@YoշxJcu/jQ{ֽ? 'cw ;WE3ƃq.a\jSb 0j6Aϧ~7s}>uE?<{ѡl%һ I-B}>`o/uww?d2DݺBOS^v?oE?<~Ś6H5U5ߏT(ՓuË۷߼'f&3K~޼R~-$(:gq=}-8{R_9Л9khV9U*6쒫Yfݴw{NppV߾wbqPQP][ iox:] {VC37ݻr͵VN''r jFL_e..ݰ;9g/6j]:J>cvK fn0=^ȋ +w14;Qٴߣ|ρoFN{ݰ?uC,dt [@opaovן5ww#/?n/+=I7-")b3Ǘ/t BpС}MEoj )!*q~wwo~:,q'f,]\ޑפ ?ݱ9>/>Zh' Kںכ6Ω|}^7jz k1aw}l||!+qc'~f)rk u0=3` S2qrlo7[Ǘw mp6=$%Z/gyݐ{/nd)h <s;^s52;aѫG??u؀ >4*ٍQ$ nGtENc)Y5esdv-Qxrs'wqy)|kx!c2\UR8gXϙN[7o-.R]d,v) CͲiV*)tFx2@h enu0 JY^Ma<r[H,__?y h P?v|w>C>GbeS MכN'Gx#aDa7O \ƃ'i.fI7a|p1T0ShZNݳ7ox75' \b:؝pãxl]r#߇l^fԛ/`؛'lgAevBo="e|)ikJ{n\?.Ɏ_QW B޾~I"Soh=: $K?HK tÜk۹t8gqì endstream endobj 747 0 obj << /Length1 1341 /Length2 1640 /Length3 0 /Length 2508 /Filter /FlateDecode >> stream xڍS 8o96Miے_F1W:u!Pi}f17$#eͥJX]lnj5.QRZڲ99ߠ9}^#o_3176ѓFT*LIFF~.IFaP?!0>gW1K4  T; &PddEHkh֖!#\.#b\0f p<҆Bdd(BBġv&@K`qJ 穑IFOHAb!…Q "Ey_7 s`?ig+ !l2b*GPG0h@(O ȇ D!v ,%]Q0_q-/P. 
?}I-ٔre)vĖ q9ġD(CHxna=a"k p@Ch\Lf0s]P.S i bh>Y p"DJX$H,tZj)>- (g̕h=0 sI]`\İKՓteftsSz55#?ӡ`ڛ#ZݿN\7ۺ|Ԉ#zfP>ec&LE󞹲,^r+KT-Զb,QIwCn_da?{}/[wxX#c,oPGp+h7zR=n0Vo"QntMT^3cT{?vBx^z^u  ,hT\neCନV8a'Ր\AQF9{ԳiLud6Ep KkyQW졞ouT-=ɜE_7w,?Pk~9S:NA"nݿck'w C{ZZ4??-M;9*.Ω+g{5:o)xOl|MfvRCfu+yQ: BuA۠K'* &KTm~^LOa *w{5bY>mOZ781<JrrS>ՖFR R]K|u/tCuhZڒ3UG|n+)d[~s`'mvt>+?:}c -~"t5GeX}mױԱ',(/ Ҩ=trS긟GUz6xxq^K7oFjř8)*g7q\_:miwf-y{؎݃Y}y֥ѯTtΌh4$9%c!X6nlFɝ]Yt(Ǝy:|ԻOQG.+ϒ5/ Ϗh)>gK卡W EZBwSL| }^=RÄ4~!F)vdvlZ5SAMm=[o/?Jj&N=(ixktCιbi2Mm//2 6n`~Vo^wK~Z> endobj 749 0 obj << /Type /ObjStm /N 83 /First 720 /Length 3297 /Filter /FlateDecode >> stream xڭZ[wH~W1>^ޚ3g7'q2;2XXId[- ;PuwW_]-P\a%pc2H% 恀)t  E@(LQsE PGE0X;r2 aB)"+DHZT@h)F: "0b0ҀQ|Qe|! QO@D>P_C@+u !G2+)0lrr0ʅ!R ` `BpCaJa*HS z.#a81%`H hCTF3҈@(I-!*pUG@wT,!FpSxA"0UBƅ@4+IQ+p`1+`?iDR hOi?a_pB>MR_ `p-Ӥ 9]n7<3GcBb,K'_c]G6!^n^prqq}μ'^(^=_} `=! c~Կ9O?czlp/~y:|{k{RW|w_=j{; 9ϛ?pyo?xO]c]w-:svҮ񜚋H񇓯'Xw|g呓s{Dxp9}wRyf>ltz}^; \GG׌}X'X0;:FcNIkA~<:1zݜ^_6f~8x%:%~y5X<$w ;NkcS|gn͓=1{;9-2'x]_ߝc+W*yG{x>wN5vWm8gIl miOFU:- f8%Q>&('VZdW4.8Wv:TK nݭn-WBZqfCim m2\~M7jeCHvR~]%3TE*'ki[ >֫n{qSծdw(^fVʨ^)ec%y1ڨuZtD[F?mO2 #JqP!UjƃFlWY)< |#t ǚn27馮 ̥{fߡƥۇmfWYN*[fHw+%8y[wzti8YƳy|[$?[/4 IJ]r&?bG=Zug88`"GS<6RwUe2;"Z*tqIܥ1˧I%i*AUQQG4.vh@~'bU@y !nH F;4h;<#9?ݟ|#Kr5&Х 1#Og%_EKp, ~_yWrh/<.4: endstream endobj 784 0 obj << /Type /XRef /Index [0 785] /Size 785 /W [1 3 1] /Root 782 0 R /Info 783 0 R /ID [ ] /Length 1926 /Filter /FlateDecode >> stream x[hYǿusK$ͥdڤifM6m.6M%Ҟ6 JEԇ/a3* 8 HgƇa_A>/फ़DwXK.>xVMKPZfX 4 lxfQj5:` l0lܚ%:5tZof7OtQbDM5E~ڧaG}TR)b%_ z4aF4,L*:L< :,yu6I>v[C#:rDș9"pʒ'UfA{xNv#ڍh7"ۈl#6"xLdHD qt/Z}Ӕ7{!b66=5|4$5a5"PDDqʒG!@" 3}G=yK^5LoБNA]8E+a:p8hV_](kW8z@/')p3٥ĬI*Iųq ` aph'_#YקZEp)~sXm L{>x XWГg<*VFU0w& p׿`,}uS5Pe^4a`}U ݿ?K~LԂnDg  R#X=Ni,I H*pٛS?d@23{prKU5"PS@M5H^uFHdm@نVQpPi v5 H' d:+ C>V [l @BXj H/瀞zF4+`GrZd Dn#2@ ?Q H4T &lM,b Ӗϗ4R8 s 6|G#-5CN׻ĵji{ ;ox#y傣q\NZPvPhqQRR[q:ʖ^}~uQ8*-Ǎ,N(*(]\4pn}G!.w [ yp[wu.!7ESR!KřˍZ7` p (V7jsMi0f矣v\nUv>D/,M9RVիW7&M~jرr#“ 
}-QlпI'zj3<:#O'kJkdm[OeX齟ڵrϸ+obg2S/H铂d# H@F2d$ #t}L~$ ӡJ2ğ! g?7mğ-Zyro endstream endobj startxref 541104 %%EOF mixtools/inst/doc/mixtools.R0000644000176200001440000003543313617013707015670 0ustar liggesusers### R code from vignette source 'mixtools.Rnw' ################################################### ### code chunk number 1: faithful ################################################### library(mixtools) data(faithful) attach(faithful) ################################################### ### code chunk number 2: geyser ################################################### hist(waiting, main="Time between Old Faithful eruptions", xlab="Minutes", ylab="", cex.main=1.5, cex.lab=1.5, cex.axis=1.4) ################################################### ### code chunk number 3: normmixEM ################################################### wait1 <- normalmixEM(waiting, lambda = .5, mu = c(55, 80), sigma = 5) ################################################### ### code chunk number 4: geyserEM (eval = FALSE) ################################################### ## plot(wait1, density=TRUE, cex.axis=1.4, cex.lab=1.4, cex.main=1.8, ## main2="Time between Old Faithful eruptions", xlab2="Minutes") ################################################### ### code chunk number 5: geyserEM ################################################### for(i in 1:2){ file=paste("geyserEM", i, ".pdf", sep="") pdf(file=file, paper="special", width=6, height=6) plot(wait1, whichplots=i, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") dev.off() cat("\\includegraphics{", file, "}\n", sep="") } ################################################### ### code chunk number 6: geyserestimates ################################################### wait1[c("lambda", "mu", "sigma")] ################################################### ### code chunk number 7: geysersummary ################################################### summary(wait1) 
################################################### ### code chunk number 8: cutpoint ################################################### data("Waterdata") cutpts <- 10.5*(-6:6) watermult <- makemultdata(Waterdata, cuts = cutpts) ################################################### ### code chunk number 9: multmixEM ################################################### set.seed(15) theta4 <- matrix(runif(56), ncol = 14) theta3 <- theta4[1:3,] mult3 <- multmixEM(watermult, lambda = rep(1, 3)/3, theta = theta3) mult4 <- multmixEM (watermult, lambda = rep (1, 4) / 4, theta = theta4) ################################################### ### code chunk number 10: mixtools.Rnw:575-581 (eval = FALSE) ################################################### ## cdf3 <- compCDF(Waterdata, mult3$posterior, lwd=2, lab=c(7, 5, 7), ## xlab="Angle in degrees", ylab="Component CDFs", ## main="Three-Component Solution") ## cdf4 <- compCDF(Waterdata, mult4$posterior, lwd=2, lab=c(7, 5, 7), ## xlab="Angle in degrees", ylab="Component CDFs", ## main="Four-Component Solution") ################################################### ### code chunk number 11: cutpointplots ################################################### pdf(file="WDcutpoint3comp.pdf", paper="special", width=8, height=8) cdf3 <- compCDF(Waterdata, mult3$posterior, lwd=3, xlab="Angle in degrees", lab=c(7, 5, 7), ylab="Component CDFs", main="Three-Component Solution", cex.axis=1.4, cex.lab=1.5, cex.main=1.5) ltext <- paste(round(mult3$lam*100, 1), "%", sep="") legend("bottomright", legend=ltext, pch=15:17, cex=1.5, pt.cex=1.35) y <- compCDF(Waterdata, mult3$posterior, x=cutpts, makeplot=F) for(i in 1:3) points(cutpts, y[i,], pch=14+i, cex=1.35) dev.off() pdf(file="WDcutpoint4comp.pdf", paper="special", width=8, height=8) cdf4 <- compCDF(Waterdata, mult4$posterior, lwd=3, xlab="Angle in degrees", lab=c(7, 5, 7), ylab="Component CDFs", main="Four-Component Solution", cex.axis=1.4, cex.lab=1.5, cex.main=1.5) ltext <- 
paste(round(mult4$lam*100,1), "%", sep="") legend("bottomright", legend=ltext, pch=15:18, cex=1.5, pt.cex=1.35) y <- compCDF(Waterdata, mult4$posterior, x=cutpts, makeplot=F) for(i in 1:4) points(cutpts, y[i,], pch=14+i, cex=1.35) dev.off() ################################################### ### code chunk number 12: summarymult4 ################################################### summary(mult4) ################################################### ### code chunk number 13: spsymmplots ################################################### pdf(file="spsymmfig1.pdf", paper="special", width=8, height=8) par(mar=0.1+c(5,4.2,4,1.8)) plot(wait1, which = 2, cex.axis = 1.4, cex.lab = 1.5, cex.main = 1.5, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") wait2 <- spEMsymloc(waiting, mu0 = c(55, 80)) plot(wait2, lty = 2, newplot = FALSE, addlegend = FALSE) dev.off() pdf(file="spsymmfig2.pdf", paper="special", width=8, height=8) par(mar=0.1+c(5,4.2,4,1.8)) wait2a <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 1) wait2b <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 6) plot(wait2a, lty = 1, addlegend = FALSE, cex.axis = 1.4, cex.lab = 1.5, cex.main = 1.5, title = "Time between Old Faithful eruptions", xlab = "Minutes") plot(wait2b, lty = 2, newplot = FALSE, addlegend = FALSE) dev.off() ################################################### ### code chunk number 14: plotspsymm (eval = FALSE) ################################################### ## plot(wait1, which = 2, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, ## main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") ## wait2 <- spEMsymloc(waiting, mu0 = c(55, 80)) ## plot(wait2, lty = 2, newplot = FALSE, addlegend = FALSE) ################################################### ### code chunk number 15: bandwidth ################################################### bw.nrd0(waiting) ################################################### ### code chunk number 16: plotbweffect (eval = FALSE) 
################################################### ## wait2a <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 1) ## wait2b <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 6) ## plot(wait2a, lty = 1, addlegend = FALSE, cex.axis = 1.4, ## cex.lab = 1.4, cex.main = 1.8, xlab = "Minutes", ## title = "Time between Old Faithful eruptions") ## plot(wait2b, lty = 2, newplot = FALSE, addlegend = FALSE) ################################################### ### code chunk number 17: gaussexample ################################################### m <- 2; r <- 3; n <- 300; S <- 100 lambda <- c(0.4, 0.6) mu <- matrix(c(0, 0, 0, 3, 4, 5), m, r, byrow = TRUE) sigma <- matrix(rep(1, 6), m, r, byrow = TRUE) ################################################### ### code chunk number 18: gaussinitial ################################################### centers <- matrix(c(0, 0, 0, 4, 4, 4), 2, 3, byrow = TRUE) ISE <- matrix(0, m, r, dimnames = list(Components = 1:m, Blocks = 1:r)) nblabsw <- 0 ################################################### ### code chunk number 19: sqMISE ################################################### set.seed(1000) for (mc in 1:S) { x <- rmvnormmix(n, lambda, mu, sigma) a <- npEM(x, centers, verb = FALSE, samebw = FALSE) if (a$lambda[1] > a$lambda[2]) nblabsw <- nblabsw + 1 for (j in 1:m) { for (k in 1:r) { ISE[j, k] <- ISE[j, k] + ise.npEM(a, j, k, dnorm, lower = mu[j, k] - 5, upper = mu[j, k] + 5, plots = FALSE, mean = mu[j, k], sd = sigma[j, k])$value #$ } } } MISE <- ISE/S print(sqMISE <- sqrt(MISE)) ################################################### ### code chunk number 20: summarygauss ################################################### summary(a) ################################################### ### code chunk number 21: plotgauss3rm (eval = FALSE) ################################################### ## plot(a) ################################################### ### code chunk number 22: gauss3rm ################################################### 
pdf("gauss3rm.pdf", paper="special", width=10, height=5) par(mfrow=c(1,3), ask=F) plot(a) dev.off() ################################################### ### code chunk number 23: true5rm ################################################### pdf("truepdf5rm_block1.pdf") par(mar=0.1+c(5,4.2,4,1.5)) x <- seq(-10, 25, len=250) plot(x, .4* dt(x, 2, 0) + .6 * dt(x, 10, 8), type="l", lwd=3, col=2, cex.axis=1.4, cex.lab=1.5, cex.main=1.5, main="Block 1", xlab="", ylab="Density") lines (x, .4*dt(x, 2, 0), lwd=4, lty=2) lines (x, .6*dt(x, 10, 8), lwd=4, lty=2) dev.off() pdf("truepdf5rm_block2.pdf") par(mar=0.1+c(5,4.2,4,1.5)) x <- seq(0, 1, len=250) plot(x, .4 + .6 * dbeta(x, 1, 5), type="l", lwd=3, col=2, cex.axis=1.4, cex.lab=1.5, cex.main=1.5, main="Block 2", xlab="", ylab="Density", ylim= c(0, 3.4)) lines (x, rep(.4, 250), lwd=4, lty=2) lines (x, .6*dbeta(x, 1, 5), lwd=4, lty=2) dev.off() ################################################### ### code chunk number 24: parameters5rm ################################################### m <- 2; r <- 5 lambda <- c(0.4, 0.6) df <- c(2, 10); ncp <- c(0, 8) sh1 <- c(1, 1) ; sh2 <- c(1, 5) ################################################### ### code chunk number 25: generate5rm ################################################### n <- 300; z <- sample(m, n, rep = TRUE, prob = lambda) r1 <- 3; z2 <- rep(z, r1) x1 <- matrix(rt(n * r1, df[z2], ncp[z2]), n, r1) r2 <- 2; z2 <- rep(z, r2) x2 <- matrix(rbeta(n * r2, sh1[z2], sh2[z2]), n, r2) x <- cbind(x1, x2) ################################################### ### code chunk number 26: npEM5rm ################################################### id <- c(rep(1, r1), rep(2, r2)) centers <- matrix(c(0, 0, 0, 1/2, 1/2, 4, 4, 4, 1/2, 1/2), m, r, byrow = TRUE) b <- npEM(x, centers, id, eps = 1e-8, verb = FALSE, samebw = FALSE) ################################################### ### code chunk number 27: plot5rm (eval = FALSE) ################################################### ## plot(b, breaks = 
15) ################################################### ### code chunk number 28: plot5rmcommands ################################################### pdf("npEM5rm.pdf", width=8, height=5) par(mfrow=c(1,2)) plot(b, breaks = 15) dev.off() ################################################### ### code chunk number 29: ISEnpEM5rm (eval = FALSE) ################################################### ## par(mfrow=c(2,2)) ## for (j in 1:2){ ## ise.npEM(b, j, 1, truepdf = dt, lower = ncp[j] - 10, ## upper = ncp[j] + 10, df = df[j], ncp = ncp[j]) ## ise.npEM(b, j, 2, truepdf = dbeta, lower = -0.5, ## upper = 1.5, shape1 = sh1[j], shape2 = sh2[j]) ## } ################################################### ### code chunk number 30: plotISEnpEM5rm ################################################### options(warn=-1) pdf("ISEnpEM5rm.pdf", width=8, height=8) par(mfrow = c(2, 2)) for (j in 1:2){ ise.npEM(b, j, 1, truepdf = dt, lower = ncp[j] - 10, upper = ncp[j] + 10, df = df[j], ncp = ncp[j]) ise.npEM(b, j, 2, truepdf = dbeta, lower = -0.5, upper = 1.5, shape1 = sh1[j], shape2 = sh2[j]) } dev.off() ################################################### ### code chunk number 31: gnpdata ################################################### data("CO2data") attach(CO2data) pdf("gnpdata.pdf") par(mar=0.1+c(5,4.2,4,1.5)) plot(GNP, CO2, xlab="Gross National Product", ylab=expression(paste(CO[2]," per Capita")), cex.lab=1.5, cex.main=1.5, cex.axis=1.4, main="1996 GNP and Emissions Data") text(GNP, CO2, country, adj=c(.5,-.5)) dev.off() ################################################### ### code chunk number 32: mixtools.Rnw:1282-1284 ################################################### data("CO2data") attach(CO2data) ################################################### ### code chunk number 33: CO2reg ################################################### CO2reg <- regmixEM(CO2, GNP, lambda = c(1, 3) / 4, beta = matrix(c(8, -1, 1, 1), 2, 2), sigma = c(2, 1)) 
################################################### ### code chunk number 34: summaryCO2reg ################################################### summary(CO2reg) ################################################### ### code chunk number 35: plotCO2reg (eval = FALSE) ################################################### ## plot(CO2reg, density = TRUE, alpha = 0.01, cex.main = 1.5, cex.lab = 1.5, ## cex.axis = 1.4) ################################################### ### code chunk number 36: trueplotCO2reg ################################################### for(i in 1:2){ file=paste("CO2reg", i, ".pdf", sep="") pdf(file=file, paper="special", width=6, height=6) plot(CO2reg, whichplots=i, alpha = 0.01, cex.main = 1.5, cex.lab = 1.5, cex.axis = 1.4) dev.off() cat("\\includegraphics{", file, "}\n", sep="") } ################################################### ### code chunk number 37: CO2igle ################################################### CO2igle <- regmixEM.loc(CO2, GNP, beta = CO2reg$beta, sigma = CO2reg$sigma, lambda = CO2reg$posterior, kern.l = "Beta", kernl.h = 20, kernl.g = 3) ################################################### ### code chunk number 38: CO2iglesummary ################################################### summary(CO2igle) ################################################### ### code chunk number 39: lamplot (eval = FALSE) ################################################### ## plot(GNP, CO2igle$post[,1], xlab = "GNP", cex.axis = 1.4, cex.lab = 1.5, ## ylab = "Final posterior probabilities") ## lines(sort(GNP), CO2igle$lambda[order(GNP), 1], col=2) ## abline(h = CO2igle$lambda[1], lty = 2) ################################################### ### code chunk number 40: truelamplot ################################################### pdf("lamplot.pdf") plot(GNP, CO2igle$post[,1], xlab = "GNP", cex.axis = 1.4, cex.lab = 1.5, ylab = "Final posterior probabilities") lines(sort(GNP), CO2igle$lambda[order(GNP), 1], col=2, lwd=2) abline(h = CO2igle$lambda[1], lty 
= 2, lwd=2) dev.off() ################################################### ### code chunk number 41: CO2boot ################################################### set.seed(123) CO2boot <- boot.se(CO2reg, B = 100) ################################################### ### code chunk number 42: bootresults ################################################### rbind(range(CO2boot$beta[1,]), range(CO2boot$beta[2,])) ################################################### ### code chunk number 43: CO2bootse ################################################### CO2boot[c("lambda.se", "beta.se", "sigma.se")] ################################################### ### code chunk number 44: modelsel ################################################### data("Waterdata") cutpts <- 10.5*(-6:6) watermult <- makemultdata(Waterdata, cuts = cutpts) set.seed(10) multmixmodel.sel(watermult, comps = 1:4, epsilon = 0.001) mixtools/inst/doc/mixtools.Rnw0000755000176200001440000021573513057562065016251 0ustar liggesusers\documentclass[nojss]{jss} \usepackage{amsmath} \usepackage{amssymb} %% need no \usepackage{Sweave.sty} %\VignetteIndexEntry{mixtools for mixture models} %% macros (Didier) \newcommand{\CF}{{\mathcal F}} \newcommand{\CN}{{\mathcal N}} \def\Bg{\mathbf{g}} \def\Bh{\mathbf{h}} \def\Bk{\mathbf{k}} \def\Bx{\mathbf{x}} \def\By{\mathbf{y}} \def\Bc{\mathbf{c}} \def\BC{\mathbf{C}} \def\Bz{\mathbf{z}} \newcommand{\argmax}{\mathop{\mbox{argmax}}} \def\bP{\mathbb{P}} % Probability \def\I{\mathbb I} % indicator function \def\bE{\mathbb{E}} % expectation \def\bR{\mathbb{R}} % real line \newcommand{\f}{{\vec\theta}} \newcommand{\lb}{{\lambda}} \def\post{{p}} \def\defn{{\stackrel{\rm def}{=}}} \def\vec#1{\mathchoice{\mbox{\boldmath$\displaystyle\bf#1$}} {\mbox{\boldmath$\textstyle\bf#1$}} {\mbox{\boldmath$\scriptstyle\bf#1$}} {\mbox{\boldmath$\scriptscriptstyle\bf#1$}}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Tatiana Benaglia 
\\ Pennsylvania State University \And Didier Chauveau \\ Universit\'e d'Orl\'eans \AND David R.~Hunter \\ Pennsylvania State University \And Derek S. Young \\ Pennsylvania State University} \title{\pkg{mixtools}: An \proglang{R} Package for Analyzing Finite Mixture Models} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Tatiana Benaglia, Didier Chauveau, David R.~Hunter, Derek Young} %% comma-separated \Plaintitle{mixtools: An R Package for Analyzing Mixture Models} %% without formatting \Shorttitle{mixtools for Mixture Models} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{mixtools} package for \proglang{R} provides a set of functions for analyzing a variety of finite mixture models. These functions include both traditional methods, such as EM algorithms for univariate and multivariate normal mixtures, and newer methods that reflect some recent research in finite mixture models. In the latter category, \pkg{mixtools} provides algorithms for estimating parameters in a wide range of different mixture-of-regression contexts, in multinomial mixtures such as those arising from discretizing continuous multivariate data, in nonparametric situations where the multivariate component densities are completely unspecified, and in semiparametric situations such as a univariate location mixture of symmetric but otherwise unspecified densities. Many of the algorithms of the \pkg{mixtools} package are EM algorithms or are based on EM-like ideas, so this article includes an overview of EM algorithms for finite mixture models. 
} \Keywords{cutpoint, EM algorithm, mixture of regressions, model-based clustering, nonparametric mixture, semiparametric mixture, unsupervised clustering} %, keywords, comma-separated, not capitalized, \proglang{Java}} \Plainkeywords{keywords, comma-separated, not capitalized, Java} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{13} %% \Issue{9} %% \Month{September} %% \Year{2004} %% \Submitdate{2004-09-29} %% \Acceptdate{2004-09-29} %% The address of (at least) one author should be given %% in the following format: \Address{ Didier Chauveau\\ Laboratoire MAPMO - UMR 7349 - F\'ed\'eration Denis Poisson\\ Universit\'e d'Orl\'eans\\ BP 6759, 45067 Orl\'eans cedex 2, FRANCE.\\ E-mail: \email{didier.chauveau@univ-orleans.fr} \\ URL: \url{http://www.univ-orleans.fr/mapmo/membres/chauveau/}\\ \\ David R.~Hunter\\ Department of Statistics\\ 326 Thomas Building\\ Pennsylvania State University\\ University Park, PA 16802\\ Telephone: +1/814-863-0979\\ Fax: +1/814-863-7114\\ E-mail: \email{dhunter@stat.psu.edu} \\ URL: \url{http://www.stat.psu.edu/~dhunter/}\\ \\ Tatiana Benaglia\\ Department of Statistics, Penn State (see above)\\ E-mail: \email{tab321@stat.psu.edu} \\ \\ Derek Young\\ Department of Statistics, Penn State (see above)\\ E-mail: \email{dsy109@psu.edu} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/1/31336-5053 %% Fax: +43/1/31336-734 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} \SweaveOpts{concordance=FALSE} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. 
\section[Introduction to finite mixtures and mixtools]{Introduction to finite mixtures and \pkg{mixtools}} %% Note: If there is markup in \(sub)section, then it has to be escape %% as above. \label{s:intro} Authors' note: The original version of this vignette was produced using an article that appears in the {\it Journal of Statistical Software} (URL: \url{http://www.jstatsoft.org/}); see \citet{Benaglia+Chauveau+Hunter+Young:2009}. Populations of individuals may often be divided into subgroups. Yet even when we observe characteristics of these individuals that provide information about their subgroup memberships, we may not actually observe these memberships {\em per se}. The basic goal of the tools in the \pkg{mixtools} package (version 0.4.3, as of this writing) for \proglang{R} \citep{r2009} is to examine a sample of measurements to discern and describe subgroups of individuals, even when there is no observable variable that readily indexes into which subgroup an individual properly belongs. This task is sometimes referred to as ``unsupervised clustering'' in the literature, and in fact mixture models may be generally thought of as comprising the subset of clustering methods known as ``model-based clustering''. The \pkg{mixtools} package is available from the Comprehensive \proglang{R} Archive Network at \url{http://CRAN.R-project.org/package=mixtools}. Finite mixture models may also be used in situations beyond those for which clustering of individuals is of interest. For one thing, finite mixture models give descriptions of entire subgroups, rather than assignments of individuals to those subgroups (though the latter may be accomplished using mixture models). Indeed, even the subgroups may not necessarily be of interest; sometimes finite mixture models merely provide a means for adequately describing a particular distribution, such as the distribution of residuals in a linear regression model where outliers are present. 
Whatever the goal of the modeler when employing mixture models, much of the theory of these models involves the assumption that the subgroups are distributed according to a particular parametric form --- and quite often this form is univariate or multivariate normal. While \pkg{mixtools} does provide tools for traditional fitting of finite mixtures of univariate and multivariate normal distributions, it goes well beyond this well-studied realm. Arising from recent research whose goal is to relax or modify the assumption of multivariate normality, \pkg{mixtools} provides computational techniques for finite mixture model analysis in which components are regressions, multinomial vectors arising from discretization of multivariate data, or even distributions that are almost completely unspecified. This is the main feature that distinguishes \pkg{mixtools} from other mixture-related \proglang{R} packages, also available from the Comprehensive \proglang{R} Archive Network at \url{http://CRAN.R-project.org/}, such as \pkg{mclust} \citep{Fraley+Raftery:2009} and \pkg{flexmix} \citep{jss:Leisch:2004, Grun+Leisch:2008}. We briefly mention these two packages in Sections~\ref{section:EMexample} and \ref{section:pdmp}, respectively. To make the mixture model framework more concrete, suppose the possibly vector-valued random variables $\vec X_1, \ldots, \vec X_n$ are a simple random sample from a finite mixture of $m>1$ arbitrary distributions, which we will call {\em components} throughout this article. The density of each $\vec X_i$ may be written \begin{equation} \label{mvmixture} g_{\f}(\vec x_i) = \sum_{j=1}^m\lambda_j\phi_j(\vec x_i), \quad \vec x_i\in\bR^r, \end{equation} where $\f=(\vec\lambda, \vec \phi) = (\lambda_1, \ldots, \lambda_m, \phi_1, \ldots, \phi_m)$ denotes the parameter and the $\lambda_m$ are positive and sum to unity. 
We assume that the $\phi_j$ are drawn from some family $\cal F$ of multivariate density functions absolutely continuous with respect to, say, Lebesgue measure. The representation \eqref{mvmixture} is not identifiable if no restrictions are placed on $\cal F$, where by ``identifiable'' we mean that $g_{\f}$ has a {\em unique} representation of the form \eqref{mvmixture} and we do not consider that ``label-switching'' --- i.e., reordering the $m$ pairs $(\lambda_1, \phi_1), \ldots, (\lambda_m, \phi_m)$ --- produces a distinct representation. In the next sections we will sometimes have to distinguish between {\em parametric} and more general {\em nonparametric} situations. This distinction is related to the structure of the family $\CF$ of distributions to which the component densities $\phi_j$ in model \eqref{mvmixture} belong. We say that the mixture is {\em parametric} if $\CF$ is a parametric family, $\CF = \{\phi(\cdot|\vec\xi), \vec\xi\in\bR^d\}$, indexed by a ($d$-dimensional) Euclidean parameter $\vec\xi$. A parametric family often used is the univariate Gaussian family $\CF = \{\phi(\cdot|\mu,\sigma^2)=\mbox{density of }\CN(\mu,\sigma^2), (\mu,\sigma^2)\in\bR\times\bR^+_*\}$, in which case the model parameter reduces to $\f = (\vec \lambda, (\mu_1,\sigma^2_1),\ldots,(\mu_m,\sigma^2_m))$. For the multivariate case, a possible parametric model is the {\em conditionally i.i.d.\ normal model}, for which $\CF=\{\phi(\vec x_i) = \prod_{k=1}^r f(x_{ik}), \mbox{$f(t)$ density of $\CN(\mu,\sigma^2)$}\}$ (this model is included in \pkg{mixtools}; see Section~\ref{ss:nbcomp}). An example of a (multivariate) nonparametric situation is $\CF=\{\phi(\vec x_i) = \prod_{k=1}^r f(x_{ik}), \mbox{$f(t)$ a univariate density on $\bR$}\}$, in which case $\vec\f$ consists in a Euclidean part ($\vec\lb$) and a nonparametric part $(f_1,\ldots,f_m)$. As a simple example of a dataset to which mixture models may be applied, consider the sample depicted in Figure \ref{geyser}. 
In the Old Faithful dataset, measurements give time in minutes between eruptions of the Old Faithful geyser in Yellowstone National Park, USA. These data are included as part of the \pkg{datasets} package in \proglang{R} \citep{r2009}; type \code{help("faithful")} in \proglang{R} for more details. <>= library(mixtools) data(faithful) attach(faithful) @ \setkeys{Gin}{width=0.6\textwidth} \begin{figure}[h] \centering <>= hist(waiting, main="Time between Old Faithful eruptions", xlab="Minutes", ylab="", cex.main=1.5, cex.lab=1.5, cex.axis=1.4) @ \caption{The Old Faithful dataset is clearly suggestive of a two-component mixture of symmetric components.} \label{geyser} \end{figure} For the Old Faithful eruption data, a two-component mixture model is clearly a reasonable model based on the bimodality evident in the histogram. This example is analyzed by \citet{hunter2007ims}, who compare a standard normal-mixture method for fitting it with a novel semiparametric approach. Both approaches are included in \pkg{mixtools}; see Sections \ref{section:EMexample} and \ref{section:SPexample} of this article. In Section~\ref{section:EM} of the current article we review the well-known class of EM algorithms for finite mixture models, a common thread that runs throughout much of the rest of the article. The remaining sections discuss various categories of functions found in the \pkg{mixtools} package, from cutpoint methods that relax distributional assumptions for multivariate data by discretizing the data (Section~\ref{section:cut}), to semi- and non-parametric methods that eliminate distributional assumptions almost entirely depending on what the identifiability of the model allows (Section~\ref{section:np}), to methods that handle various mixtures of regressions (Section~\ref{section:reg}). Finally, Section \ref{section:misc} describes several miscellaneous features of the \pkg{mixtools} package. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{EM algorithms for finite mixtures} \label{section:EM} \subsection{Missing data setup} Much of the general methodology used in \pkg{mixtools} involves the representation of the mixture problem as a particular case of maximum likelihood estimation (MLE) when the observations can be viewed as incomplete data. This setup implies consideration of two sample spaces, the sample space of the (incomplete) observations, and a sample space of some ``complete'' observations, the characterization of which being that the estimation can be performed explicitly at this level. For instance, in parametric situations, the MLE based on the complete data may exist in closed form. Among the numerous reference papers and monographs on this subject are, e.g., the original EM algorithm paper by \citet{dempster1977mli} and the finite mixture model book by \citet{mclachlan2000fmm} and references therein. We now give a brief description of this setup as it applies to finite mixture models in general. The (observed) data consist of $n$ i.i.d. observations $\vec x = (\vec x_1,\ldots,\vec x_n)$ from a density $g_\f$ given by \eqref{mvmixture}. It is common to denote the density of the sample by $\Bg_\f$, the $n$-fold product of $g_\f$, so that we write simply $\Bx\sim \Bg_\f$. In the missing data setup, $\Bg_\f$ is called the incomplete-data density, and the associated log-likelihood is $L_{\Bx}(\f) = \sum_{i=1}^n \log g_\f(\vec x_i)$. The (parametric) ML estimation problem consists in finding $\hat\f_{\Bx} = \argmax_{\f\in\Phi} L_{\Bx}(\f)$, or at least finding a local maximum --- there are certain well-known cases in which a finite mixture model likelihood is unbounded \citep{mclachlan2000fmm}, but we ignore these technical details for now. Calculating $\hat\f_{\Bx}$ even for a parametric finite mixture model is known to be a difficult problem, and considering $\Bx$ as incomplete data resulting from non-observed complete data helps. 
The associated complete data is denoted by $\Bc = (\vec c_1,\ldots, \vec c_n)$, with density $\Bh_\f(\Bc)=\prod_{i=1}^n h_\f(\vec c_i)$ (there exists a many-to-one mapping from $\Bc$ to $\Bx$, representing the loss of information). In the model for complete data associated with model~\eqref{mvmixture}, each random vector $\vec C_i = (\vec X_i,\vec Z_i)$, where $\vec Z_i = (Z_{ij},j=1,\ldots m)$, and $Z_{ij}\in\{0,1\}$ is a Bernoulli random variable indicating that individual $i$ comes from component $j$. Since each individual comes from exactly one component, this implies $\sum_{j=1}^m Z_{ij}=1$, and $$ \Prob(Z_{ij} = 1) = \lambda_{j},\quad (\vec X_i|Z_{ij}=1) \sim \phi_j, \quad j=1,\ldots,m. $$ The complete-data density for one observation is thus $$ h_\f(\vec c_i) = h_\f(\vec x_i,\vec z_i) = \sum_{j=1}^m \I_{z_{ij}}\lb_j \phi_j (\vec x_i), $$ In the parametric situation, i.e.\ when $\CF$ is a parametric family, it is easy to check that the complete-data MLE $\hat\f_{\Bc}$ based on maximizing $\log \Bh_\f(\Bc)$ is easy to find, provided that this is the case for the family $\CF$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{EM algorithms} \label{sec:EM} An EM algorithm iteratively maximizes, instead of the observed log-likelihood $L_{\Bx}(\f)$, the operator $$ Q(\f | \f^{(t)}) = \E \left[\log \Bh_\f(\BC)|\Bx,\f^{(t)} \right], $$ where $\f^{(t)}$ is the current value at iteration~$t$, and the expectation is with respect to the distribution $\Bk_\f(\Bc|\Bx)$ of $\Bc$ given $\Bx$, for the value $\f^{(t)}$ of the parameter. The iteration $\f^{(t)} \to \f^{(t+1)}$ is defined in the above general setup by \begin{enumerate} \item E-step: compute $Q(\f | \f^{(t)})$ \item M-step: set $\f^{(t+1)} = \argmax_{\f\in\Phi}Q(\f | \f^{(t)})$ \end{enumerate} For finite mixture models, the E-step does not depend on the structure of $\CF$, since the missing data part is only related to the $\Bz$'s: $$ \Bk_\f(\Bc|\Bx) = \prod_{i=1}^n k_\f(\vec z_i|\vec x_i). 
$$ The $\Bz$ are discrete, and their distribution is given via Bayes' theorem. The M-step itself can be split in two parts, the maximization related to $\vec\lb$, which does not depend on $\CF$, and the maximization related to $\vec \phi$, which has to be handled specifically (say, parametrically, semi- or non-parametrically) for each model. Hence the EM algorithms for the models handled by the \pkg{mixtools} package share the following common features: \begin{enumerate} \item{\bf E-step:\ } Calculate the ``posterior'' probabilities (conditional on the data and $\vec\theta^{(t)}$) of component inclusion, \begin{equation}\label{posteriors} \post_{ij}^{(t)} \, \defn \, \Prob_{\vec\theta^{(t)}}(Z_{ij}=1| \vec x_i) = \frac{\lambda_j^{(t)} \phi_{j}^{(t)}(\vec x_{i})} {\sum_{j'=1}^m\lambda_{j'}^{(t)} \phi_{j'}^{(t)}(\vec x_{i})} \end{equation} for all $i=1,\ldots, n$ and $j=1, \ldots, m$. Numerically, it can be dangerous to implement equation (\ref{posteriors}) exactly as written due to the possibility of the indeterminant form $0/0$ in cases where $\vec x_i$ is so far from any of the components that all $\phi_{j'}^{(t)}(\vec x_i)$ values result in a numerical underflow to zero. Thus, many of the routines in \pkg{mixtools} actually use the equivalent expression \begin{equation}\label{altposteriors} \post_{ij}^{(t)} = \left[ 1 + \sum_{j'\ne j} \frac{ \lambda_{j'}^{(t)} \phi_{j'}^{(t)}(\vec x_{i})} {\lambda_j^{(t)} \phi_{j}^{(t)}(\vec x_{i})} \right]^{-1} \end{equation} or some variant thereof. \item{\bf M-step for $\vec\lb$:\ } Set \begin{equation}\label{lambda} \lambda_j^{(t+1)} = \frac1n\sum_{i=1}^n \post_{ij}^{(t)} , \quad\mbox{for $j=1, \ldots, m$.} \end{equation} \end{enumerate} \subsection{An EM algorithm example} \label{section:EMexample} As an example, we consider the univariate normal mixture analysis of the Old Faithful waiting data depicted in Figure \ref{geyser}. 
This fully parametric situation corresponds to a mixture from the univariate Gaussian family described in Section~\ref{s:intro}, where the $j$th component density $\phi_j(x)$ in \eqref{mvmixture} is normal with mean $\mu_j$ and variance $\sigma_j^2$. This is a special case of the general mixture-of-normal model that is well-studied in the literature and for which other software, such as the \pkg{mclust} \citep{Fraley+Raftery:2009} package for \proglang{R}, may also be used for parameter estimation. The M-step for the parameters $(\mu_j,\sigma^2_j)$, $j=1,\ldots,m$ of this EM algorithm for such mixtures of univariate normals is straightforward, and can be found, e.g., in \citet{mclachlan2000fmm}. The function \code{normalmixEM} implements the algorithm in \pkg{mixtools}. Code for the Old Faithful example, using most of the default values (e.g., stopping criterion, maximum number of iterations), is simply <>= wait1 <- normalmixEM(waiting, lambda = .5, mu = c(55, 80), sigma = 5) @ The code above will fit a 2-component mixture (because \code{mu} is a vector of length two) in which the standard deviations are assumed equal (because \code{sigma} is a scalar instead of a vector). See \code{help("normalmixEM")} for details about specifying starting values for this EM algorithm. <>= plot(wait1, density=TRUE, cex.axis=1.4, cex.lab=1.4, cex.main=1.8, main2="Time between Old Faithful eruptions", xlab2="Minutes") @ \setkeys{Gin}{width=0.49\textwidth} \begin{figure}[!h] \centering <>= for(i in 1:2){ file=paste("geyserEM", i, ".pdf", sep="") pdf(file=file, paper="special", width=6, height=6) plot(wait1, whichplots=i, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") dev.off() cat("\\includegraphics{", file, "}\n", sep="") } @ \caption{The Old Faithful waiting data fitted with a parametric EM algorithm in \pkg{mixtools}. 
Left: the sequence of log-likelihood values; Right: the fitted Gaussian components.} \label{geyserEM} \end{figure} The \code{normalmixEM} function returns an object of class \code{"mixEM"}, and the \code{plot} method for these objects delivers the two plots given in Figure \ref{geyserEM}: the sequence $t\mapsto L_{\Bx}(\f^{(t)})$ of observed log-likelihood values and the histogram of the data with the $m$ ($m=2$ here) fitted Gaussian component densities of $\CN(\hat\mu_j,\hat\sigma^2_j)$, $j=1,\ldots,m$, each scaled by the corresponding $\hat\lambda_j$, superimposed. The estimator $\hat{\vec\theta}$ can be displayed by typing, e.g., <>= wait1[c("lambda", "mu", "sigma")] @ Alternatively, the same output may be obtained using the \code{summary} method: <>= summary(wait1) @ \section{Cutpoint methods} \label{section:cut} Traditionally, most literature on finite mixture models has assumed that the density functions $\phi_j(\vec x)$ of equation (\ref{mvmixture}) come from a known parametric family. However, some authors have recently considered the problem in which $\phi_j(\vec x)$ is unspecified except for some conditions necessary to ensure the identifiability of the parameters in the model. One such set of conditions is as follows: \citet{hettmansperger2000ani}; \citet{cruzmedina2004smm}; and \citet{elmore2004ecc} treat the case in which $\phi_j(\vec x)$ equals the product $f_j(x_i)\cdots f_j(x_r)$ for some univariate density function $f_j$. Thus, conditional on knowing that $\vec X$ comes from the $j$th mixture component, the coordinates of $\vec X$ are independent and identically distributed. For this reason, this case is called the conditionally i.i.d.\ model. The authors named above have developed an estimation method for the conditionally i.i.d.\ model. 
This method, the {\em cutpoint approach}, discretizes the continuous measurements by replacing each $r$-dimensional observation, say $\vec X_i= (x_{i1}, \ldots, x_{ir})$, by the $p$-dimensional multinomial vector $(n_1, \ldots, n_p)$, where $p\ge2$ is chosen by the experimenter along with a set of cutpoints $-\infty = c_0 < c_1 < \cdots < c_p=\infty$, so that for $a=1, \ldots, p$, \[ n_a = \sum_{k=1}^r I\{c_{a-1} < x_{ik} \le c_a\}. \] Note that the multinomial distribution is guaranteed by the conditional i.i.d.\ assumption, and the multinomial probability of the $a$th category is equal to $\theta_a \equiv P(c_{a-1} < X_{ik} \le c_a)$, conditional on membership in the $j$th component. As an illustration, we apply the cutpoint approach to the water-level data, first discretizing the continuous measurements using the cutpoints $\pm 10.5, \pm 21, \ldots, \pm 63$ via the \code{makemultdata} function: <>= data("Waterdata") cutpts <- 10.5*(-6:6) watermult <- makemultdata(Waterdata, cuts = cutpts) @ Once the multinomial data have been created, we may apply the \code{multmixEM} function to estimate the multinomial parameters via an EM algorithm. <>= set.seed(15) theta4 <- matrix(runif(56), ncol = 14) theta3 <- theta4[1:3,] mult3 <- multmixEM(watermult, lambda = rep(1, 3)/3, theta = theta3) mult4 <- multmixEM (watermult, lambda = rep (1, 4) / 4, theta = theta4) @ Finally, \code{compCDF} calculates and plots the estimated distribution functions of equation (\ref{ecdf}). Figure \ref{WDcutpoint} gives plots for both a 3-component and a 4-component solution; these plots are very similar to the corresponding plots in Figures 1 and 2 of \citet{elmore2004ecc}.
<>= cdf3 <- compCDF(Waterdata, mult3$posterior, lwd=2, lab=c(7, 5, 7), xlab="Angle in degrees", ylab="Component CDFs", main="Three-Component Solution") cdf4 <- compCDF(Waterdata, mult4$posterior, lwd=2, lab=c(7, 5, 7), xlab="Angle in degrees", ylab="Component CDFs", main="Four-Component Solution") @ <>= pdf(file="WDcutpoint3comp.pdf", paper="special", width=8, height=8) cdf3 <- compCDF(Waterdata, mult3$posterior, lwd=3, xlab="Angle in degrees", lab=c(7, 5, 7), ylab="Component CDFs", main="Three-Component Solution", cex.axis=1.4, cex.lab=1.5, cex.main=1.5) ltext <- paste(round(mult3$lam*100, 1), "%", sep="") legend("bottomright", legend=ltext, pch=15:17, cex=1.5, pt.cex=1.35) y <- compCDF(Waterdata, mult3$posterior, x=cutpts, makeplot=F) for(i in 1:3) points(cutpts, y[i,], pch=14+i, cex=1.35) dev.off() pdf(file="WDcutpoint4comp.pdf", paper="special", width=8, height=8) cdf4 <- compCDF(Waterdata, mult4$posterior, lwd=3, xlab="Angle in degrees", lab=c(7, 5, 7), ylab="Component CDFs", main="Four-Component Solution", cex.axis=1.4, cex.lab=1.5, cex.main=1.5) ltext <- paste(round(mult4$lam*100,1), "%", sep="") legend("bottomright", legend=ltext, pch=15:18, cex=1.5, pt.cex=1.35) y <- compCDF(Waterdata, mult4$posterior, x=cutpts, makeplot=F) for(i in 1:4) points(cutpts, y[i,], pch=14+i, cex=1.35) dev.off() @ \begin{figure}[!h] \centering \includegraphics[width=0.49\textwidth]{WDcutpoint3comp} \includegraphics[width=0.49\textwidth]{WDcutpoint4comp} \caption{Empirical cumulative distribution function (CDF) estimates for the three- and four-component multinomial cutpoint models for the water-level data; compare Figures 1 and 2 of \citet{elmore2004ecc}. The 13 cutpoints used are indicated by the points in the plots, and the estimated mixing proportions for the various components are given by the legend. 
} \label{WDcutpoint} \end{figure} As with the output of \code{normalmixEM} in Section~\ref{section:EM}, it is possible to summarize the output of the \code{multmixEM} function using the \code{summary} method for \code{mixEM} objects: <>= summary(mult4) @ \section{Nonparametric and semiparametric methods} \label{section:np} In this section, we consider nonparametric multivariate finite mixture models. The first algorithm presented here was introduced by \citet{benaglia2009} as a generalization of the stochastic semiparametric EM algorithm of \citet{bordes2007sas}. Both algorithms are implemented in \pkg{mixtools}. \subsection{EM-like algorithms for mixtures of unspecified densities} \label{section:EMlike} Consider the mixture model described by equation \eqref{mvmixture}. If we assume that the coordinates of the $\vec X_i$ vector are {\em conditionally independent}, i.e. they are independent conditional on the subpopulation or component ($\phi_1$ through $\phi_m$) from which $\vec X_i$ is drawn, the density in \eqref{mvmixture} can be rewritten as: \begin{equation} \label{mvmixture2} g_{\vec\theta}(\vec x_i) = \sum_{j=1}^m\lambda_j\prod_{k=1}^rf_{jk}(x_{ik}), \end{equation} where the function $f(\cdot)$, with or without subscripts, will always denote a univariate density function. Here we do not assume that $f_{jk}(\cdot)$ comes from a family of densities that may be indexed by a finite-dimensional parameter vector, and we estimate these densities using nonparametric density techniques. That is why we say that this algorithm is a fully nonparametric approach. The density in equation \eqref{mvmixture2} allows for a different distribution for each component and each coordinate of $\vec X_i$. Notice that if the density $f_{jk}(\cdot)$ does not depend on $k$, we have the case in which the $\vec X_i$ are not only conditionally independent but identically distributed as well. These are the two extreme cases. In order to encompass both the conditionally i.i.d. 
case and the more general case \eqref{mvmixture2} simultaneously in one model, we allow that the coordinates of $\vec X_i$ are conditionally independent and there exist {\em blocks} of coordinates that are also identically distributed. If we let $b_k$ denote the block to which the $k$th coordinate belongs, where $1\le b_k\le B$ and $B$ is the total number of such blocks, then equation \eqref{mvmixture2} is replaced by \begin{equation}\label{rmgeneral} g_{\vec\theta} (\vec x_i) = \sum_{j=1}^m \lambda_j \prod_{k=1}^r f_{j{b_k}} (x_{ik}). \end{equation} The indices $i$, $j$, $k$, and $\ell$ will always denote a generic individual, component (subpopulation), coordinate (repeated measurement), and block, respectively. Therefore, we will always have $1\le i\le n$, $1\le j\le m$, $1\le k\le r$, and $1\le\ell\le B$. The EM algorithm to estimate model \eqref{rmgeneral} has the E-step and M-step described in Section~\ref{sec:EM}. In equation (\ref{posteriors}), we have $\phi_j^{(t)}(\vec x_i) = \prod_{k=1}^r f_{jb_k}^{(t)}(x_{ik})$, where $f_{j\ell}^{(t)}(\cdot)$ is obtained by a weighted nonparametric (kernel) density estimate, given by: \begin{enumerate} \addtocounter{enumi}{2} \item{\bf Nonparametric (Kernel) density estimation step:\ } For any real $u$, define for each component $j\in\{1, \ldots, m\}$ and each block $\ell\in\{1, \ldots, B\}$ \begin{equation} \label{densest} f_{j\ell}^{t+1}(u) = \frac {1}{nh_{j\ell} C_\ell\lambda_{j}^{t+1}} \sum_{k=1}^r \sum_{i=1}^n \post_{ij}^{(t)} I\{b_k=\ell\} K\left(\frac{u-x_{ik}}{h_{j\ell}}\right), \end{equation} where $K(\cdot)$ is a kernel density function, $h_{j\ell}$ is the bandwidth for the $j$th component and $\ell$th block density estimate, and $C_\ell$ is the number of coordinates in the $\ell$th block. \end{enumerate} The function \code{npEM} implements this algorithm in \pkg{mixtools}. 
This function has an argument \code{samebw} which, when set to \code{TRUE} (the default), takes $h_{j\ell} = h$, for all $1 \le j \le m$ and $1\le\ell\le B$, that is, the same bandwidth for all components and blocks, while \code{samebw = FALSE} allows a different bandwidth for each component and each block, as detailed in \citet{bch:festchrift2009}. This function will, if called using \code{stochastic = TRUE}, replace the deterministic density estimation step (\ref{densest}) by a {\em stochastic} density estimation step of the type proposed by \citet{bordes2007sas}: First, generate $\vec Z^{(t)}_{i} = (Z^{(t)}_{i1}, \ldots, Z^{(t)}_{im})$ as a multinomial random vector with a single trial and success probability vector $\vec p_i^{(t)} = (p_{i1}^{(t)}, \ldots, p_{im}^{(t)})$, then in the M-step for $\lambda_{j}^{t+1}$ in equation~(\ref{lambda}), replace $p^{(t)}_{ij}$ by $Z^{(t)}_{ij}$ and let \[ f_{j\ell}^{t+1}(u) = \frac {1}{nh_{j\ell} C_\ell\lambda_{j}^{t+1}} \sum_{k=1}^r \sum_{i=1}^n Z_{ij}^{(t)} I\{b_k=\ell\} K\left(\frac{u-x_{ik}}{h_{j\ell}}\right). \] In other words, the stochastic versions of these algorithms re-assign each observation randomly at each iteration, according to the $p_{ij}^{(t)}$ values at that iteration, to one of the $m$ components, then the density estimate for each component is based only on those observations that have been assigned to it. Because the stochastic algorithms do not converge the way a deterministic algorithm often does, the output of \code{npEM} is slightly different when \code{stochastic = TRUE} than when \code{stochastic = FALSE}, the default. See the corresponding help file for details. \citet{benaglia2009} also discuss specific cases of model (\ref{rmgeneral}) in which some of the $f_{jb_k}(\cdot)$ densities are assumed to be the same except for a location and scale change.
They refer to such cases as semiparametric since estimating each $f_{jb_k}(\cdot)$ involves estimating an unknown density as well as multiple location and scale parameters. For instance, equation (17) of \citet{benaglia2009} sets \begin{equation} \label{spEM} f_{j\ell}(x) = \frac{1}{\sigma_{j\ell}}f \left( \frac{x-\mu_{j\ell}}{\sigma_{j\ell}} \right), \end{equation} where $\ell=b_k$ for a generic $k$. The \pkg{mixtools} package implements an algorithm for fitting model (\ref{spEM}) in a function called \code{spEM}. Details on the use of this function may be obtained by typing \code{help("spEM")}. Implementation of this algorithm and of that of the \code{npEM} function requires updating the values of $f_{jb_k}(x_{ik})$ for all $i$, $j$, and $k$ for use in the E-step (\ref{posteriors}). To do this, the \code{spEM} algorithm keeps track of an $n\times m$ matrix, called $\Phi$ here, where \[ \Phi_{ij} \equiv \phi_j(\vec x_i) = \prod_{k=1}^r f_{jb_k}(x_{ik}). \] The density estimation step of equation (\ref{densest}) updates the $\Phi$ matrix for the $(t+1)$th iteration based on the most recent values of all of the parameters. For instance, in the case of model (\ref{spEM}), we obtain \begin{eqnarray*} \Phi_{ij}^{t+1} &=& \prod_{\ell=1}^B\prod_{k:b_k=\ell} \frac{1}{\sigma_{j\ell}^{t+1}} f^{t+1} \left( \frac{x-\mu_{j\ell}^{t+1}}{\sigma_{j\ell}^{t+1}} \right) \\ &=& \prod_{\ell=1}^B \prod_{k:b_k=\ell} \frac{1}{\sigma_{j\ell}^{t+1}} \sum_{i'=1}^n \frac{p_{ij}^{t+1}}{hrn\lambda_j^{t+1}} \sum_{k'=1}^r K\left[ \frac{\left(\frac{x_{ik}-\mu_{j\ell}^{t+1}}{\sigma_{j\ell}^{t+1}} \right) - (x_{i'k'} - \mu_{j\ell}^{t+1})} {h\sigma_{j\ell}^{t+1}} \right]. 
\end{eqnarray*} \subsection{A univariate symmetric, location-shifted semiparametric example} \label{section:SPexample} Both \citet{hunter2007ims} and \citet{bordes2006set} study a particular case of model (\ref{mvmixture}) in which $x$ is univariate and \begin{equation} \label{spmodel} g_{\vec \theta}(x) = \sum_{j=1}^m\lambda_j \phi(x-\mu_j), \end{equation} where $\phi(\cdot)$ is a density that is assumed to be completely unspecified except that it is symmetric about zero. Because each component distribution has both a nonparametric part $\phi(\cdot)$ and a parametric part $\mu_j$, we refer to this model as semiparametric. Under the additional assumption that $\phi(\cdot)$ is absolutely continuous with respect to Lebesgue measure, \citet{bordes2007sas} propose a stochastic algorithm for estimating the model parameters, namely, $(\vec\lambda, \vec\mu, \phi)$. This algorithm is implemented by the \pkg{mixtools} function \code{spEMsymloc}. This function also implements a nonstochastic version of the algorithm, which is the default and which is a special case of the general algorithm described in Section~\ref{section:EMlike}.
<>= pdf(file="spsymmfig1.pdf", paper="special", width=8, height=8) par(mar=0.1+c(5,4.2,4,1.8)) plot(wait1, which = 2, cex.axis = 1.4, cex.lab = 1.5, cex.main = 1.5, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") wait2 <- spEMsymloc(waiting, mu0 = c(55, 80)) plot(wait2, lty = 2, newplot = FALSE, addlegend = FALSE) dev.off() pdf(file="spsymmfig2.pdf", paper="special", width=8, height=8) par(mar=0.1+c(5,4.2,4,1.8)) wait2a <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 1) wait2b <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 6) plot(wait2a, lty = 1, addlegend = FALSE, cex.axis = 1.4, cex.lab = 1.5, cex.main = 1.5, title = "Time between Old Faithful eruptions", xlab = "Minutes") plot(wait2b, lty = 2, newplot = FALSE, addlegend = FALSE) dev.off() @ \begin{figure}[h] \centering \includegraphics[height=3in,width=3in]{spsymmfig1} \includegraphics[height=3in,width=3in]{spsymmfig2} \caption{The Old Faithful dataset, fit using different algorithms in \pkg{mixtools}. Left: the fitted Gaussian components (solid) and a semiparametric fit assuming model (\ref{spmodel}) with the default bandwidth of $4.0$ (dashed); Right: the same model (\ref{spmodel}) using bandwidths of $1.0$ (solid) and $6.0$ (dashed).} \label{spsymmfig} \end{figure} As noted in Figure \ref{geyser}, model (\ref{spmodel}) appears to be an appropriate model for the Old Faithful waiting times dataset. Here, we provide code that applies the \code{spEMsymloc} function to these data. 
First, we display the normal mixture solution of Figure \ref{geyserEM} with a semiparametric solution superimposed, in Figure \ref{spsymmfig}(a): <>= plot(wait1, which = 2, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, main2 = "Time between Old Faithful eruptions", xlab2 = "Minutes") wait2 <- spEMsymloc(waiting, mu0 = c(55, 80)) plot(wait2, lty = 2, newplot = FALSE, addlegend = FALSE) @ Because the semiparametric version relies on a kernel density estimation step (\ref{densest}), it is necessary to select a bandwidth for this step. By default, \code{spEMsymloc} uses a fairly simplistic approach: It applies ``Silverman's rule of thumb'' \citep{silverman1986des} to the entire dataset using the \code{bw.nrd0} function in \proglang{R}. For the Old Faithful waiting time dataset, this bandwidth is about~$4$: <>= bw.nrd0(waiting) @ But the choice of bandwidth can make a big difference, as seen in Figure \ref{spsymmfig}(b). <>= wait2a <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 1) wait2b <- spEMsymloc(waiting, mu0 = c(55, 80), bw = 6) plot(wait2a, lty = 1, addlegend = FALSE, cex.axis = 1.4, cex.lab = 1.4, cex.main = 1.8, xlab = "Minutes", title = "Time between Old Faithful eruptions") plot(wait2b, lty = 2, newplot = FALSE, addlegend = FALSE) @ We find that with a bandwidth near $2$, the semiparametric solution looks quite close to the normal mixture solution of Figure \ref{geyserEM}. Reducing the bandwidth further results in the ``bumpiness'' exhibited by the solid line in Figure \ref{spsymmfig}(b). On the other hand, with a bandwidth of 8, the semiparametric solution completely breaks down in the sense that the algorithm tries to make each component look similar to the whole mixture distribution. We encourage the reader to experiment by changing the bandwidth in the above code.
\subsection{A trivariate Gaussian example} \label{ss:trigauss} As a first simple, nonparametric example, we simulate a Gaussian trivariate mixture with independent repeated measures and a shift of location between the two components in each coordinate, i.e., $m=2$, $r=3$, and $b_k=k$, $k=1,2,3$. The individual densities $f_{jk}$ are the densities of $\CN(\mu_{jk},1)$, with component means $\vec\mu_1 = (0,0,0)$ and $\vec\mu_2=(3,4,5)$. This example was introduced by \citet{hall2005nim} then later reused by \citet{benaglia2009} for comparison purposes. Note that the parameters in this model are identifiable, since \citet{hall2003nec} showed that for two components ($m=2$), identifiability holds in model~\eqref{mvmixture} under mild assumptions as long as $r\ge3$, even in the most general case in which $b_k=k$ for all $k$. A function \code{ise.npEM} has been included in \pkg{mixtools} for numerically computing the integrated squared error (ISE) relative to a user-specified true density for a selected estimated density $\hat f_{jk}$ from \code{npEM} output. Each density $\hat f_{jk}$ is computed using equation~(\ref{densest}) together with the posterior probabilities after convergence of the algorithm, i.e., the final values of the $\post_{ij}^t$ (when \code{stochastic = FALSE}). We illustrate the usage of \code{ise.npEM} in this example by running a Monte Carlo simulation for $S$ replications, then computing the square root of the mean integrated squared error (MISE) for each density, where \[ {\rm MISE} = \frac{1}{S}\sum_{s=1}^S \int \left(\hat f_{jk}^{(s)}(u)-f_{jk}(u)\right)^2\,du,\quad j=1,2 \mbox{ and } k=1,2,3.
\] For this example, we first set up the model true parameters with $S=100$ replications of $n=300$ observations each: <>= m <- 2; r <- 3; n <- 300; S <- 100 lambda <- c(0.4, 0.6) mu <- matrix(c(0, 0, 0, 3, 4, 5), m, r, byrow = TRUE) sigma <- matrix(rep(1, 6), m, r, byrow = TRUE) @ Next, we set up ``arbitrary'' initial centers, a matrix for storing sums of integrated squared errors, and an integer storing the number of suspected instances of label switching that may occur during the replications: <>= centers <- matrix(c(0, 0, 0, 4, 4, 4), 2, 3, byrow = TRUE) ISE <- matrix(0, m, r, dimnames = list(Components = 1:m, Blocks = 1:r)) nblabsw <- 0 @ Finally, we run the Monte Carlo simulation, using the \code{samebw = FALSE} option since it is more appropriate for this location-shift model: <>= set.seed(1000) for (mc in 1:S) { x <- rmvnormmix(n, lambda, mu, sigma) a <- npEM(x, centers, verb = FALSE, samebw = FALSE) if (a$lambda[1] > a$lambda[2]) nblabsw <- nblabsw + 1 for (j in 1:m) { for (k in 1:r) { ISE[j, k] <- ISE[j, k] + ise.npEM(a, j, k, dnorm, lower = mu[j, k] - 5, upper = mu[j, k] + 5, plots = FALSE, mean = mu[j, k], sd = sigma[j, k])$value #$ } } } MISE <- ISE/S print(sqMISE <- sqrt(MISE)) @ We can examine the \code{npEM} output from the last replication above using <>= summary(a) @ We can also get plots of the estimated component densities for each block (recall that in this example, block $\ell$ consists only of coordinate $\ell$) using the \code{plot} function. The resulting plots are given in Figure~\ref{fig:gausstrivariate}. 
<>= plot(a) @ <>= pdf("gauss3rm.pdf", paper="special", width=10, height=5) par(mfrow=c(1,3), ask=F) plot(a) dev.off() @ \begin{figure}[h] \centering \includegraphics[width=.99\textwidth]{gauss3rm} \caption{Output of the \code{npEM} algorithm for the trivariate Gaussian model with independent repeated measures.} \label{fig:gausstrivariate} \end{figure} \subsection{A more general multivariate nonparametric example} \label{sec:generalmv} In this section, we fit a more difficult example, with non-multimodal mixture densities (in block \#2), heavy-tailed distributions, and different scales among the coordinates. The model is multivariate with $r=5$ repeated measures and $m=2$ components (hence identifiability holds; cf.\ \citet{hall2003nec} as cited in Section~\ref{ss:trigauss}). The $5$ repeated measures are grouped into $B=2$ blocks, with $b_1=b_2=b_3=1$ and $b_4=b_5=2$. Block $1$ corresponds to a mixture of two noncentral Student $t$ distributions, $t'(2,0)$ and $t'(10,8)$, where the first parameter is the number of degrees of freedom, and the second is the non-centrality. Block~2 corresponds to a mixture of Beta distributions, ${\cal B}(1,1)$ (which is actually the uniform distribution over $[0,1]$) and ${\cal B}(1,5)$. The first component weight is $\lambda_1 = 0.4$. The true mixtures are depicted in Figure~\ref{fig:true5rm}. 
<>= pdf("truepdf5rm_block1.pdf") par(mar=0.1+c(5,4.2,4,1.5)) x <- seq(-10, 25, len=250) plot(x, .4* dt(x, 2, 0) + .6 * dt(x, 10, 8), type="l", lwd=3, col=2, cex.axis=1.4, cex.lab=1.5, cex.main=1.5, main="Block 1", xlab="", ylab="Density") lines (x, .4*dt(x, 2, 0), lwd=4, lty=2) lines (x, .6*dt(x, 10, 8), lwd=4, lty=2) dev.off() pdf("truepdf5rm_block2.pdf") par(mar=0.1+c(5,4.2,4,1.5)) x <- seq(0, 1, len=250) plot(x, .4 + .6 * dbeta(x, 1, 5), type="l", lwd=3, col=2, cex.axis=1.4, cex.lab=1.5, cex.main=1.5, main="Block 2", xlab="", ylab="Density", ylim= c(0, 3.4)) lines (x, rep(.4, 250), lwd=4, lty=2) lines (x, .6*dbeta(x, 1, 5), lwd=4, lty=2) dev.off() @ \begin{figure}[h] \centering \includegraphics[height=2.5in,width=2.5in]{truepdf5rm_block1} \includegraphics[height=2.5in,width=2.5in]{truepdf5rm_block2} \caption{True densities for the mixture of Section~\ref{sec:generalmv}, with individual component densities (scaled by $\lambda_j$) in dotted lines and mixture densities in solid lines. The noncentral $t$ mixture of coordinates 1 through 3 is on the left, the beta mixture of coordinates 4 and 5 on the right.} \label{fig:true5rm} \end{figure} To fit this model in \pkg{mixtools}, we first set up the model parameters: <>= m <- 2; r <- 5 lambda <- c(0.4, 0.6) df <- c(2, 10); ncp <- c(0, 8) sh1 <- c(1, 1) ; sh2 <- c(1, 5) @ Then we generate a pseudo-random sample of size $n=300$ from this model: <>= n <- 300; z <- sample(m, n, rep = TRUE, prob = lambda) r1 <- 3; z2 <- rep(z, r1) x1 <- matrix(rt(n * r1, df[z2], ncp[z2]), n, r1) r2 <- 2; z2 <- rep(z, r2) x2 <- matrix(rbeta(n * r2, sh1[z2], sh2[z2]), n, r2) x <- cbind(x1, x2) @ For this example in which the coordinate densities are on different scales, it is obvious that the bandwidth in \code{npEM} should depend on the blocks and components. 
We set up the block structure and some initial centers, then run the algorithm with the option \code{samebw = FALSE}: <>= id <- c(rep(1, r1), rep(2, r2)) centers <- matrix(c(0, 0, 0, 1/2, 1/2, 4, 4, 4, 1/2, 1/2), m, r, byrow = TRUE) b <- npEM(x, centers, id, eps = 1e-8, verb = FALSE, samebw = FALSE) @ Figure~\ref{fig:npEM5rm} shows the resulting density estimates, which may be obtained using the plotting function included in \pkg{mixtools}: <>= plot(b, breaks = 15) @ % plot(b, breaks = 15, cex.main = 1.5, cex.lab = 1.5, cex.axis = 1.4, % cex.legend = 1.5) <>= pdf("npEM5rm.pdf", width=8, height=5) par(mfrow=c(1,2)) plot(b, breaks = 15) dev.off() @ \begin{figure}[h] \centering \includegraphics[width=.95\textwidth]{npEM5rm} \caption{Result of plotting \code{npEM} output for the example of Section~\ref{sec:generalmv}. Since $n=300$, the histogram on the left includes 900 observations and the one on the right includes 600.} \label{fig:npEM5rm} \end{figure} Finally, we can compute the ISE of the estimated density relative to the truth for each block and component. The corresponding output is depicted in Figure \ref{fig:ISEnpEM5rm}. 
<>= par(mfrow=c(2,2)) for (j in 1:2){ ise.npEM(b, j, 1, truepdf = dt, lower = ncp[j] - 10, upper = ncp[j] + 10, df = df[j], ncp = ncp[j]) ise.npEM(b, j, 2, truepdf = dbeta, lower = -0.5, upper = 1.5, shape1 = sh1[j], shape2 = sh2[j]) } @ <>= options(warn=-1) pdf("ISEnpEM5rm.pdf", width=8, height=8) par(mfrow = c(2, 2)) for (j in 1:2){ ise.npEM(b, j, 1, truepdf = dt, lower = ncp[j] - 10, upper = ncp[j] + 10, df = df[j], ncp = ncp[j]) ise.npEM(b, j, 2, truepdf = dbeta, lower = -0.5, upper = 1.5, shape1 = sh1[j], shape2 = sh2[j]) } dev.off() @ \begin{figure}[h] \centering \includegraphics[height=5in,width=6in]{ISEnpEM5rm} \caption{\code{ise.npEM} output for the 5-repeated measures example; the true densities are $f_{11}\equiv t'(2,0)$, $f_{21}\equiv t'(10,8)$, $f_{12}\equiv {\cal U}_{(0,1)}$, $f_{22}\equiv {\cal B}(1,5)$.} \label{fig:ISEnpEM5rm} \end{figure} \section{Mixtures of regressions} \label{section:reg} \subsection{Mixtures of linear regressions} Consider a mixture setting where we now assume $\textbf{X}_{i}$ is a vector of covariates observed with a response $Y_{i}$. The goal of mixtures of regressions is to describe the conditional distribution of $Y_{i}|\textbf{X}_{i}$. Mixtures of regressions have been extensively studied in the econometrics literature and were first introduced by \citet{quandt1972sr} as the \textit{switching regimes} (or \textit{switching regressions}) problem. A switching regimes system is often compared to \textit{structural change} in a system \citep{quandtram1978sr}. A structural change assumes the system depends deterministically on some observable variables, but switching regimes implies one is unaware of what causes the switch between regimes. In the case where it is assumed there are two heterogeneous classes, \citet{quandt1972sr} characterized the switching regimes problem ``by assuming that nature chooses between regimes with probabilities $\lambda$ and $1-\lambda$''. 
Suppose we have $n$ independent univariate observations, $y_{1},\ldots,y_{n}$, each with a corresponding vector of predictors, $\textbf{x}_{1},\ldots,\textbf{x}_{n}$, with $\textbf{x}_{i}=(x_{i,1},\ldots,x_{i,p})^\top$ for $i=1,\ldots,n$. We often set $x_{i,1}=1$ to allow for an intercept term. Let $\textbf{y}=(y_{1},\ldots,y_{n})^\top$ and let $\underline{\textbf{X}}$ be the $n\times p$ matrix consisting of the predictor vectors. Suppose further that each observation $(y_{i}, \vec x_i)$ belongs to one of $m$ classes. Conditional on membership in the $j$th component, the relationship between $y_{i}$ and $\textbf{x}_{i}$ is the normal regression model \begin{equation}\label{regmodel} y_{i}=\textbf{x}_{i}^\top\vec{\beta}_{j}+\epsilon_{i}, \end{equation} where $\epsilon_{i}\thicksim \CN(0,\sigma^{2}_{j})$ and $\vec{\beta}_{j}$ and $\sigma_{j}^{2}$ are the $p$-dimensional vector of regression coefficients and the error variance for component $j$, respectively. Accounting for the mixture structure, the conditional density of $y_{i}|\vec x_i$ is \begin{equation}\label{mor} g_{\vec\theta}(y_{i}|\textbf{x}_{i})=\sum_{j=1}^{m}\lambda_{j} \phi(y_{i} | \textbf{x}_{i}^\top\vec{\beta}_{j},\sigma^{2}_{j}), \end{equation} where $\phi(\cdot|\textbf{x}^\top\vec{\beta}_{j},\sigma^{2}_{j})$ is the normal density with mean $\textbf{x}^\top\vec\beta$ and variance $\sigma^2$. Notice that the model parameter for this setting is $\vec\theta=(\vec\lambda,(\vec{\beta}_{1},\sigma^2_{1}),\ldots,(\vec{\beta}_{m},\sigma^2_{m}))$. The mixture of regressions model (\ref{mor}) differs from the well-known mixture of multivariate normals model $(Y_{i},\textbf{X}_{i}^\top)^\top\thicksim \sum_{j=1}^{m}\lambda_{j}\CN_{p+1}(\vec{\mu}_{j},\Sigma_{j})$ because model (\ref{mor}) makes no assertion about the marginal distribution of $\textbf{X}_{i}$, whereas the mixture of multivariate normals specifies that $\textbf{X}_{i}$ itself has a mixture of multivariate normals distribution. 
<>= data("CO2data") attach(CO2data) pdf("gnpdata.pdf") par(mar=0.1+c(5,4.2,4,1.5)) plot(GNP, CO2, xlab="Gross National Product", ylab=expression(paste(CO[2]," per Capita")), cex.lab=1.5, cex.main=1.5, cex.axis=1.4, main="1996 GNP and Emissions Data") text(GNP, CO2, country, adj=c(.5,-.5)) dev.off() @ \begin{figure}[h] \centering \includegraphics[height=3in,width=3in]{gnpdata.pdf} \caption{1996 data on gross national product (GNP) per capita and estimated carbon dioxide ($\textrm{CO}_{2}$) emissions per capita. Note that ``CH'' stands for Switzerland, not China.} \label{gnpdata} \end{figure} As a simple example of a dataset to which a mixture of regressions model may be applied, consider the sample depicted in Figure \ref{gnpdata}. In this dataset, the measurements of carbon dioxide ($\textrm{CO}_{2}$) emissions are plotted versus the gross national product (GNP) for $n=28$ countries. These data are included in \pkg{mixtools}; type \code{help("CO2data")} in \proglang{R} for more details. \citet{hurn} analyzed these data using a mixture of regressions from the Bayesian perspective, pointing out that ``there do seem to be several groups for which a linear model would be a reasonable approximation.'' They further point out that identification of such groups could clarify potential development paths of lower GNP countries. \subsection{EM algorithms for mixtures of regressions} A standard EM algorithm, as described in Section~\ref{section:EM}, may be used to find a local maximum of the likelihood surface. \citet{deveaux1989} describes EM algorithms for mixtures of regressions in more detail, including proposing a method for choosing a starting point in the parameter space. 
The E-step is the same as for any finite mixture model EM algorithm; i.e., the $p_{ij}^{(t)}$ values are updated according to equation (\ref{posteriors})---or, in reality, equation (\ref{altposteriors})---where each $\phi_j^{(t)}(\vec x_i)$ is replaced in the regression context by $\phi(y_i | \vec x_i^\top\vec\beta_j, \sigma_j^2)$: \begin{equation}\label{regposteriors} \post_{ij}^{(t)} = \left[ 1 + \sum_{j'\ne j} \frac{ \lambda_{j'}^{(t)} \phi(y_i | \vec x_i^\top\vec\beta_{j'}, \sigma_{j'}^2)}{\lambda_j^{(t)} \phi(y_i | \vec x_i^\top\vec\beta_j, \sigma_j^2)} \right]^{-1} \end{equation} The update to the $\lambda$ parameters in the M-step, equation (\ref{lambda}), is also the same. Letting $\textbf{W}_{j}^{(t)}=\textrm{diag}(\post_{1j}^{(t)},\ldots,\post_{nj}^{(t)})$, the additional M-step updates to the $\vec\beta$ and $\sigma$ parameters are given by \begin{eqnarray}\label{betaEM} \vec{\beta}_{j}^{(t+1)} &=& (\underline{\textbf{X}}^\top\textbf{W}_{j}^{(t)}\underline{\textbf{X}})^{-1}\underline{\textbf{X}}^\top \textbf{W}_{j}^{(t)}\textbf{y} \quad \mbox{and} \\ \label{sigma} \sigma_{j}^{2(t+1)} &=& \frac{\biggl\|\textbf{W}_{j}^{1/2(t)}(\textbf{y}-\underline{\textbf{X}}\vec{\beta}_{j}^{(t+1)})\biggr\|^{2}}{\mbox{tr}(\textbf{W}_{j}^{(t)})}, \end{eqnarray} where $\|\textbf{A}\|^{2}=\textbf{A}^\top\textbf{A}$ and $\mbox{tr}(\textbf{A})$ means the trace of the matrix $\textbf{A}$. Notice that equation (\ref{betaEM}) is a weighted least squares (WLS) estimate of $\vec{\beta}_{j}$ and equation (\ref{sigma}) resembles the variance estimate used in WLS. Allowing each component to have its own error variance $\sigma_j^2$ results in the likelihood surface having no maximizer, since the likelihood may be driven to infinity if one component gives a regression surface passing through one or more points exactly and the variance for that component is allowed to go to zero. 
A similar phenomenon is well-known in the finite mixture-of-normals model where the component variances are allowed to be distinct \citep{mclachlan2000fmm}. However, in practice we observe this behavior infrequently, and the \pkg{mixtools} functions automatically force their EM algorithms to restart at randomly chosen parameter values when it occurs. A local maximum of the likelihood function, a consistent version of which is guaranteed to exist by the asymptotic theory as long as the model is correct and all $\lambda_j$ are positive, usually results without any restarts. The function \code{regmixEM} implements the EM algorithm for mixtures of regressions in \pkg{mixtools}. This function has arguments that control options such as adding an intercept term, \code{addintercept = TRUE}; forcing all $\vec\beta_j$ estimates to be the same, \code{arbmean = FALSE} (for instance, to model outlying observations as having a separate error variance from the non-outliers); and forcing all $\sigma_j^2$ estimates to be the same, \code{arbvar = FALSE}. For additional details, type \code{help("regmixEM")}. As an example, we fit a 2-component model to the GNP data shown in Figure \ref{gnpdata}. \citet{hurn} and \citet{youngphd} selected 2 components for this dataset using model selection criteria, Bayesian approaches to selecting the number of components, and a bootstrapping approach. 
The function \code{regmixEM} will be used for fitting a 2-component mixture of regressions by an EM algorithm: <>= data("CO2data") attach(CO2data) @ <>= CO2reg <- regmixEM(CO2, GNP, lambda = c(1, 3) / 4, beta = matrix(c(8, -1, 1, 1), 2, 2), sigma = c(2, 1)) @ We can then pull out the final observed log-likelihood as well as estimates for the 2-component fit, which include $\hat{\lambda}$, $\hat{\vec{\beta}}_{1}$, $\hat{\vec{\beta}}_{2}$, $\hat{\sigma}_{1}$, and $\hat{\sigma}_{2}$: <>= summary(CO2reg) @ The reader is encouraged to alter the starting values or let the internal algorithm generate random starting values. However, this fit seems appropriate and the solution is displayed in Figure \ref{co2EM} along with 99\% Working-Hotelling Confidence Bands, which are constructed automatically by the \code{plot} method in this case by assigning each point to its most probable component and then fitting two separate linear regressions: <>= plot(CO2reg, density = TRUE, alpha = 0.01, cex.main = 1.5, cex.lab = 1.5, cex.axis = 1.4) @ \setkeys{Gin}{width=0.49\textwidth} \begin{figure}[!h] \centering <>= for(i in 1:2){ file=paste("CO2reg", i, ".pdf", sep="") pdf(file=file, paper="special", width=6, height=6) plot(CO2reg, whichplots=i, alpha = 0.01, cex.main = 1.5, cex.lab = 1.5, cex.axis = 1.4) dev.off() cat("\\includegraphics{", file, "}\n", sep="") } @ \caption{The GNP data fitted with a 2-component parametric EM algorithm in \pkg{mixtools}. Left: the sequence of log-likelihood values, $L_{\Bx}(\f^{(t)})$; Right: the fitted regression lines with 99\% Working-Hotelling Confidence Bands.} \label{co2EM} \end{figure} \subsection{Predictor-dependent mixing proportions} \label{section:pdmp} Suppose that in model (\ref{mor}), we replace $\lambda_j$ by $\lambda_{j}(\textbf{x}_{i})$ and assume that the mixing proportions vary as a function of the predictors $\textbf{x}_{i}$. Allowing this type of flexibility in the model might be useful for a number of reasons. 
For instance, sometimes it is the proportions $\lambda_j$ that are of primary scientific interest, and in a regression setting it may be helpful to know whether these proportions appear to vary with the predictors. As another example, consider a \code{regmixEM} model using \code{arbmean = FALSE} in which the mixture structure only concerns the error variance: In this case, $\lambda_j(\vec x)$ would give some sense of the proportion of outliers in various regions of the predictor space. One may assume that $\lambda_{j}(\textbf{x})$ has a particular parametric form, such as a logistic function, which introduces new parameters requiring estimation. This is the idea of the \textit{hierarchical mixtures of experts} (HME) procedure \citep{jacobsall}, which is commonly used in neural networks and which is implemented, for example, in the \pkg{flexmix} package for \proglang{R} \citep{jss:Leisch:2004, Grun+Leisch:2008}. However, a parametric form of $\lambda_{j}(\textbf{x})$ may be too restrictive; in particular, the logistic function is monotone, which may not realistically capture the pattern of change of $\lambda_j$ as a function of $\vec x$. As an alternative, \citet{young2009mor} propose a nonparametric estimate of $\lambda_{j}(\textbf{x}_{i})$ that uses ideas from kernel density estimation. The intuition behind the approach of \citet{young2009mor} is as follows: The M-step estimate (\ref{lambda}) of $\lambda_j$ at each iteration of a finite mixture model EM algorithm is simply an average of the ``posterior'' probabilities $p_{ij}=\E(Z_{ij}|\mbox{data})$. As a substitute, the nonparametric approach uses local linear regression to approximate the $\lambda_j(\textbf{x})$ function. 
Considering the case of univariate $x$ for simplicity, we set $\lambda_j(x) = \hat\alpha_{0j}(x)$, where \begin{equation}\label{lambdax} (\hat\alpha_{0j}(x), \hat\alpha_{1j}(x))= \arg\min_{(\alpha_0, \alpha_1)} \sum_{i=1}^n K_h(x-x_i) \left[ p_{ij} - \alpha_0 - \alpha_1(x-x_i) \right]^2 \end{equation} and $K_h(\cdot)$ is a kernel density function with scale parameter (i.e., bandwidth) $h$. It is straightforward to generalize equation (\ref{lambdax}) to the case of vector-valued $\vec x$ by using a multivariate kernel function. \citet{young2009mor} give an iterative algorithm for estimating mixture of regression parameters that replaces the standard $\lambda_j$ updates (\ref{lambda}) by the kernel-weighted version (\ref{lambdax}). The algorithm is otherwise similar to a standard EM; thus, like the algorithm in Section~\ref{section:EMlike} of this article, the resulting algorithm is an EM-like algorithm. Because only the $\lambda_j$ parameters depend on $\vec x$ (and are thus ``locally estimated''), whereas the other parameters (the $\vec\beta_j$ and $\sigma_j$) can be considered to be globally estimated, \citet{young2009mor} call this algorithm an iterative global/local estimation (IGLE) algorithm. Naturally, it replaces the usual E-step (\ref{regposteriors}) by a modified version in which each $\lambda_j$ is replaced by $\lambda_j(x_i)$. The function \code{regmixEM.loc} implements the IGLE algorithm in \pkg{mixtools}. Like the \code{regmixEM} function, \code{regmixEM.loc} has the flexibility to include an intercept term by using \code{addintercept = TRUE}. Moreover, this function has the argument \code{kern.l} to specify the kernel used in the local estimation of the $\lambda_{j}(\textbf{x}_{i})$. Kernels the user may specify include \code{"Gaussian"}, \code{"Beta"}, \code{"Triangle"}, \code{"Cosinus"}, and \code{"Optcosinus"}. 
Further numeric arguments relating to the chosen kernel include \code{kernl.g} to specify the shape parameter for when \code{kern.l = "Beta"} and \code{kernl.h} to specify the bandwidth which controls the size of the window used in the local estimation of the mixing proportions. See the corresponding help file for additional details. For the GNP and emissions dataset, Figure \ref{co2EM} indicates that the assumption of constant weights for the component regressions across all values of the covariate space may not be appropriate. The countries with higher GNP values appear to have a greater probability of belonging to the first component (i.e., the red line in Figure \ref{co2EM}). We will therefore apply the IGLE algorithm to this dataset. We will use the triweight kernel in equation (\ref{lambdax}), which is given by setting $\gamma=3$ in \begin{equation}\label{beta} K_{h}(x)=\frac{1}{hB(1/2,\gamma+1)}\left(1-\frac{x^2}{h^2}\right)^{\gamma}_{+}, \end{equation} where $B(x,y)=\Gamma(x)\Gamma(y)/\Gamma(x+y)$ is the beta function. For the triweight, $B(1/2, 4)$ is exactly $32/35$. This kernel may be specified in \code{regmixEM.loc} with \code{kern.l = "Beta"} and \code{kernl.g = 3}. The bandwidth we selected was $h=20$, which we specify with \code{kernl.h = 20}. For this implementation of the IGLE algorithm, we set the parameter estimates and posterior probability estimates obtained from the mixture of regressions EM algorithm as starting values for $\hat{\vec{\beta}}_{1}$, $\hat{\vec{\beta}}_{2}$, $\hat{\sigma}_{1}$, $\hat{\sigma}_{2}$, and $\lambda(x_{i})$. <>= CO2igle <- regmixEM.loc(CO2, GNP, beta = CO2reg$beta, sigma = CO2reg$sigma, lambda = CO2reg$posterior, kern.l = "Beta", kernl.h = 20, kernl.g = 3) @ We can view the estimates for $\hat{\vec{\beta}}_{1}$, $\hat{\vec{\beta}}_{2}$, $\hat{\sigma}_{1}$, and $\hat{\sigma}_{2}$. 
Notice that the estimates are comparable to those obtained for the mixture of regressions EM output and the log-likelihood value is slightly higher. <>= summary(CO2igle) @ Next, we can plot the estimates of $\lambda(x_{i})$ from the IGLE algorithm. <>= plot(GNP, CO2igle$post[,1], xlab = "GNP", cex.axis = 1.4, cex.lab = 1.5, ylab = "Final posterior probabilities") lines(sort(GNP), CO2igle$lambda[order(GNP), 1], col=2) abline(h = CO2igle$lambda[1], lty = 2) @ <>= pdf("lamplot.pdf") plot(GNP, CO2igle$post[,1], xlab = "GNP", cex.axis = 1.4, cex.lab = 1.5, ylab = "Final posterior probabilities") lines(sort(GNP), CO2igle$lambda[order(GNP), 1], col=2, lwd=2) abline(h = CO2igle$lambda[1], lty = 2, lwd=2) dev.off() @ This plot is given in Figure \ref{lamplot}. Notice the curvature provided by the estimates from the IGLE fit. These fits indicate an upward trend in the posteriors. The predictor-dependent mixing proportions model provides a viable way to reveal this trend since the regular mixture of regressions fit simply provides the same estimate of $\lambda$ for all $x_{i}$. \begin{figure}[h] \centering \includegraphics[height=3in,width=3in]{lamplot.pdf} \caption{Posterior membership probabilities $p_{i1}$ for component one versus the predictor GNP along with estimates of $\lambda_1(x)$ from the IGLE algorithm (the solid red curve) and $\lambda_1$ from the mixture of linear regressions EM algorithm (the dashed black line).} \label{lamplot} \end{figure} \subsection{Parametric bootstrapping for standard errors} With likelihood methods for estimation in mixture models, it is possible to obtain standard error estimates by using the inverse of the observed information matrix when implementing a Newton-type method. However, this may be computationally burdensome. An alternative way to report standard errors in the likelihood setting is by implementing a parametric bootstrap. 
\citet{eftib} claim that the parametric bootstrap should provide similar standard error estimates to the traditional method involving the information matrix. In a mixture-of-regressions context, a parametric bootstrap scheme may be outlined as follows: \begin{enumerate} \item Use \code{regmixEM} to find a local maximizer $\hat{\vec\theta}$ of the likelihood. \item For each $\textbf{x}_{i}$, simulate a response value $y_{i}^{*}$ from the mixture density $g_{\hat{\vec\theta}}(\cdot|\textbf{x}_{i})$. \item Find a parameter estimate $\tilde{\vec{\theta}}$ for the bootstrap sample using \code{regmixEM}. \item Use some type of check to determine whether label-switching appears to have occurred, and if so, correct it. \item Repeat steps 2 through 4 $B$ times to simulate the bootstrap sampling distribution of $\hat{\vec\theta}$. \item Use the sample covariance matrix of the bootstrap sample as an approximation to the covariance matrix of $\hat{\vec\theta}$. \end{enumerate} Note that step 4, which is not part of a standard parametric bootstrap, can be especially important in a mixture setting. The \pkg{mixtools} package implements a parametric bootstrap algorithm in the \code{boot.se} function. We may apply it to the regression example of this section, which assumes the same estimate of $\lambda$ for all $x_{i}$, as follows: <>= set.seed(123) CO2boot <- boot.se(CO2reg, B = 100) @ This output consists of both the standard error estimates and the parameter estimates obtained at each bootstrap replicate. An examination of the slope and intercept parameter estimates of the 100 bootstrap replicates reveals that no label-switching is likely to have occurred. 
For instance, the intercept terms of component one range from 4 to 11, whereas the intercept terms of component two are all tightly clumped around 0: <>= rbind(range(CO2boot$beta[1,]), range(CO2boot$beta[2,])) @ We may examine the bootstrap standard error estimates by themselves as follows: <>= CO2boot[c("lambda.se", "beta.se", "sigma.se")] @ \section[Additional capabilities of mixtools]{Additional capabilities of \pkg{mixtools}} \label{section:misc} \subsection{Selecting the number of components} \label{ss:nbcomp} Determining the number of components $k$ is still a major contemporary issue in mixture modeling. Two commonly employed techniques are information criteria and parametric bootstrapping of the likelihood ratio test statistic values for testing \begin{eqnarray}\label{mixturetest} \nonumber H_{0}&:& k = k_{0} \\ H_{1}&:& k = k_{0}+1 \end{eqnarray} for some positive integer $k_{0}$ \citep{mclach}. The \pkg{mixtools} package has functions to employ each of these methods using EM output from various mixture models. The information criterion functions calculate An Information Criterion (AIC) of \citet{aic}, the Bayesian Information Criterion (BIC) of \citet{schw}, the Integrated Completed Likelihood (ICL) of \citet{biern}, and the consistent AIC (CAIC) of \citet{boz}. The functions for performing parametric bootstrapping of the likelihood ratio test statistics sequentially test $k=k_{0}$ versus $k=k_{0}+1$ for $k_0=1, 2, \ldots$, terminating after the bootstrapped $p$-value for one of these tests exceeds a specified significance level. Currently, \pkg{mixtools} has functions for calculating information criteria for mixtures of multinomials (\code{multmixmodel.sel}), mixtures of multivariate normals under the conditionally i.i.d.\ assumption (\code{repnormmixmodel.sel}), and mixtures of regressions (\code{regmixmodel.sel}). 
Output from various mixture model fits available in \pkg{mixtools} can also be passed to the function \code{boot.comp} for the parametric bootstrapping approach. The parameter estimates from these EM fits are used to simulate data from the null distribution for the test given in (\ref{mixturetest}). For example, the following application of the \code{multmixmodel.sel} function to the water-level multinomial data from Section~\ref{section:cut} indicates that either 3 or 4 components seems like the best option (no more than 4 are allowed here since there are only 8 multinomial trials per observation and the mixture of multinomials requires $2m \le r+1$ for identifiability): <>= <> set.seed(10) multmixmodel.sel(watermult, comps = 1:4, epsilon = 0.001) @ \citet{youngphd} gives more applications of these functions to real datasets. \subsection{Bayesian methods} Currently, there are only two \pkg{mixtools} functions relating to Bayesian methodology and they both pertain to analyzing mixtures of regressions as described in \citet{hurn}. The \code{regmixMH} function performs a Metropolis-Hastings algorithm for fitting a mixture of regressions model where a proper prior has been assumed. The sampler output from \code{regmixMH} can then be passed to \code{regcr} in order to construct credible regions of the regression lines. Type \code{help("regmixMH")} and \code{help("regcr")} for details and an illustrative example. \section*{Acknowledgments} This research is partially supported by NSF Award SES-0518772. DRH received additional funding from Le Studium, an agency of the Centre National de la Recherche Scientifique of France. 
\bibliography{mixtools} \end{document} mixtools/inst/CITATION0000755000176200001440000000155011665556372014266 0ustar liggesuserscitHeader("To cite mixtools in publications use:") citEntry(entry = "Article", title = "{mixtools}: An {R} Package for Analyzing Finite Mixture Models", author = personList(as.person("Tatiana Benaglia"), as.person("Didier Chauveau"), as.person("David R. Hunter"), as.person("Derek Young")), journal = "Journal of Statistical Software", year = "2009", volume = "32", number = "6", pages = "1--29", url = "http://www.jstatsoft.org/v32/i06/", textVersion = paste("Tatiana Benaglia, Didier Chauveau, David R. Hunter, Derek Young (2009).", "mixtools: An R Package for Analyzing Finite Mixture Models.", "Journal of Statistical Software, 32(6), 1-29.", "URL http://www.jstatsoft.org/v32/i06/.") )