lava/
lava/inst/
lava/inst/CITATION:

## desc <- packageDescription("lava")
## year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", desc[["Date"]])
## vers <- paste("R package version", desc[["Version"]])
## title <- paste(desc[["Package"]], ": ", desc[["Title"]], sep="")
## author <- desc[["Author"]]
## plist <- personList(as.person(author))
## textver <- paste(author, " (", year, "). ", title, ". ", vers, ".", sep="")

author <- "Klaus K. Holst and Esben Budtz-Joergensen"
year <- 2013
journal <- "Computational Statistics"
title <- "Linear Latent Variable Models: The lava-package"
note <- "http://dx.doi.org/10.1007/s00180-012-0344-y"
volume <- 28
number <- 4
pages <- "1385-1452"
textver <- paste(author, " (", year, "). ", title, ". ", journal, " ",
                 volume, " (", number, "), pp. ", pages, ". ", note, sep="")

citHeader("To cite 'lava' in publications use:")

citEntry(entry = "Article",
         title = title,
         author = author,
         year = year,
         volume = volume,
         number = number,
         pages = pages,
         journal = journal,
         note = note,
         textVersion = textver)
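The CITATION entry above is what R renders when a user asks how to cite the package; a minimal sketch of retrieving it from an interactive session (assuming an installed copy of lava):

## Print the citation entry defined in inst/CITATION
citation("lava")
## The same entry rendered as BibTeX
toBibtex(citation("lava"))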
lava/inst/gof1.png: [binary PNG image data omitted]
lava/inst/mediation1.png: [binary PNG image data omitted]
lava/inst/doc/
lava/inst/doc/reference.pdf: [binary PDF data omitted: compressed object and page streams with embedded fonts, including figure XObjects sem1.pdf, graph1.pdf, and circo.pdf]
Tٌf>風'f_ԜmrV~z $êR$(hϜrDg[X|;ÒF=iUQb1^蜸B)"UZDgyc\&Mx? Np:"^`sӠ:trB"$M+|w k*adОvo Ŵ3\~2~ڔ83Lcx`yB3\sARhvߙYepu2ꓨ-}}ՌHV`4Ijք&0 qDh\HM^=~Hj(Dᒼ/`@c_.M1vK8Kk;jq<BNpfTP!۟{pI9pڳ#)ܨe f[#Pɂ;Ϋ2F(P^Y~3$|ArWOxe{G{"09p4WN R iL"MU;#S6ī_X:Y㻧ύ8Pp|S_U9c!WQ|ʖ٥LJH"ȷ[߸_ʹs|#*E $TMޯMDB}SL ײFBzco}܍n\tg +V\= &27upb5 A*zt: %T؋LP:gȄ.,.A"4ts 3y?Db|!h,p,XWl %lS[a ] i#f(bXFahL[ǢT(>aGJV%cn v`#Tm8V&&:J?aJda@|wvK#Ƃd\;Y{?Arӥ_H9d~\QrΏl^n9bCnXkDM6IdASacFQX"» јindW)R'Z=+xyT'tGLs ox凲Z,:_SmNJGMɹBZ)[f_hj-:_\<Ԩ eu,ˆEHU!GVuOjݔ95=KziG\S?"0ok$_;DBA=lؾiU5b\V^"|J-L{TEV=վ/O&' uH>uykـ؜RDkd5ޟ9z=F ],ΛMJ@>#c%]jjJɃ??,I*E:uelf|7X'q*%a&ad1kh4 , 3wEKK׶K;;fHIp#KaOrS( ^`XRPQ8 -֕;޻WKq^ h1n\+KLX[ȕZ>fOnsB*%yU%YD헻3wou }kwl3u%ʶEk?k^TA8'b\&S͙Zde-/Y22,TS!,NqgEgэOX vA";RM`sS}5,eX]ʩWڰ &T{-JXHDuPOOW-aj|-†;>wdR‘` XJ4R&* h:E 7>|G*=q+ hv4O Ċܘ/.$duc"¦([~4Ƒ :+/k1P7OXvV4s/}Z3U>hpٯ"HО>8kiRM3gD0-$Y"_4=o4tg˞bQ҅|ktU2J gyzyh55nHLi+ibK{5ߘޥj?eA˘40hxE5&\R^hA%,i5RȳMqJu-'̳c~c% .NVٰؽ9$] $ky:(tĬ}Sa JRTsY#?hoJSCwC{qThߣek/'/y\|ctW67)OkQcO3]|6Orl@lj.j~Oޮ<T5s]{lZk k'W̚lv9kO4$x{ sȗ}‚Yx F{ж9vƧ+Q#k BǟgC+ؽ ,v+Me)}|̕5cs7Lm˜?xj,sAXP젻d7? LDKƴ)ʜ[c{OI3,_PEp]_`f6a}ZђIخ up$GOIQMFxCx =\Qhu߾trQ~ᥝ5ىQ@Gw yeűBː,z-.YGu@w+ T?"M = ,Y݉7o'p^([ilTqpB"QHav,K0ZqZc rb*FNQaF G5<N&n㝿 VmkB ްt7'N6Y_ 6P젵ǂz lضvюj=>_Org8d{% ҥξEkI,p(?%}dz'4l^o@rp{meyI:K,V ";5t.NA%kԦ!qrߩ6D+ZnO"^Mq C6?v^5@ws"2B J_8;nRāJrÉ/E_U}A̻- mP`a-$}$!A͝M0+W1˘VD`" 3h`%% f8o}u>t3MN݈\`,8YTJä1ڠ!PYvuq`֝f\/;]\C=!TcQK'vvu x!`4W+!Jg[$۠V_TX䷻9~QT|+tSp[Fe=QlRvyP5$]XT '֩OVFVFE1FL*)9#)m~j7y io\Qpv&#xj5sU'CX1 V W~t^έ]hM{IJ[94zeu5{ۋƑ 假mj.9K"Y~ t;u YX=="h9!}h4{#t~_2/&!˅bRxK۴(߬ͩO4_Ǹe#aOd3avW 蹈QNQa"οvjSo4].A<2rzqBoJ%a4kNc2{8EyъЫ!oՀئq˪NtݎǏ~?W }] ҰMCz>-MNDlhqIJ~lHAq !a`G۶TGLP症 (NRowKi7Â>as2ߖݜז%=vAuԈ,TlXF @ 'HmR";.ߧ TfKP/)B,AԔ֎)*sY" ~ٱ ښMRMS=(9\x%V Et(:ͧKo)[MbH^?6=uy9[O lvuB+5K(A1%;,ŷ3ϺOwuLql1󦆂ki3Нf\0Bˌ]AȒ_Mӧ \+fIsRA]j 76)"*fu\/cS!H4ZRIpAL*Ų?Z] +GcPetqƊ{%ͫD:\j8?Чԓh v{ ث Rx6lcM8ƱB#sC%DJ!3?QNuu̷)Y QLXq_hW"J(,J:TOiI`$[:^&Xa.JkoElb]k-D)˝}2WTӃxr%QVy*(™V%! f:Ir[o]+BҼ[T]ՉgC*\@(ҼղI(; jl2UAN~Q*z!{eϟE4zZ[%n#괜OFzzڭFBl!W2[2~6* 8xvHuv^KB-{hݜW7{rS,}$?&"FzyH?5Mv4깺?# yn6'. s:$$$ZIfmM|͈]z|ǎ3>tB*1@^C顢Eb|]TBM!3F4\8FAOΧJL~]or}:K蓳PG_Wh?kL~)U\Y&L,~80$OvO~jPrf;(2}X?7FT?SpWc H{cs+g\.&:JSQ ܭAo;Gofwㅺɰ@hh#>F))HDr*%jbekT1}YI+wt4@E 2l]yS1' /~%, Ŭk3>{v_3q*#P'^|$PZoXHKHS;K{r!ZL౥ H3t^y40o&9 Pt˶jjG;]e"IN G0[vQ4)|Ps)75]A}Sp f nd9e[D<CL$0??_rԯ[8? c4|-wVR m.Bߙcav?r. %w MS"!y܃`iF"p:v)=l2[~=RT} , ڪ w`5~ݪ4kl=}?&SEEH0ḥ'kmt8iXhzrvYWZ<ݷ!;swI]s'4A7sܪr|\?[$<.4T]3@I^#3.C^\.Nu:B.T6A RȺd4 ݘb> stream x}cx]-vl'ٱaOбmc6:ӱmw|^{ιujTը1kJ"&F I['F  /@QdSR9 ,lm @M @ rrp@pΎf6v ++dYlAc+C St @dmrpLv& > <0h2g-\@L4=-;d Ϭ`@#fk`af1l%;:26ƶvL"VV* G?p,?9~4wdm` u F g%?ӧ`hcYƶ6N gOf^b}&; Ч .VFV >#0wrefvr6cu0co.OJSU?Z[XGzRʊؘY@8-%-@&JNSCυׯV6 %[G3--m@@6&rfVSWPU_)Xؘs&6&&oXI`?5ǖ7<87? 
O'#aoHcgӿsM->@ˋ|A?RӜjpuz>߫Uz+^I2ew aU/Wh(f`s}7N_xk2[ғ| Y1N1R *Cن@JJXO[RVYWE{mg= }<0ߞIS3~"Xe Zv3$in8v})ZRC9cV6`D4DKmcuLbe;[&r(\Tj[j~3mN rw53&:__=`Q_ijҘQߡ4R<ʡaUTI8#_P+b"f4B\j:&U^Dy 8v`wcܤ^:jºZ3cyWWȬ֨ϊ :Y!f Ezmѻ/J{ rw:%/F۝(*]S#;_^.1Ӟ`\9/t .S~ 6}.쇩?tQI0~)"yUX U܇:&rJ,pd e^W*(6>$Ow[2@Zk+&IZMv1;l,5LUsRBf 7]rK+bH|!m_Ti}(` OKV휸EhJ^I.D`ՠrq[h|{ (ɛ7>zH{#'C܂pz7m藘\nڼ`ܼVqb2"o#2#z}< QJɏK Ŵ6v'XW 8Pi2}kt=)u,BA{lIM흜AT!x5 ci=M&z]G ,y-TQH4bpNSl' SS {{6^Eg;= },7 -^.Ŏ ﷃg(gYH̆hUEn\-~7/H%8Kg)%+w{1_\PLmQG]1N¾ߴuhKw=֎_]&>*L]yӧh,@*{!4 N'ū+6rGeqhxU~!ᏽyK 3N8 lU|8ز[өfNjv9s2G#=#'嫮K}m0įiNGd"?V1Uh eS'elx>῅ū/CWṭ \9CzRVEq[}F/5O6;vNFAAzK/sL~T%v@TȰB({[QYB32a :D31n^,K YU|>+yA/e8Ue?-_IQTܔ#Wl:+tҫD?ZClr7Yۨ,1GkRYFGw| _d[_;ER'HlÔ)ւ(VRDž5zW9%) ?a)zK?+ k_jEFn&}S~cƺÒgTy&Gp}]|E]R:\tl0 ~zTt͈`NOg$+aPŪWd6nIK,qBӕT8fwTғEs vw Urlפ0;J d;L.vg'2d1ZRm%D_ǚj5z:e7],*&"ܒL)E\^٢(zRg߉ zNav|jIxjI?A@Uʑ`oub pԨD؂r!!gMn)/Ay3j: mweɦ Jq!2T ґjQ"ɧ.= f% )igQn-4v7;%(p% Db:l=HQ袡 mƐq=.&-WGj<- m^2kŋOAM}4."38sd}syvTaO9DJ: /-!i =Q`Z-.!7lq˲ɚJf{Ԕ?1Z8f1 ?v_q&]61kSN7qMI&Xgp YBCwk IbX3S Qؙ_w`/j6_+,&c'z |U +"X#7Č~݌)]P?˳+d= CX7\2m=pŋ#[;Kގ_E=gqo'0ޕ.u.[1# L\}'BIcNe?:!o0$E9炠ׯju E i(0pU WnZl'Z  j%=.2RHm?IDǸ&xC狂:RĬPY<{J7*R_TϛZ9PaˑX Wz IO6' 4gCkExިcn5";|~RT-|mĥ&lmwWYx'﯑s0/CD9(l} mۆږփY#3ђN4oHY帵WhZbFo-KYBVћkuFWL90\C抟IZآ[Dݧ4{#m.z? |SCD R2?hёz/H?Kg¤u<G:i*vH㯈1o7̢>7K3'"{#J(:0\? Tc%aZH983as"ICa !uOw6ׯF̗fD9)Yϊuh.fn0=/k>! ;$-p G4 fiA=]VzʅV%$ ?b *E&4}9Q~_ÀŔ\Yl7pn`m\m[}Lat~i]11LfG17Xxɽ_I׺=@pͰs\e.W4ةC‰^>C`XDօ+l6? KΆ}.?LXWoclO@tw䧞6Tȏ`?em޶:i ތc&!YӢ/J(g'h'=Nm(Bp U1% @> z:y.FLUBtA>:6{<7ך[P'`DK|ܖ&r ɤ739yEE2dʻ[;{ yUFM^Fs/n&Gcefq2v0qWS/p=)@u~@t5QP;};]nM?Py!V=Rr)Ok*d ^2%a^ "LXEV6% c:hh}AؿAw3d}f~J%f=Yʴ򰄝@#0UwP,IcRK&ΥW! 0)XjT|md[0K.") Y6ק7Mn> YWV|P@8cɖq./qee0'VZ' mH N9CtLp"b5m(=T#̒֓B3 FԬ] MroQq1d`zq?/'y(1R26UhW&{)9/=G@]1ۄ88>w 4 c`*j0cȪ> dDKK#F!Q׹8fLCIT:L3R;3[LUY!nf9:B*x%` '+በ_0`h/w1Z>MG 0b<]e4G|E=6+g'G=όMT-}U`^u%s{OGԩYaA)gXt#rKýI-5_Bx9w+*3@,Lpь:'}u:&L<hJ['cCvO.l/ 5KTW#o3hClt$6F_ P#w7*({9YQYҢ ^!pOצcyhx.<&t@.rJAp0<' }/d$a/AAzQ*^@ĻA\Wk=yJ -j9*NL%R$ۭS4zXd)4~٭Zcx+$Ļ& m,a!jGgD@e733fs@Fk5?~sYxQLDӾ/&hj< Xx=-FAcJs3 15C\_HDWbC˓}`"%oh 4×B<>U頣` N7؇+"\Ar%eO.à&|-YAT7fhXnB))b`ܖ] ȸX_#pŚ)Lǡ1ͼk讉\w~ O`>0t躊}qƽJ#Wd|+fҢdX3YG4(e,{6aG1mĦfÊOfat82h&5:}bDQɓP|yJ#DGqpn&-˱Ⱥ. ?jѷ~@am7d$Ȕωg[:K 㺢TV>~ɡk~HۀN4?stb_Ce7,/6’c6oy))Ɲ<*,LП W;J첺B9[nD8vºpL0_AΛ~~ ػube>+JklvW3 \c%cb75qKk^wTAMJ̇2\Z ; #?3qdVY"$ggj1tXC~`'& ^9.@xywmĩ)`/"rp DOXgͼ)yk̯A9$1t2|ݰ {?7N_ -!(}todjt?7TDOtjKAn_o%^.Rx2@B=ꚧ?m#[CNxEeqܫ㼖8dJhލ@ ^hU5xхHy7 dvޡr9W/} 6wم7z$m [_OJY{`c28 )tMxԏw7DXl|U$򙝈89TǣxptyaBV/S啛$[PF 7߮GwBOh8~wtfXR;e+lI^x5h-ll8Ii /#(QEB&xj;To,EbݯF{l79F])H_MƗ`˓2ǹ6g]!Nit&IrMD{&A8: =nܥ}[Ɯ au4AZ;3Lw7bIo~Ƽ:VKw w*ThT+ - }ǥ%nԤuM]?w鹅|B#Ҏe9$ KOG  j~֫S7+pItmT z+oPV|mcS3oD$GXPܡT{-\e!*s0VUMVʹDAo7E03~TO.`n0ضSaLb gx}}`_uF1sIk5k1a[1q屒t &-A3s0Ea>y-t"\?}J/61]Z`N3S}/eΐ̍O?"_b> /W [ 1 3 1 ] /Info 61 0 R /Root 60 0 R /Size 82 /ID [<297cea4fb66f652ada97a3379270ba6e><0d10c154310537300b061f586ae6a09f>] >> stream xcb&F~0 $8JҐW|Ȗ62P,A$*2z R,D*ۀ$0؄N @"J}5 endstream endobj startxref 140518 %%EOF lava/inst/me1.png0000644000176200001440000001425413162174023013347 0ustar liggesusersPNG  IHDRJ NPLTE  ###$$$%%%&&&'''***+++,,,---...///000111222333444555666777888999:::;;;<<<===>>>???@@@AAABBBCCCDDDEEEFFFGGGHHHIIIJJJKKKLLLMMMNNNOOOPPPQQQRRRSSSTTTUUUVVVWWWXXXYYYZZZ[[[\\\^^^___```aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrssstttuuuvvvwwwxxxyyyzzz{{{|||}}}~~~q;IDATxݍ՝,Zj[X$$b@DȃK5Yb4c$bW?;5Ͽbx86p<2 XdqȤ/g3 `bQfo.Ӷ [ݒ?o*u+)|O?ѳ65ؗA܃?󅃗ӿ9gL=yٴrwn97Pa: =|9CUOޚ?iMm`}yG{=#M,i~xxҫ (. 
֯7a]}^t˧Gj[ܖ~# g:$i}zhtMt$\6xR \ok~WZu?|nTC.m rJ+ֻR3_,}E;=5w`F}e_n8pDqnmxfeE i`m;fw5d݂.k=G7 {wk⭼7gU kmZ?l:+/ͧ=o=.&wSu@֚WocިY pxݦ7rpм O&I>pt98/wn7MaY{\ BT G`-#?p.^({ ׄz\?zzj^+Xsa(N=ﶷ=|Wy`? CauO0+la~}#X=u7~Gh VNYzjkAjǿa~jNNb}urv?'R=o4Z'fX%C_7Z- 'VLV˧vܷ1'nv g. I^NhoS Žڜ= BFck:]2QD wN09{7?9ie 1ެ6č_1Q&pwx>pYܻ|[pyRLU| |_e V*^<^-V] `aڧ~mN}+8J?Op%Xԩq$Ww-EW&>iXBokw{,*zjk}\puU`Q%'U`Q,u͟T޷@N RՏ(K:V%+"վB7o޳t,zW>޳9,9_ːkZ -q-&w+Knqww+[N i5=[B\cWP+*w.gWM|*{NמW,V ?_7~{O'\ۑz!BbR{X:,$Z^nks_GRT(v;$w`}m,87!XXȷ/g`X|E6/a ,.\Xߗ+گ r@ۖ~}~h%oRe ,V}N!μwe,;-W| }=ЛUL( V;x|j=//`ZaJxyl#p̗乮juݯ}&r~AO5G'(vzH>A/Ƚlڋ\mpf+ |qEUȹ~iGL=1a^a{N>)eV]gUi;Ty_M/6I40)x* I/_~ց'.Wgx.#kI231 'f$29aLJ $ݖg58`lJ&0I+p\IR"76_b \_%k4|K 0I7~WILm,m+F&gISoq71X\K0EP"|}% |% +@L}$`ʮb_)Tr)-\bI_a:PBb;K 0e߸Svg4LUoL5C0 T)%SxK7cq3H2=0W_YJp1& ++^ qD|L.>2Lmg8 ŢWt=Lp$E^4x|0a s oɽ4܋s\\,L47q0[O|P}Fbo`)h8}`·96jzY xk&om߽ m`·y:20_Sx,">+&|g{VΙN6)5M'R͡,7| U̩SSꃹU\k #[xZj0廂8[XO=';oc6\5emM|UPerSwQ`1#!b+k\}$]5!fM=fQ͏osW"|+J|8?ԧqh+%>E Ln_ ૻ(+/rS꣸\t~m8]|g$A.6OLsA..ofks*xTn(|}Tq~m&k _c5}gpË&| 6 #8l1V},_ _.-/g'4S8j*U5]xY?gick9gL=yٴrHpSwGp9 |s5cE{^0@GRxҫ (.':4܋nQmKp1<~|ä?šYOp yh`m$K 7 VH⎀HQ^=+KVi :»8"+<%g-|x2FUx+y >~e0|#+ fhqٿ KfzD^kE*@6 KBB}d7&3s9;_|39|9D"PDW@@h R@T 4JP) *@@h R@T 4JP) *@@h R@T 4JP) ЂxG =z4/BZV@GP)2*0r ]`ܹ @Z*''gɒ%3Aw^QEPْ 6,ZJz/bnnWHmdnW`bܹsΝZjUnnʕ+ihhuuu^ZKRU cT׮EĀ5jw(Z~#GnݺP(4qDaoSNzV`07nLD"gΜ233-:DO$Ι3g[n]`l9rݻ#gMYe˖ 8077>uꔒ>#fy~m(o~3xիWSXv38-$ %%%FqڴikBhqЍW_EM67߼B7n[B/~ Cvv6;,׿+ZvSOt:҈-ZkdÆ 5kּcǎUx@͘17@uvNNOO4hsN YnB讻ڸqcAA1c2jF.hqoD=z4TWW#nfz/]wEp tIIɉ!?Oɔ>-[اm81!rBq秥EɆB  ^/ynBUUUE"Çs ht\(B%ΝD"ǎ #\,0A 0O>y뭷B_1G9 9r$B|SN=CӧO!##!hBC`M233zѣGO8Q]]}a"ʁ!++KF򟤺vj9Oxzi$ ?ÿ~aĜB/]^曟~iR& F.DI;HnݺׇBH$lٲ;cϞ=GZO<_ڳgOo\vǏϜ9b)S8p,ʟ3QF!V.ZMo~b|wǵY`[w x~iNN߿|oرcdh& F."p@Q<'Nlٲ%ѵTA ,?~|+"g:>x`zz:{ڵk޽I'`'Yu2d9+(jjj] )=l3P) *@@5jpҢD4  \Z$ … wAnrϟ駟{4#GRVxQntQA^dIyyYbEqq]̙HS\\l2gφajI gʡ@o۶ H vYSS#***Ν;SOtww߿~%%%J\P.,ٳ7l؀裏*++/\H_Џݻaҥ9 )AShNJ+֬Ys=bM_ioo(R //VTU%Q HԸh**F2OD+VZJ8pTTT$V>{fϞB .l2tȥ ~yyydZ h>ދ/nqiF illû3=h!d-@aŊϟ///x%#)U~О,q_D s1V將LyZט9B .\~ɓOcM>]Z]'@sq6CX>=#T!*(1-+*+U˞cxO6/zՋ|Ş0@\I&!JJJn.\cr t>CaÆ2hm2ك:x𠜬,YB:ŨQ^~evQ;qc1"2t2c$^QAgرl`0PrĞ u$˱6=iӦg?͛%gm6>Ta=ɼ".+,Ф)&t\C:h OжJ!P}=y|U~1捦!hEӭ1ݟ{Nѐ]?8uR fdJ2`Ū$S`xE *4gnjCܹsHpjmQeƴ_z*VX5ht[r)8z555<̡CNHJ! hȬ^OIm1Gcf{>}:9ɓT,ڽ-(%#M&j/#|j}8!Cng+[!옾3ےC<9Qo~QE Ixz@gp֭Zn+)ڣ#G!ʼnBEqs|WIwH @{}}:C[Eu +رQu{n.[h\19! LɆ#ıdɒ;v,X`ڴiRO,T?.ެ$d.X^ #8#t>Ix/%7 @ݻ:*-_%^1sDv01 zqvlڍLWWxz&dzZ{3o޼zr׬YH@6ZW>lnzA<'dH %XTR`UOWMT3sΕ$wfST!}Txk׮?>oΝ4 '| N<4[`„ 333;;;}رӦM H Ŵh~rx490'b1:%//ONٲe8߲Z.lCN$iݔ*p<18 \u֭+W|7[o5==3ٱPVQQz!K+!F|]jK#=sLnҥK?Sz:($F) ? DBljc1¥# TilUV͘1gӦM+..޵k`֭v{=Qk W\,mÃ~W_}U*z^AZ۸[WݘH8f>4WzW F{޹sgMM?qF3k֬2zCя444=k   B---T|2|a0fΜyСɓ'׿޲eܹsGkڏ?wq\odfO|"y_}Uzzn'h4k֬ylٲo߾g}h4~'uuus)++8p%s3v]]]A@pcccoo/+=zp8 򲲲۪`08rȜ_xWUU9l͖ vWwvv qƅB!P 8pBd̙ ZgϞ&L4hg^QQ`^YY>ǏgφtVt:}>^7LC%zqM&SFFFAAD"^ K.>}fѨhSAAAuuuff&y_m(V"3ϟWޢ$ q&mrk̂M ϒ=Ϟ={Æ P(x{-Ќ9&{V-(( " \.b6MF"M]]]˖-[|^onn ._pZZZ|x8ZǏ h4`ŋB"pV,X =z4 9bɔ~Yv뮻f9 %K3ҥK&)=== 2d2l.ϗv5lݺȑ#~d߾}Vjwy'yHd߾}ϟh4+,,5jTOO?qY,P( _|qr|>DnXt:-ZN'A`vttt\uuuo~k|MNeddHh zzzF#:A'3e[1E}K)+ H@ogNZ + e˖Yf1~*))YlٳgϜ9Ci`=z4#G9r$89p8Ka *gI2e O8OYdYfسgϾ}>5!  IDATZBLKK=ztIIIkkdx<999ӦM{srr;{lrcǎ1B;d! 
E=\̫ !TQQaXz#<0g}:7䤥,kݺuGꫯ)_y啎H<<6 233*ɗzHHQ0YVVV4>ͧ?F7GQd^B*/1Ǝ]T䣄=/]cn(Dqq>"^߼ysKKٳJ?,7<~7ғtb|(d R}JAN, ՙm4uFF[4pJ aCCG$;.~_OOO0 71Εt!-nù3&Ģ^4*g$ĺ5pٛ^G'?Yl `=eL^Ν;71ہQO(wrlyEzG#D!Ω)y A&fxl=1<.>QTV}1P=G]hDqS Dka*ά$|عg{i$[^}^{<g%s6 F,.xF9ލUhbۙ$_ρYf*u9äbaӍ|3hٜv˩Y2gmii'UVVRH}嗏`0<7p?r!dٜN'B \.Wvvvggg$X,cƌq:FF=77`0L0F ==]rݙ(6G?Xڔm=yY0~>О9abŞ-O |% 3@o{H@g?G3FYo3|B֭'O6mZOc;?~_c=vu!4MYYAھ}OVVVΛ7oРA?N~ֶUUUٳ̙3SNE5 !7x߹svl;233??~ʔ)oldDDТEɵ }N1 "} YԩS7`0 FJSM*!d6f3Btjva7!C$~ԩS>fyܸqyyyΝtA---'O$\{z]]]-0aBUUUff)Sȅ#NB… D H6~xɓ%%%1*!j9+p82339Eiiiii)89*Du[ޟMy$56,6[ciHV#@ӿʮz;󟢺<-+PF҉Ԍ}8 ?%滜^|)r˜S! gm{O̶g ns}dxуVoپKZ{~%r LO4BwT?Or{ap}m[J\_r~6M&f=s/z\F1] HsrO+uq]5ƧI%qb&)ρwE:!X-CH3Zb9Q)%f/<ι$RҲUg#ˡfVFĠMs c-8 gABx H(G+3:XZdƯ~b;+*zD/ {H&E!sMVOM-Lѷ.h:(',JCʫ3s3_J!Eí2Uy~s/9 9LBivOc^G5#K u ]CڏsyU ˯eO1Hl2Q chIYbDJ!~PYp!D/] t{N`I{b5)Ρ^6ž5)BRSSH ?؜I0C-߱17DH2 OIB RUhj!2)и Hr(}[ji Ɵ|OӱӉ#,Q=H/ʱ`dBQȆR91QemO MCx#13 WH&gvPNH0W9]Lݑh?߅ $ż*Ӏ$|cvҩqh9pRJ3YX ޛư#r^ H_N[3 #Jǜ֤ٲE9;?hWۢ6VQ_!%5>qc{-}h}P*4rEWXiR 4<<1_9#:$[Y~q2"Q[u¨$j_bs7W(asK2朮@G*3Ϡ!˃W\f3hu GwY aeVSEߧMl$sSe$i!w{ =qF KlCj?kq1~x2(НR>`\G5Z~#ur\N'-;%+@ Qjzƍg֬Yeee{ٲeSLzgGN8Q^^.%]>qMZqFs$TIU`ɭ߅8s3bFcE( )[(I7}TV׆CBŋ{=RsssO<貲?~dѢEp8>5o}g cj%.-b(8Gw);nΝw}Fp8ї^[/;XK n sEO y?׊J2am 嬊P$-[̘1cŊP(4lذDWr<_9(ٓ`*=H^)AϞ={Æ ߜѣG_z%C?^`2ʠ{6m(Aq?əșF&OF>(ǚ5kBNB fqxc)bz=hcĉ/3gɍxf4yyǣuPuĂ5X9Of1K88Ү_HkNS*R?wN/Q BJx-BIWg9%95ZxVWX,O.L.gϞB.O rss}뮽ZEk%(NB&cUBL Jz&/bŊ]v͙3b[Gpj% IteJHF#ImqK*]DHpuFFaP 3FߣtWOѝ9qezG,хITOY7nx ЀPG}c͚5 JPi@B@@h  ؀l  |0s б@8B*@@hACf]vedd`,7 ͳjbP<~0~YYYA`̓ Sҏ߿?I{V.ϔg 7tyH8'M@ w𪪪1c y8p`ʔ)3lnn)))guuȑ#1YYYX>??nXD|{L!Ԟ#'r-x|_uyΛ7o7|3 ?W^yo>`]]<_z{H3ĠT 4JP) *ErD!fСC gvvvIIIvv6<XZZ| 6 [!C`̳pĈZc/\==c0홈D"B*@@h R@T 4JP) *@@myfƍg֬Yeees۹sgMMM8&ܷo׭['O|a,"wW_}+OPEEŹs4 <,Yr̙7xpUrڵ+W3س<B/޿?fϞiӦ555sر3gάu۶mpUԩSgΜ6mŋqB ˍ$qO@T 4JP) *@Kº?8ѵh$"ѵG̘14ѵG3pEbϪݻw={nJtEb{Ha~ɒ%8Şـ+@{V@#&Mt}%16{C&" ] )$= W 1SYY] PD"PݯppX7%|>F4Id`0 k4^$@o۶-33kW1AHdVۚ5kVXT] ` IDATpg9{?ᮻZ|#nЌ3v5gΜUV7:6 j] ~=G#'''+++ѵH>ɤI>۷o---3gٳ_\re[[y\m(9 9yyyBRzO>Mx nF+WZbEyy1c, B`0 xr(Cp8֬YCĚ5k>L?^s͚5!2%7;={UH'=#^z%! b$CZgD_w/P؏F_vn?d$nժU ,((~g ̜0a;y >{ܸq/>|СC8@_/X`~!C g3Et{3gΜkĉիW{<9dȐo… Fq̘13PL&2~AWWWO0y͚5C YYYUUUTjPeڴiUUUcƌyG)nD"/K/D>u!% N4o!k&iڵԉjpb%MyA]< {~kw?~b뭷H%-uŊ...>t́tfnݻ瞉'ܦ̺uzɓMdɒ3gμÆ 9}K?7?0{&g ~zĈV!dXw˖-[vh\z5\XxjAzÖ-["H>ѨO~^g&Xl߾)A;;vL2e~^?s cQ{ 5ky?OR=e$iiiXrKs2x#Ht ԩSyyy0N,YX"++K͚5&##nӏ23Ji{FSSl b۶m *:zfD(;4hЕ[P؟%/Gmmm^!p )|oiه6!jLty^tw3(-h4lsPuMt-@A/rlvk@nnnnnjkkpFA>=!9ʽv\.F;OKKkkkc&.C_ůnI1h7nxjr4QQP.\cRBŋ{=@>|ٲe#˗/'R`75y5p8ڶ!ёG5Ȍg' ~gqz!Z377։{m*T((( e[fff8VV(皎KMo =-!bZ5Vmii0`Q.h((аN494gFj/iU?M8<\yyy=pBnO?4uA)0H2Gj+̀@+.lxCM&ByR\> [M&SlfjtdԼ[ &) Ӳ;;;Z<*W/IǑpAFFFFL-MrSbI=A-5B"s U u BO~ŒEssh% v l6{fl@ESl @>_;ÃSHL&H3++ae4rV*TZIZ6 ⟿!r}),KNnsWL%+[F#: x uvPvc'J/O 5s%Ҷ4C,===#233N4x$H7>D'FyyY ĉ~(@rJ^Ό.uL)@xΚ;KVn!uŒ!HlP$}f볊ujWI 4f@Ey(BiPDja&jXEUIFqh.& _O@1p:PxzV˳7 `/ 455\QGvˀ1'9ʿkP$''N}$9Z)Æ hV 6XIsssz寥[[E#//=Ҍ˾e4pdLCɋהm A|>bCAX%J2#=y/b/"Fmmmaam{:jBss3}ڨ&b, j*n;MbbahXH . Ng)AB~؂R h<;wf麾 e*UނV~Nw%j5b/*^(TPHXж@Fe:}@inna`@_hSh 444+ h{j%?!d2BHB> EdBX v1׳$#Kgg'!bؐ3=!{sq@\;!E4rss[h6,a*AK@ANu=s\6 +-`:s麾 e Ox#!}JBq),BЕМj`!qt=_m%8"@SGl-FjS+i 1\rsFNDdN#ikk.B! $&t(̽NiZ`\a@Np8,DxЖyHBA FD؇4wqDSEgEo(Ν;kjj%K9s76O{X"++K͚5ӈ#^~edS`j{jRlP+dj"B"!D(>#4mZ9B5`7Gj@/]T"&͗tDt2fP3 ===VeDG\Z BD3{/' upfn gP h@ pyH<O$A,;u!пo/]ݕb@39T,F1u>߂{5!mLH*4mX9sl߾=~޴ˢsUMVuGCD MjADEZb^ңxo??W^]TTO jNбlAφF@"gH"v&h4J;' 04G۷G}衇n>lȑ7|s+jBFhCV,`74Bl6KiGbZ~x^#W_-eAJr.(uy\ɤ |-(] sw1v Njmυp? 
expect_equivalent(colnames(s1),c("Model","Sandwich")) val <- sim(onerun,R=2,cl=TRUE,seed=1,messages=0,mc.cores=2) expect_true(val[1,1]!=val[1,2]) onerun2 <- function(a,b,...)
{ return(cbind(a=a,b=b,c=a-1,d=a+1)) } R <- data.frame(a=1:2,b=3:4) dm <- capture.output(val2 <- sim(onerun2,R=R,messages=1,mc.cores=2)) expect_true(all(R-val2[,1:2]==0)) res <- summary(val2) expect_equivalent(res["Mean",],c(1.5,3.5,0.5,2.5)) expect_output(print(val2[1,]),"a b c d") expect_output(print(val2[1,]),"1 3 0 2") res <- summary(val2,estimate="a",se="b",true=1.5,confint=c("c","d")) expect_true(res["Coverage",]==1) expect_true(res["SE/SD",]==mean(val2[,"b"])/sd(val2[,"a"])) }) test_that("distributions", { m <- lvm(y1~x) distribution(m,~y1) <- binomial.lvm("probit") distribution(m,~y2) <- poisson.lvm() distribution(m,~y3) <- normal.lvm(mean=1,sd=2) distribution(m,~y3) <- lognormal.lvm() distribution(m,~y3) <- pareto.lvm() distribution(m,~y3) <- loggamma.lvm() distribution(m,~y3) <- weibull.lvm() distribution(m,~y3) <- chisq.lvm() distribution(m,~y3) <- student.lvm(mu=1,sigma=1) expect_output(print(distribution(m)$y2),"Family: poisson") expect_output(print(distribution(m)$y1),"Family: binomial") latent(m) <- ~u expect_output(print(m),"binomial\\(probit\\)") expect_output(print(m),"poisson\\(log\\)") ## Generator: m <- lvm() distribution(m,~y,TRUE) <- function(n,...) { res <- exp(rnorm(n)); res[seq(min(n,5))] <- 0 return(res) } d <- sim(m,10) expect_true(all(d[1:5,1]==0)) expect_true(all(d[6:10,1]!=0)) m <- lvm() distribution(m,~y,parname="a",init=2) <- function(n,a,...) { rep(1,n)*a } expect_true(all(sim(m,2)==2)) expect_true(all(sim(m,2,p=c(a=10))==10)) expect_equivalent(sim(m,2,p=c(a=10)),sim(m,2,a=10)) ## Multivariate distribution m <- lvm() rmr <- function(n,rho,...) rmvn(n,sigma=diag(2)*(1-rho)+rho) distribution(m,~y1+y2,rho=0.9) <- rmr expect_equivalent(c("y1","y2"),colnames(d <- sim(m,5))) ## Special 'distributions' m <- lvm() distribution(m,~x1) <- sequence.lvm(int=TRUE) distribution(m,~x2) <- sequence.lvm(a=1,b=2) distribution(m,~x3) <- sequence.lvm(a=NULL,b=2) distribution(m,~x4) <- sequence.lvm(a=2,b=NULL) ex <- sim(m,5) expect_equivalent(ex$x1,1:5) expect_equivalent(ex$x2,seq(1,2,length.out=5)) expect_equivalent(ex$x3,seq(-2,2)) expect_equivalent(ex$x4,seq(2,6)) m <- lvm() distribution(m,~x1) <- ones.lvm() distribution(m,~x2) <- ones.lvm(p=0.5) distribution(m,~x3) <- ones.lvm(interval=c(0.4,0.6)) ex <- sim(m,10) expect_equivalent(ex$x1,rep(1,10)) expect_equivalent(ex$x2,c(rep(0,5),rep(1,5))) expect_equivalent(ex$x3,c(0,0,0,1,1,1,0,0,0,0)) m <- lvm() expect_error(distribution(m,~y) <- threshold.lvm(p=c(0.5,.75))) distribution(m,~y) <- threshold.lvm(p=c(0.25,.25)) set.seed(1) expect_equivalent(1:3,sort(unique(sim(m,200))[,1])) ## distribution(m,~y) <- threshold.lvm(p=c(0.25,.25),labels=letters[1:3]) ## expect_equivalent(c("a","b","c"),sort(unique(sim(m,200))[,1])) }) test_that("eventTime", { m <- lvm(eventtime~x) distribution(m,~eventtime) <- coxExponential.lvm(1/100) distribution(m,~censtime) <- coxWeibull.lvm(1/500) eventTime(m) <- time~min(eventtime=1,censtime=0) set.seed(1) d <- sim(m,100) expect_equivalent((d$time0)) ## Std.err expect_equal(f["Total",1],3) expect_equal(f["Direct",1],1) f2 <- summary(effects(e,u~v))$coef expect_equal(f2["Total",1],1) expect_equal(f2["Direct",1],1) expect_equal(f2["Indirect",1],0) expect_output(print(ef),"Mediation proportion") expect_equivalent(confint(ef)["Direct",], confint(e)["y1~x",]) expect_equivalent(totaleffects(e,y1~x)[,1:4],f["Total",]) g <- graph::updateGraph(plot(m,noplot=TRUE)) expect_equivalent(path(g,y1~x),path(m,y1~x)) }) test_that("Profile confidence limits", { m <- lvm(y~b*x) constrain(m,b~psi) <- identity set.seed(1) d 
<- sim(m,100,seed=1) e <- estimate(m, d) ci0 <- confint(e,3) ci <- confint(e,3,profile=TRUE) expect_true(mean((ci0-ci)^2)<0.1) }) test_that("IV-estimator", { m <- lvm(c(y1,y2,y3)~u); latent(m) <- ~u set.seed(1) d <- sim(m,100,seed=1) e0 <- estimate(m,d) e <- estimate(m,d,estimator="iv") ## := MLE expect_true(mean((coef(e)-coef(e0))^2)<1e-9) }) test_that("glm-estimator", { m <- lvm(y~x+z) regression(m) <- x~z distribution(m,~y+z) <- binomial.lvm("logit") set.seed(1) d <- sim(m,1e3,seed=1) head(d) e <- estimate(m,d,estimator="glm") c1 <- coef(e,2)[c("y","y~x","y~z"),1:2] c2 <- estimate(glm(y~x+z,d,family=binomial))$coefmat[,1:2] expect_equivalent(c1,c2) }) test_that("gaussian", { m <- lvm(y~x) d <- simulate(m,100,seed=1) S <- cov(d[,vars(m),drop=FALSE]) mu <- colMeans(d[,vars(m),drop=FALSE]) f <- function(p) lava:::gaussian_objective.lvm(p,x=m,S=S,mu=mu,n=nrow(d)) g <- function(p) lava:::gaussian_score.lvm(p,x=m,n=nrow(d),data=d,indiv=TRUE) s1 <- numDeriv::grad(f,c(0,1,1)) s2 <- g(c(0,1,1)) expect_equal(s1,-colSums(s2),tolerance=0.1) }) test_that("Association measures", { P <- matrix(c(0.25,0.25,0.25,0.25),2) a1 <- lava:::assoc(P) expect_equivalent(-log(0.25),a1$H) expect_true(with(a1, all(c(kappa,gamma,MI,U.sym)==0))) p <- lava:::prob.normal(sigma=diag(nrow=2),breaks=c(-Inf,0),breaks2=c(-Inf,0))[1] expect_equal(p[1],0.25) ## q <- qnorm(0.75) ## m <- ordinal(lvm(y~x),~y, K=3)#, breaks=c(-q,q)) ## normal.threshold(m,p=c(0,1,2)) }) test_that("equivalence", { m <- lvm(c(y1,y2,y3)~u,u~x,y1~x) latent(m) <- ~u d <- sim(m,100,seed=1) cancel(m) <- y1~x regression(m) <- y2~x e <- estimate(m,d) ##eq <- equivalence(e,y1~x,k=1) dm <- capture.output(eq <- equivalence(e,y2~x,k=1)) expect_output(print(eq),paste0("y1",lava.options()$symbol[2],"y3")) expect_true(all(c("y1","y3")%in%eq$equiv[[1]][1,])) }) test_that("multiple testing", { expect_equivalent(lava:::holm(c(0.05/3,0.025,0.05)),rep(0.05,3)) ci1 <- scheffe(l <- lm(1:5~c(0.5,0.7,1,1.3,1.5))) ci2 <- predict(l,interval="confidence") expect_equivalent(ci1[,1],ci2[,1]) expect_true(all(ci1[,2]ci2[,3])) }) test_that("modelsearch and GoF", { m <- lvm(c(y1,y2)~x) d <- sim(m,100,seed=1) e <- estimate(lvm(c(y1,y2)~1,y1~x),d) e0 <- estimate(lvm(c(y1,y2)~x,y1~~y2),d) s1 <- modelsearch(e,silent=TRUE,type="correlation") expect_true(nrow(s1$res)==2) s1b <- modelsearch(e,silent=TRUE,type="regression") expect_true(nrow(s1b$res)==4) s2 <- modelsearch(e0,silent=TRUE,dir="backward") expect_true(nrow(s2$res)==3) e00 <- estimate(e0,vcov=vcov(e0))$coefmat ii <- match(s2$res[,"Index"],rownames(e00)) expect_equivalent(e00[ii,5],s2$test[,2]) s3 <- modelsearch(e0,dir="backward",k=3) expect_true(nrow(s3$res)==1) ee <- modelsearch(e0,dir="backstep",messages=FALSE) expect_true(inherits(ee,"lvm")) ## TODO gof(e,all=TRUE) r <- rsq(e)[[1]] expect_true(abs(summary(lm(y1~x,d))$r.square-r["y1"])<1e-5) }) test_that("Bootstrap", { y <- rep(c(0,1),each=5) x <- 1:10 e <- estimate(y~x) B1 <- bootstrap(e,R=2,silent=TRUE,mc.cores=1,sd=TRUE) B2 <- bootstrap(e,R=2,silent=TRUE,bollenstine=TRUE,mc.cores=1) expect_false(B1$bollenstine) expect_true(B2$bollenstine) expect_true(nrow(B1$coef)==2) expect_output(print(B1),"Standard errors:") dm <- capture.output(B3 <- bootstrap(e,R=2,fun=function(x) coef(x)[2]^2+10)) expect_true(all(mean(B3$coef)>10)) y <- rep(c(0,1),each=5) x <- 1:10 m <- lvm(y~b*x) constrain(m,alpha~b) <- function(x) x^2 e <- estimate(m,data.frame(y=y,x=x)) b <- bootstrap(e,R=1,silent=TRUE) expect_output(print(b),"alpha") }) test_that("Survreg", { m <- lvm(y0~x) transform(m,y~y0) <- 
function(x) pmin(x[,1],2) transform(m,status~y0) <- function(x) x<2 d <- simulate(m,100,seed=1) require('survival') m <- survreg(Surv(y,status)~x,data=d,dist='gaussian') s <- score(m) expect_true(length(pars(m))==length(coef(m))+1) expect_true(abs(attr(score(m,pars(m)),'logLik')-logLik(m))<1e-9) expect_true(mean(colSums(s)^2)<1e-6) expect_equivalent(vcov(m),attr(s,'bread')) }) test_that("Combine", { ## Combine model output data(serotonin) m1 <- lm(cau ~ age*gene1 + age*gene2,data=serotonin) m2 <- lm(cau ~ age + gene1,data=serotonin) cc <- Combine(list('model A'=m1,'model B'=m2),fun=function(x) c(R2=format(summary(x)$r.squared,digits=2))) expect_true(nrow(cc)==length(coef(m1))+1) expect_equivalent(colnames(cc),c('model A','model B')) expect_equivalent(cc['R2',2],format(summary(m2)$r.squared,digits=2)) }) test_that("zero-inflated binomial regression (zib)", { set.seed(1) n <- 1e3 x <- runif(n,0,20) age <- runif(n,10,30) z0 <- rnorm(n,mean=-1+0.05*age) z <- cut(z0,breaks=c(-Inf,-1,0,1,Inf)) p0 <- lava::expit(model.matrix(~z+age) %*% c(-.4, -.4, 0.2, 2, -0.05)) y <- (runif(n)0 df <- function(x) 2*x*log(x) + x df2 <- function(x) 2*log(x) + 3 op <- NR(5,f,df,df2,control=list(tol=1e-40)) ## Find root expect_equivalent(round(op$par,digits=7),.6065307) op2 <- estfun0(5,gradient=df) op3 <- estfun(5,gradient=df,hessian=df2,control=list(tol=1e-40)) expect_equivalent(op$par,op2$par) expect_equivalent(op$par,op3$par) }) test_that("Prediction with missing data, random intercept", { ## Random intercept model m <- lvm(c(y1,y2,y3)~u[0]) latent(m) <- ~u regression(m) <- y1~x1 regression(m) <- y2~x2 regression(m) <- y3~x3 d <- simulate(m,1e3,seed=1) dd <- reshape(d,varying=list(c('y1','y2','y3'),c('x1','x2','x3')),direction='long',v.names=c('y','x')) ##system.time(l <- lme4::lmer(y~x+(1|id), data=dd, REML=FALSE)) system.time(l <- nlme::lme(y~x,random=~1|id, data=dd, method="ML")) m0 <- lvm(c(y1[m:v],y2[m:v],y3[m:v])~1*u[0]) latent(m0) <- ~u regression(m0,y=c('y1','y2','y3'),x=c('x1','x2','x3')) <- rep('b',3) system.time(e <- estimate(m0,d)) mytol <- 1e-6 mse <- function(x,y=0) mean(na.omit(as.matrix(x)-as.matrix(y))^2) expect_true(mse(logLik(e),logLik(l))0) { ## At least major version 1 x <- transform(data.frame(lava:::rmvn(1000,sigma=0.5*diag(2)+0.5)), X1=as.numeric(cut(X1,breaks=3))-1,X2=as.numeric(cut(X2,breaks=3))-1) m <- covariance(lvm(),X1~X2) ordinal(m,K=3,constrain=list("t1","t2")) <- ~X1 ordinal(m,K=3,constrain=list("t1","t2")) <- ~X2 ## e <- estimate(m,x) e <- estimate(list(m,m),list(x[1:500,],x[501:1000,]),estimator="normal") estimate(e) } } }) test_that("Multiple group constraints I", { m1 <- lvm(y[m:v] ~ f(x,beta)+f(z,beta2)) d1 <- sim(m1,500,seed=1); d2 <- sim(m1,500,seed=2) ##coef(estimate(m1,d1)) constrain(m1,beta2~psi) <- function(x) 2*x m2 <- lvm(y[m:v] ~ f(x,beta2) + z) constrain(m2,beta2~psi) <- function(x) 2*x mg <- multigroup(list(m1,m2),list(d1,d2)) ee <- estimate(mg) expect_true(length(coef(ee))==5) expect_equivalent(constraints(ee)[1],2*coef(ee)["1@psi"]) # Est expect_equivalent(constraints(ee)[2],2*coef(ee,2)[[1]]["psi",2]) # Std.Err }) test_that("Multiple group constraints II", { data("twindata",package="lava") twinwide <- reshape(twindata,direction="wide", idvar="id",timevar="twinnum") l <- lvm(~bw.1+bw.2) covariance(l) <- bw.1 ~ bw.2 e <- estimate(l,subset(twinwide,zyg.1=="MZ"),control=list(method="NR")) B <- cbind(1,-1); colnames(B) <- c("bw.1,bw.1","bw.2,bw.2") colnames(B) <- gsub(",",lava.options()$symbols[2],colnames(B)) lava::compare(e,contrast=B) B2 <- 
rbind(c(1,-1,0,0),c(0,0,1,-1)) colnames(B2) <- c("bw.1","bw.2","bw.1,bw.1","bw.2,bw.2") colnames(B2) <- gsub(",",lava.options()$symbols[2],colnames(B2)) lava::compare(e,contrast=B2) l <- lvm(~bw.1+bw.2) covariance(l) <- bw.1 ~ bw.2 intercept(l,~bw.1+bw.2) <- "m" covariance(l,~bw.1+bw.2) <- "s" covariance(l,bw.1~bw.2) <- "r1" l2 <- l covariance(l2,bw.1~bw.2) <- "r2" DZ <- subset(twinwide,zyg.1=="MZ") MZ <- subset(twinwide,zyg.1=="DZ") ## e <- estimate(l,MZ) ## e2 <- estimate(l2,DZ) parameter(l) <- ~r2 parameter(l2) <- ~r1 ee <- estimate(list(MZ=l,DZ=l2),list(MZ,DZ),control=list(method="NR",tol=1e-9,constrain=FALSE)) expect_true(mean(score(ee)^2)<1e-9) constrain(ee,h~r2+r1) <- function(x) 2*(x[1]-x[2]) ce <- constraints(ee) expect_equivalent(constraints(ee)[1],2*diff(coef(ee)[3:4])) expect_true(length(coef(ee))==4) expect_true(nrow(ce)==1) expect_true(all(!is.na(ce))) }) lava/tests/testthat/test-multigroup.R0000644000176200001440000000527113162174023017512 0ustar liggesuserscontext("Multiple Group") test_that("Multiple group I", { m <- lvm(y~x) set.seed(1) d <- sim(m,100) ## Just a stratified analysis e <- estimate(list("Group A"=m,"Group B"=m),list(d,d)) expect_true(mean((coef(e)[c(1,3)]-coef(lm(y~x,d)))^2)<1e-9) expect_true(mean((coef(e)[c(2,5)]-coef(lm(y~x,d)))^2)<1e-9) }) test_that("Multiple group II", { m <- baptize(lvm(y~x)) set.seed(1) d <- sim(m,100) ## Just a standard linear regression (single group) e <- estimate(list(m,m),list(d,d)) expect_identical(coef(e,level=2)[[1]],coef(e,level=2)[[2]]) expect_true(mean((coef(e,level=2)[[1]][1:2,1]-coef(lm(y~x,cbind(d,d))))^2)<1e-9) }) context("Missing data") test_that("Missing data analysis", { ## Random intercept model m <- lvm(c(y1,y2,y3)~x+u); latent(m) <- ~u set.seed(1) ## Missing on first two outcomes d <- makemissing(sim(m,200),p=0.3,cols=c("y1","y2")) e <- estimate(m,d,missing=TRUE) expect_true("lvm.missing"%in%class(e)) expect_true(sum(unlist(lapply(e$estimate$model$data,nrow)))==200) ## Convergence: g <- gof(e) expect_true(mean(score(e))<1e-3) expect_true(g$rankV==length(pars(e))) }) test_that("Multiple group, missing data analysis", { m <- lvm(list(c(y1,y2,y3)~u,u~x)); latent(m) <- ~u m <- baptize(fixsome(m)) regression(m,u~x) <- NA covariance(m,~u) <- NA set.seed(1) ## Missing on all outcomes d1 <- makemissing(sim(m,500),cols=c("y1","y2"),p=0.3) d2 <- makemissing(sim(m,500),cols=c("y1","y2"),p=0.3) e <- estimate(list(m,m),list(d1,d2),missing=TRUE) g <- gof(e) expect_true(g$n==1000) expect_true(mean(score(e))<1e-3) expect_true(g$rankV==length(pars(e))) }) test_that("Multiple group, constraints", { m1 <- lvm(y ~ f(x,beta)+f(z,beta2)) m2 <- lvm(y ~ f(x,psi) + z) ## And simulate data from them set.seed(1) d1 <- sim(m1,100) d2 <- sim(m2,100) ## Add 'non'-linear parameter constraint constrain(m2,psi ~ beta2) <- function(x) x ## Add parameter beta2 to model 2, now beta2 exists in both models parameter(m2) <- ~ beta2 ee <- estimate(list(m1,m2),list(d1,d2)) m <- lvm(y1 ~ x1 + beta2*z1) regression(m) <- y2 ~ beta2*x2 + z2 d <- cbind(d1,d2); names(d) <- c(paste0(names(d1),1),paste0(names(d1),2)) e <- estimate(m,d) b1 <- coef(e,2,labels=TRUE)["beta2",1] b2 <- constraints(ee)[1] expect_true(mean((b1-b2)^2)<1e-5) ## "Multiple group, constraints (non-linear in x) m <- lvm(y[m:v] ~ 1) addvar(m) <- ~x parameter(m) <- ~a+b constrain(m,m~a+b+x) <- function(z) z[1]+z[2]*z[3] ee <- estimate(list(m,m),list(d1[1:5,],d1[6:10,])) b1 <- coef(lm(y~x,d1[1:10,])) b2 <- coef(ee)[c("1@a","1@b")] expect_true(mean(b1-b2)^2<1e-4) }) 
lava/tests/testthat/test-plot.R0000644000176200001440000001212513162174023016255 0ustar liggesuserscontext("Graphics functions") test_that("attr", { m <- lvm(y~x) d <- sim(m,10) e <- estimate(m,d) }) test_that("color", { cur <- palette() old <- lava:::mypal() expect_equivalent(col2rgb(cur),col2rgb(old)) expect_equivalent(col2rgb(palette()),col2rgb(lava:::mypal(set=FALSE))) expect_equivalent(Col("red",0.5),rgb(1,0,0,0.5)) expect_equivalent(Col(c("red","blue"),0.5),rgb(c(1,0),c(0,0),c(0,1),0.5)) expect_equivalent(Col(c("red","blue"),c(0.2,0.5)),rgb(c(1,0),c(0,0),c(0,1),c(0.2,0.5))) expect_equivalent(Col(rgb(1,0,0),0.5),rgb(1,0,0,0.5)) plot(0,xlim=c(0,1),ylim=c(0,1),type="n",ann=FALSE,axes=FALSE) devc1 <- devcoords() par(mar=c(0,0,0,0)) plot(0,xlim=c(0,1),ylim=c(0,1),type="n",ann=FALSE,axes=FALSE) devc2 <- devcoords() figx <- c("fig.x1","fig.x2","fig.y1","fig.y2") devx <- c("dev.x1","dev.x2","dev.y1","dev.y2") expect_equivalent(devc1[figx],devc2[devx]) }) if (requireNamespace("visualTest",quietly=TRUE) && requireNamespace("png",quietly=TRUE)) { gropen <- function(resolution=200,...) { tmpfile <- tempfile(fileext=".png") png(file=tmpfile,width=200,height=200) res <- dev.cur() return(structure(tmpfile,dev=res)) } grcompare <- function(file1,file2,...) { res <- visualTest::isSimilar(file1,file2,...) unlink(c(file1,file2)) return(res) } test_that("plotConf", { set.seed(1) x <- rnorm(50) y <- rnorm(50,x) z <- rbinom(50,1,0.5) d <- data.frame(y,z,x) l <- lm(y~x*z) d1 <- gropen() par(mar=c(0,0,0,0)) plotConf(l,var1="x",var2="z",col=c("black","blue"),alpha=0.5,legend=FALSE) dev.off() newd <- data.frame(x=seq(min(x),max(x),length.out=100)) l0 <- lm(y~x,subset(d,z==0)) ci0 <- predict(l0,newdata=newd,interval="confidence") l1 <- lm(y~x,subset(d,z==1)) ci1 <- predict(l1,newdata=newd,interval="confidence") d2 <- gropen() par(mar=c(0,0,0,0)) plot(y~x,col=c("black","blue")[z+1],pch=16,ylim=c(min(ci0,ci1,y),max(ci0,ci1,y))) lines(newd$x,ci0[,1],col="black",lwd=2) lines(newd$x,ci1[,1],col="blue",lwd=2) confband(newd$x,lower=ci0[,2],upper=ci0[,3],polygon=TRUE,col=Col("black",0.5),border=FALSE) confband(newd$x,lower=ci1[,2],upper=ci1[,3],polygon=TRUE,col=Col("blue",0.5),border=FALSE) points(y~x,col=c("black","blue")[z+1],pch=16) dev.off() expect_true(grcompare(d1,d2,threshold=5)) d1 <- gropen() par(mar=c(0,0,0,0)) l <- lm(y~z) plotConf(l,var2="z",var1=NULL,jitter=0,col="black",alpha=0.5,xlim=c(.5,2.5),ylim=range(y)) dev.off() d2 <- gropen() par(mar=c(0,0,0,0)) plot(y~I(z+1),ylim=range(y),xlim=c(0.5,2.5),pch=16,col=Col("black",0.5)) l0 <- lm(y~-1+factor(z)) confband(1:2,lower=confint(l0)[,1],upper=confint(l0)[,2],lwd=3, center=coef(l0)) dev.off() expect_true(grcompare(d1,d2,threshold=10)) }) test_that("forestplot", { set.seed(1) K <- 20 est <- rnorm(K); est[c(3:4,10:12)] <- NA se <- runif(K,0.2,0.4) x <- cbind(est,est-2*se,est+2*se,runif(K,0.5,2)) rownames(x) <- unlist(lapply(letters[seq(K)],function(x) paste(rep(x,4),collapse=""))) rownames(x)[which(is.na(est))] <- "" signif <- sign(x[,2])==sign(x[,3]) forestplot(x) ## TODO }) test_that("plot.sim", { onerun2 <- function(a,b,...) 
{ return(cbind(a=a,b=b,c=a-1,d=a+1)) } R <- data.frame(a=1:2,b=3:4) val2 <- sim(onerun2,R=R,type=0) plot(val2) plot(val2,plot.type="single") density(val2) ## TODO }) test_that("spaghetti", { K <- 5 y <- "y"%++%seq(K) m <- lvm() regression(m,y=y,x=~u) <- 1 regression(m,y=y,x=~s) <- seq(K)-1 regression(m,y=y,x=~x) <- "b" d <- sim(m,5) dd <- mets::fast.reshape(d); dd$num <- dd$num+rnorm(nrow(dd),sd=0.5) ## Unbalance spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4),trend=TRUE,trend.col="darkblue") ## TODO }) test_that("ksmooth", { ## TODO }) test_that("plot.lvm", { ## TODO m <- lvm(y~1*u[0:1],u~1*x) latent(m) <- ~u plot(m) d <- sim(m,20,seed=1) e <- estimate(m,d) plot(e) plot(lava:::beautify(m)) g <- igraph.lvm(m) expect_true(inherits(g,"igraph")) }) test_that("images", { ## TODO }) test_that("labels,edgelabels", { ## TODO }) test_that("colorbar", { ## TODO }) test_that("fplot", { ## TODO }) test_that("interactive", { ## TODO }) test_that("pdfconvert", { ## TODO }) test_that("plot.estimate", { ## TODO }) test_that("logo", { lava(seed=1) }) } lava/tests/testthat/test-graph.R0000644000176200001440000000075713162174023016410 0ustar liggesuserscontext("Inference") test_that("d-separation",{ m <- lvm(x5 ~ x4+x3, x4~x3+x1, x3~x2, x2~x1) expect_true(dsep(m,x5~x1|x3+x4)) expect_false(dsep(m,x5~x1|x2+x4)) expect_true(dsep(m,x5~x1|x2+x3+x4)) expect_false(dsep(m,~x1+x2+x3|x4)) expect_true(setequal(ancestors(m,~x5),setdiff(vars(m),"x5"))) expect_true(setequal(ancestors(m,~x1),NULL)) expect_true(setequal(descendants(m,~x5),NULL)) expect_true(setequal(descendants(m,~x1),setdiff(vars(m),"x1"))) }) lava/tests/testthat/test-misc.R0000644000176200001440000000664613162174023016245 0ustar liggesuserscontext("Utility functions") test_that("By", { b1 <- By(datasets::CO2,~Treatment+Type,colMeans,~conc) b2 <- By(datasets::CO2,c('Treatment','Type'),colMeans,'conc') expect_equivalent(b1,b2) ## require('data.table') ## t1 <- as.data.frame(data.table(datasets::CO2)[,mean(uptake),by=.(Treatment,Type,conc>500)]) d0 <- transform(datasets::CO2,conc500=conc>500) t1 <- by(d0[,"uptake"],d0[,c("Treatment","Type","conc500")],mean) t2 <- By(datasets::CO2,~Treatment+Type+I(conc>500),colMeans,~uptake) expect_true(inherits(t2,"array")) expect_equivalent(sort(t2),sort(t1)) }) test_that("Expand", { dd <- Expand(iris, Sepal.Length=2:8, Species=c("virginica","setosa")) expect_identical(levels(iris$Species),levels(dd$Species)) expect_true(nrow(dd)==14) d0 <- datasets::warpbreaks[,c("wool","tension")] T <- table(d0) d1 <- Expand(T) expect_identical(dim(d0),dim(d1)) expect_identical(table(d1),T) expect_identical(expand.grid(1:2,1:2),Expand(1:2,1:2)) expect_identical(expand.grid(a=1:2,b=1:2),Expand(a=1:2,b=1:2)) }) test_that("formulas", { f <- toformula(c('y1','y2'),'x'%++%1:5) ff <- getoutcome(f) expect_equivalent(trim(ff,all=TRUE),"c(y1,y2)") expect_true(length(attr(ff,'x'))==5) }) test_that("trim", { expect_true(length(grep(" ",trim(" test ")))==0) expect_true(length(gregexpr(" ",trim(" t e s t "))[[1]])==3) expect_true(length(grep(" ",trim(" t e s t ",all=TRUE)))==0) }) test_that("Matrix operations:", { ## vec operator expect_equivalent(vec(diag(3)),c(1,0,0,0,1,0,0,0,1)) expect_true(nrow(vec(diag(3),matrix=TRUE))==9) ## commutaion matrix A <- matrix(1:16 ,ncol=4) K <- commutation(A) expect_equivalent(K%*%as.vector(A),vec(t(A),matrix=TRUE)) ## Block diagonal A <- diag(3)+1 B <- blockdiag(A,A,A,pad=NA) expect_equivalent(dim(B),c(9,9)) expect_true(sum(is.na(B))==81-27) }) test_that("plotConf", { m <- lvm(y~x+g) distribution(m,~g) <- 
binomial.lvm() d <- sim(m,50) l <- lm(y~x+g,d) g1 <- plotConf(l,var2="g",plot=FALSE) g2 <- plotConf(l,var1=NULL,var2="g",plot=FALSE) }) test_that("wrapvev", { expect_equivalent(wrapvec(5,2),c(3,4,5,1,2)) expect_equivalent(wrapvec(seq(1:5),-1),c(5,1,2,3,4)) }) test_that("matrix functions", { A <- revdiag(1:3) expect_equivalent(A,matrix(c(0,0,1,0,2,0,3,0,0),3)) expect_equivalent(1:3,revdiag(A)) revdiag(A) <- 4 expect_equivalent(rep(4,3),revdiag(A)) diag(A) <- 0 offdiag(A) <- 5 expect_true(sum(offdiag(A))==6*5) A <- matrix(0,3,3) offdiag(A,type=3) <- 1:6 B <- crossprod(A) expect_equivalent(solve(A),Inverse(A)) expect_equivalent(det(B),attr(Inverse(B,chol=TRUE),"det")) }) test_that("getmplus", { }) test_that("getsas", { }) test_that("All the rest", { expect_false(lava:::versioncheck(NULL)) expect_true(lava:::versioncheck("lava",c(1,4,1))) op <- lava.options(debug=TRUE) expect_true(lava.options()$debug) lava.options(op) A <- diag(2); colnames(A) <- c("a","b") expect_output(printmany(A,A,2,rownames=c("A","B"),bothrows=FALSE),"a b") expect_output(printmany(A,A[1,,drop=FALSE],2,rownames=c("A","B"),bothrows=FALSE),"a b") expect_output(printmany(A,A,2,rownames=c("A","B"),name1="no.1",name2="no.2", bothrows=TRUE),"no.1") ##printmany(A,A,2,name1="no.1",name2="no.2",bothrows=T) }) lava/tests/test-all.R0000644000176200001440000000013113162174023014201 0ustar liggesusers#library("lava") suppressPackageStartupMessages(library("testthat")) test_check("lava") lava/NAMESPACE0000644000176200001440000003547013162174023012424 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method("%++%","function") S3method("%++%",character) S3method("%++%",default) S3method("%++%",lvm) S3method("%++%",matrix) S3method("+",estimate) S3method("+",lvm) S3method("Graph<-",lvm) S3method("Graph<-",lvmfit) S3method("Model<-",lvm) S3method("Model<-",lvmfit) S3method("Model<-",multigroup) S3method("Model<-",multigroupfit) S3method("[",sim) S3method("addvar<-",lvm) S3method("cancel<-",lvm) S3method("constrain<-",default) S3method("constrain<-",multigroupfit) S3method("covariance<-",lvm) S3method("covfix<-",lvm) S3method("distribution<-",lvm) S3method("edgelabels<-",graphNEL) S3method("edgelabels<-",lvm) S3method("edgelabels<-",lvmfit) S3method("exogenous<-",lvm) S3method("functional<-",lvm) S3method("heavytail<-",lvm) S3method("index<-",lvm) S3method("index<-",lvmfit) S3method("intercept<-",lvm) S3method("kill<-",lvm) S3method("labels<-",default) S3method("latent<-",lvm) S3method("nodecolor<-",default) S3method("nodecolor<-",lvm) S3method("nonlinear<-",lvm) S3method("ordinal<-",lvm) S3method("parameter<-",lvm) S3method("parameter<-",lvmfit) S3method("parfix<-",lvm) S3method("randomslope<-",lvm) S3method("regfix<-",lvm) S3method("regression<-",lvm) S3method("rmvar<-",lvm) S3method("transform<-",lvm) S3method("variance<-",lvm) S3method(Graph,lvm) S3method(Graph,lvmfit) S3method(Model,default) S3method(Model,lvm) S3method(Model,lvmfit) S3method(Model,multigroup) S3method(Model,multigroupfit) S3method(Weights,default) S3method(addattr,graphNEL) S3method(addattr,lvm) S3method(addattr,lvmfit) S3method(addvar,lvm) S3method(adjMat,lvm) S3method(adjMat,lvmfit) S3method(ancestors,lvm) S3method(ancestors,lvmfit) S3method(baptize,lvm) S3method(bootstrap,lvm) S3method(bootstrap,lvmfit) S3method(cancel,lvm) S3method(children,lvm) S3method(children,lvmfit) S3method(click,default) S3method(coef,effects) S3method(coef,estimate) S3method(coef,lvm) S3method(coef,lvmfit) S3method(coef,multigroup) S3method(coef,multigroupfit) 
S3method(coef,multinomial) S3method(coef,ordreg) S3method(coef,pcor) S3method(coef,summary.estimate) S3method(coef,summary.lvmfit) S3method(coef,zibreg) S3method(compare,default) S3method(confint,effects) S3method(confint,lvmfit) S3method(confint,multigroupfit) S3method(constrain,default) S3method(correlation,data.frame) S3method(correlation,lvmfit) S3method(correlation,matrix) S3method(covariance,formula) S3method(covariance,lvm) S3method(covfix,lvm) S3method(density,sim) S3method(deriv,lvm) S3method(descendants,lvm) S3method(descendants,lvmfit) S3method(distribution,lvm) S3method(dsep,lvm) S3method(edgeList,lvm) S3method(edgeList,lvmfit) S3method(edgelabels,graphNEL) S3method(edgelabels,lvm) S3method(edgelabels,lvmfit) S3method(effects,lvmfit) S3method(endogenous,list) S3method(endogenous,lm) S3method(endogenous,lvm) S3method(endogenous,lvmfit) S3method(endogenous,multigroup) S3method(estimate,MAR) S3method(estimate,default) S3method(estimate,estimate.sim) S3method(estimate,formula) S3method(estimate,list) S3method(estimate,lvm) S3method(estimate,multigroup) S3method(estimate,twostage.lvm) S3method(exogenous,list) S3method(exogenous,lm) S3method(exogenous,lvm) S3method(exogenous,lvmfit) S3method(exogenous,multigroup) S3method(family,zibreg) S3method(finalize,lvm) S3method(formula,lvm) S3method(formula,lvmfit) S3method(functional,lvm) S3method(gof,lvmfit) S3method(heavytail,lvm) S3method(iid,data.frame) S3method(iid,default) S3method(iid,estimate) S3method(iid,glm) S3method(iid,matrix) S3method(iid,multigroupfit) S3method(iid,multinomial) S3method(iid,numeric) S3method(index,lvm) S3method(index,lvmfit) S3method(information,data.frame) S3method(information,glm) S3method(information,lvm) S3method(information,lvm.missing) S3method(information,lvmfit) S3method(information,multigroup) S3method(information,multigroupfit) S3method(information,multinomial) S3method(information,table) S3method(information,zibreg) S3method(intercept,lvm) S3method(kappa,data.frame) S3method(kappa,multinomial) S3method(kappa,table) S3method(kill,lvm) S3method(labels,graphNEL) S3method(labels,lvm) S3method(labels,lvmfit) S3method(latent,list) S3method(latent,lvm) S3method(latent,lvmfit) S3method(latent,multigroup) S3method(logLik,lvm) S3method(logLik,lvm.missing) S3method(logLik,lvmfit) S3method(logLik,multigroup) S3method(logLik,multigroupfit) S3method(logLik,ordreg) S3method(logLik,pcor) S3method(logLik,zibreg) S3method(manifest,list) S3method(manifest,lvm) S3method(manifest,lvmfit) S3method(manifest,multigroup) S3method(merge,estimate) S3method(merge,glm) S3method(merge,lm) S3method(merge,lvm) S3method(merge,lvmfit) S3method(merge,multinomial) S3method(model.frame,estimate) S3method(model.frame,lvmfit) S3method(model.frame,multigroupfit) S3method(model.frame,multinomial) S3method(modelPar,lvm) S3method(modelPar,lvmfit) S3method(modelPar,multigroup) S3method(modelPar,multigroupfit) S3method(modelVar,lvm) S3method(modelVar,lvmfit) S3method(moments,lvm) S3method(moments,lvm.missing) S3method(moments,lvmfit) S3method(nonlinear,lvm) S3method(nonlinear,lvmfit) S3method(nonlinear,twostage.lvm) S3method(ordinal,lvm) S3method(parents,lvm) S3method(parents,lvmfit) S3method(parfix,lvm) S3method(parpos,default) S3method(parpos,lvm) S3method(parpos,lvmfit) S3method(parpos,multigroup) S3method(parpos,multigroupfit) S3method(pars,default) S3method(pars,glm) S3method(pars,lvm) S3method(pars,lvm.missing) S3method(pars,survreg) S3method(path,graphNEL) S3method(path,lvm) S3method(path,lvmfit) S3method(plot,estimate) 
S3method(plot,lvm) S3method(plot,lvmfit) S3method(plot,multigroup) S3method(plot,multigroupfit) S3method(plot,sim) S3method(plot,twostage.lvm) S3method(predict,lvm) S3method(predict,lvm.missing) S3method(predict,lvmfit) S3method(predict,multinomial) S3method(predict,ordreg) S3method(predict,twostage.lvmfit) S3method(predict,zibreg) S3method(print,Combine) S3method(print,CrossValidated) S3method(print,bootstrap.lvm) S3method(print,effects) S3method(print,equivalence) S3method(print,estimate) S3method(print,estimate.sim) S3method(print,fix) S3method(print,gkgamma) S3method(print,gof.lvmfit) S3method(print,lvm) S3method(print,lvm.predict) S3method(print,lvmfit) S3method(print,lvmfit.randomslope) S3method(print,modelsearch) S3method(print,multigroup) S3method(print,multigroupfit) S3method(print,multinomial) S3method(print,offdiag) S3method(print,ordinal.lvm) S3method(print,ordreg) S3method(print,pcor) S3method(print,sim) S3method(print,summary.estimate) S3method(print,summary.lvmfit) S3method(print,summary.multigroupfit) S3method(print,summary.ordreg) S3method(print,summary.sim) S3method(print,summary.zibreg) S3method(print,transform.lvm) S3method(print,twostage.lvm) S3method(print,zibreg) S3method(profile,lvmfit) S3method(randomslope,lvm) S3method(randomslope,lvmfit) S3method(regfix,lvm) S3method(regression,formula) S3method(regression,lvm) S3method(residuals,lvm) S3method(residuals,lvmfit) S3method(residuals,multigroupfit) S3method(residuals,zibreg) S3method(rmvar,lvm) S3method(roots,lvm) S3method(roots,lvmfit) S3method(score,glm) S3method(score,lm) S3method(score,lvm) S3method(score,lvm.missing) S3method(score,lvmfit) S3method(score,multigroup) S3method(score,multigroupfit) S3method(score,ordreg) S3method(score,pcor) S3method(score,survreg) S3method(score,zibreg) S3method(sim,default) S3method(sim,lvm) S3method(sim,lvmfit) S3method(simulate,lvm) S3method(simulate,lvmfit) S3method(sinks,lvm) S3method(sinks,lvmfit) S3method(stack,estimate) S3method(stack,glm) S3method(subset,lvm) S3method(summary,effects) S3method(summary,estimate) S3method(summary,lvm) S3method(summary,lvmfit) S3method(summary,multigroup) S3method(summary,multigroupfit) S3method(summary,ordreg) S3method(summary,sim) S3method(summary,zibreg) S3method(totaleffects,lvmfit) S3method(tr,matrix) S3method(transform,lvm) S3method(twostage,estimate) S3method(twostage,lvm) S3method(twostage,lvm.mixture) S3method(twostage,lvmfit) S3method(twostage,twostage.lvm) S3method(variance,formula) S3method(variance,lvm) S3method(vars,graph) S3method(vars,list) S3method(vars,lm) S3method(vars,lvm) S3method(vars,lvmfit) S3method(vcov,effects) S3method(vcov,estimate) S3method(vcov,lvmfit) S3method(vcov,multigroupfit) S3method(vcov,multinomial) S3method(vcov,ordreg) S3method(vcov,pcor) S3method(vcov,zibreg) export("%++%") export("%ni%") export("Graph<-") export("Missing<-") export("Model<-") export("addvar<-") export("cancel<-") export("categorical<-") export("constrain<-") export("covariance<-") export("covfix<-") export("distribution<-") export("edgelabels<-") export("eventTime<-") export("exogenous<-") export("functional<-") export("heavytail<-") export("index<-") export("intercept<-") export("intfix<-") export("kill<-") export("labels<-") export("latent<-") export("nodecolor<-") export("nonlinear<-") export("offdiag<-") export("ordinal<-") export("parameter<-") export("parfix<-") export("randomslope<-") export("regfix<-") export("regression<-") export("revdiag<-") export("rmvar<-") export("timedep<-") export("transform<-") export("variance<-") 
export(By) export(CoefMat) export(CoefMat.multigroupfit) export(Col) export(Combine) export(Diff) export(Expand) export(GM2.lvm) export(GM3.lvm) export(Gamma.lvm) export(Graph) export(IV) export(Inverse) export(Missing) export(Model) export(NR) export(OR) export(Org) export(PD) export(Range.lvm) export(Ratio) export(Specials) export(Weights) export(aalenExponential.lvm) export(addattr) export(addhook) export(addvar) export(adjMat) export(ancestors) export(backdoor) export(baptize) export(beta.lvm) export(binomial.lvm) export(blockdiag) export(bootstrap) export(cancel) export(categorical) export(children) export(chisq.lvm) export(click) export(closed.testing) export(colorbar) export(colsel) export(commutation) export(compare) export(complik) export(confband) export(confpred) export(constrain) export(constraints) export(contr) export(correlation) export(covariance) export(covfix) export(coxExponential.lvm) export(coxGompertz.lvm) export(coxWeibull.lvm) export(csplit) export(curly) export(cv) export(decomp.specials) export(density.sim) export(descendants) export(describecoef) export(devcoords) export(diagtest) export(distribution) export(dmvn) export(dsep) export(edgeList) export(edgelabels) export(endogenous) export(equivalence) export(estimate) export(eventTime) export(exogenous) export(expit) export(finalize) export(fixsome) export(foldr) export(forestplot) export(fplot) export(functional) export(gaussian.lvm) export(gaussian_logLik.lvm) export(getMplus) export(getSAS) export(gethook) export(getoutcome) export(gkgamma) export(gof) export(graph2lvm) export(heavytail) export(idplot) export(igraph.lvm) export(iid) export(images) export(index) export(information) export(intercept) export(intfix) export(kill) export(ksmooth2) export(latent) export(lava) export(lava.options) export(loggamma.lvm) export(logit) export(logit.lvm) export(lognormal.lvm) export(lvm) export(makemissing) export(manifest) export(measurement) export(measurement.error) export(modelPar) export(modelVar) export(modelsearch) export(moments) export(multigroup) export(multinomial) export(nonlinear) export(normal.lvm) export(odds) export(offdiag) export(offdiags) export(ones.lvm) export(ordinal) export(ordreg) export(p.correct) export(parameter) export(parents) export(pareto.lvm) export(parfix) export(parlabels) export(parpos) export(pars) export(parsedesign) export(partialcor) export(path) export(pcor) export(pdfconvert) export(plot.sim) export(plotConf) export(poisson.lvm) export(predictlvm) export(probit.lvm) export(procformula) export(randomslope) export(regfix) export(regression) export(reindex) export(revdiag) export(riskcomp) export(rmvar) export(rmvn) export(roots) export(rsq) export(scheffe) export(score) export(sequence.lvm) export(sim) export(sinks) export(spaghetti) export(starter.multigroup) export(startvalues) export(startvalues0) export(startvalues1) export(startvalues2) export(student.lvm) export(summary.sim) export(surface) export(threshold.lvm) export(tigol) export(timedep) export(toformula) export(totaleffects) export(tr) export(trim) export(twostage) export(uniform.lvm) export(updatelvm) export(variance) export(variances) export(vars) export(vec) export(weibull.lvm) export(wrapvec) export(zibreg) importFrom(grDevices,col2rgb) importFrom(grDevices,colorRampPalette) importFrom(grDevices,colors) importFrom(grDevices,gray.colors) importFrom(grDevices,heat.colors) importFrom(grDevices,palette) importFrom(grDevices,rainbow) importFrom(grDevices,rgb) importFrom(grDevices,topo.colors) importFrom(grDevices,xy.coords) 
importFrom(graphics,abline) importFrom(graphics,axis) importFrom(graphics,box) importFrom(graphics,contour) importFrom(graphics,contour.default) importFrom(graphics,identify) importFrom(graphics,image) importFrom(graphics,layout) importFrom(graphics,lines) importFrom(graphics,locator) importFrom(graphics,matplot) importFrom(graphics,mtext) importFrom(graphics,par) importFrom(graphics,plot) importFrom(graphics,plot.new) importFrom(graphics,plot.window) importFrom(graphics,points) importFrom(graphics,polygon) importFrom(graphics,rect) importFrom(graphics,segments) importFrom(graphics,text) importFrom(graphics,title) importFrom(methods,as) importFrom(methods,new) importFrom(stats,AIC) importFrom(stats,addmargins) importFrom(stats,approxfun) importFrom(stats,as.formula) importFrom(stats,coef) importFrom(stats,confint) importFrom(stats,confint.default) importFrom(stats,cor) importFrom(stats,cov) importFrom(stats,cov2cor) importFrom(stats,density) importFrom(stats,deriv) importFrom(stats,dnorm) importFrom(stats,effects) importFrom(stats,family) importFrom(stats,fft) importFrom(stats,formula) importFrom(stats,get_all_vars) importFrom(stats,glm) importFrom(stats,lm) importFrom(stats,logLik) importFrom(stats,model.frame) importFrom(stats,model.matrix) importFrom(stats,model.weights) importFrom(stats,na.omit) importFrom(stats,na.pass) importFrom(stats,nlminb) importFrom(stats,p.adjust) importFrom(stats,pchisq) importFrom(stats,pnorm) importFrom(stats,predict) importFrom(stats,printCoefmat) importFrom(stats,pt) importFrom(stats,qchisq) importFrom(stats,qf) importFrom(stats,qnorm) importFrom(stats,qt) importFrom(stats,quantile) importFrom(stats,rbinom) importFrom(stats,rchisq) importFrom(stats,residuals) importFrom(stats,rgamma) importFrom(stats,rlnorm) importFrom(stats,rnorm) importFrom(stats,rpois) importFrom(stats,rt) importFrom(stats,runif) importFrom(stats,sd) importFrom(stats,simulate) importFrom(stats,terms) importFrom(stats,uniroot) importFrom(stats,update) importFrom(stats,update.formula) importFrom(stats,var) importFrom(stats,vcov) importFrom(survival,is.Surv) importFrom(utils,combn) importFrom(utils,data) importFrom(utils,getFromNamespace) importFrom(utils,getTxtProgressBar) importFrom(utils,glob2rx) importFrom(utils,head) importFrom(utils,methods) importFrom(utils,modifyList) importFrom(utils,packageVersion) importFrom(utils,read.csv) importFrom(utils,setTxtProgressBar) importFrom(utils,stack) importFrom(utils,tail) importFrom(utils,txtProgressBar) importFrom(utils,write.table) lava/demo/0000755000176200001440000000000013162174023012120 5ustar liggesuserslava/demo/estimation.R0000644000176200001440000000006513162174023014420 0ustar liggesusersexample(estimate) example(constrain) example(zigreg) lava/demo/model.R0000644000176200001440000000015213162174023013341 0ustar liggesusersexample(lvm) example(regression) example(covariance) example(intercept) example(labels) example(plot.lvm) lava/demo/simulation.R0000644000176200001440000000004013162174023014421 0ustar liggesusersexample(sim) example(eventTime) lava/demo/inference.R0000644000176200001440000000014213162174023014176 0ustar liggesusersexample(gof) example(effects) example(estimate.default) example(modelsearch) example(predict.lvm) lava/demo/00Index0000644000176200001440000000015313162174023013251 0ustar liggesuserslava All demos model Model specification simulation Simulation estimation Estimation inference Inference lava/demo/lava.R0000644000176200001440000000013313162174023013163 0ustar liggesusersdemo(lava:::model) 
demo(lava:::simulation) demo(lava:::estimation) demo(lava:::inference) lava/NEWS0000644000176200001440000003137013162174023011677 0ustar liggesusers
* Version 1.5.1 <2017-09-25 Mon>
 - conformal predictions: confpred
 - warnings (char2num used instead of coercion via as.numeric)
 - %++% for function composition
 - New summary.effects methods with mediation proportion in the output
 - New hook: remove.hooks (see example ordinal.lvm)
 - constrain methods now handled more robustly in sim.lvm allowing both vectorized and non-vectorized functions
 - Non-linear associations can now be specified with the 'nonlinear' method. Estimation via the 'twostage' function.
 - Robust standard errors added to the IV estimator (2SLS)
 - New cross-validation function: cv (and csplit function for creating random sets).
* Version 1.5.0 <2017-03-16 Thu>
 - lava.tobit is no longer required for ordinal and censored responses. Default is now to use the implementation in the 'mets' package.
 - Composite likelihood method (complik) updated
 - weight argument renamed to weights in agreement with lm, glm, coxph, ...
 - sim.default: new argument 'arg' passed on to simulation function
 - sim.default: new argument 'iter'. If TRUE the iteration number is passed to the function call as first argument (default FALSE)
 - estimate.default: Wildcards/global expressions can now be used for specifying contrasts based on the syntax of the functions 'contr', 'parsedesign'. See examples on the help-page. The argument transform.ci has been renamed to back.transform.
 - correlation methods for matrices and data.frames (either pairwise or full MLE). All methods can now return the influence functions.
 - revdiag: dimnames are kept
 - Combine: output updated
 - forestplot: point estimates shown by default
 - backdoor now works without conditioning set (yields all possible conditioning sets)
 - New formula syntax: y+x~v+z same as c(y,x)~v+z
 - spaghetti: trend.formula can now contain a factor statement on the rhs
* Version 1.4.7 <2017-01-26 Wed>
 - Maintenance release
 - models can now be specified as y1+y2~x1+x2 instead of c(y1,y2)~x1+x2
 - sim method now has a seed argument
* Version 1.4.6 <2016-12-14 Wed>
 - New backtrace algorithms for the Newton-Raphson optimization routine.
 - 'diagtest' updated.
* Version 1.4.5 <2016-10-25 Tue>
 - New graph functions:
   dsep: check for d-separation (conditional independence).
   backdoor: check backdoor criterion of a graph (lvm-object).
   adjMat: return adjacency matrix.
   edgeList: return edge list.
   ancestors: return ancestors of nodes.
   descendants: return descendants of nodes.
 - All simple paths in a graph can now be extracted with: path(...,all=TRUE)
 - Covariance parameters are now referenced with ~~ instead of ','. Applies to setting starting values in 'estimate', parameters in 'sim','compare','estimate',... To use the old syntax set 'lava.options(symbol=c("~",","))' (see the sketch below)
 - 'layout' argument added to lava.options (default 'dot')
 - visNetwork support, new 'plot.engine' argument added to plot methods.
 - bootstrap.lvmfit now by default returns the original estimates.
 - print, transform methods updated (transform output).
 - '+' operator overloaded for lvm and estimate objects (merge).
 - New composite likelihood function: complik.
 - New functions for simple association measures: riskcomp, rdiff, rratio, ...
 - New argument 'latent' in simulate method. If FALSE the latent variables are dropped from the returned data.frame.
 - modelsearch by default now shows both directional and undirectional associations (type='all' vs type='cor').
 - sim.default now stores timings. New print functions (data.table-like output).
 - An lvm model can now be updated with the 'sim' function, for instance setting parameter values for the simulation only once: m <- sim(m,p=p,...), with faster subsequent calls sim(m,n=n).
 - estimate.default can now simulate p-values ('R' argument). Returns an object which can also be used as input for 'estimate'.
 - Bug fixes: NR optimization with back-tracing; fixed matrices.lvm when called without variance parameters; fixed a bug in r-square computations.
 - Contrast matrix can be specified with the function 'contr'.
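The following is a minimal, illustrative R sketch of the multivariate formula shorthand (1.5.0/1.4.7) and the '~~' reference for covariance parameters (1.4.5) described above; the variable names y1, y2, x, z and the value 0.5 are hypothetical and only serve as an example:

   library(lava)
   m <- lvm(y1 + y2 ~ x + z)      # multivariate formula; same model as c(y1,y2) ~ x + z
   covariance(m) <- y1 ~ y2       # add a residual covariance between y1 and y2
   # the covariance parameter can then be referenced as "y1~~y2", e.g. when simulating:
   d <- sim(m, n = 100, p = c("y1~~y2" = 0.5), seed = 1)
   e <- estimate(m, d)            # fit the model to the simulated data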
* Version 1.4.4 <2016-08-13 Sat>
 - estimate.default will now use the id-variable of an 'estimate' object if the 'id' argument is left unspecified. For multinomial, gkgamma, kappa additional arguments (...) are now passed on to 'estimate.default' (including id).
 - Updated print/summary methods for 'estimate.default'. Sample/cluster-size added to output.
 - Code clean-up and optimization. Smarter calculations of kronecker products, and some regular expressions updated.
 - New function 'predictlvm' which returns the jacobian.
 - Intercepts can now be specified via parentheses, e.g., y ~ (-2) + x (see the sketch below)
 - 'getoutcome' with sep argument for splitting '|' statements in formulas.
 - Partial gamma, gkgamma, updated (probability interpretation, homogeneity tests removed)
 - 'moments' function now returns conditional mean with multiple rows. Side effect fixed across multiple functions
 - twostage function with support for mixture models
 - Beta (Beta.lvm) and Finite Gaussian (GM2.lvm, GM3.lvm) Mixtures added.
 - 'sim': parameters can now be specified as part of '...'
 - summary.sim: calculate Wald CI if confint=TRUE, otherwise use the user-supplied confidence limits.
 - Clopper-Pearson intervals and exact binomial tests added to 'diagtest'.
 - Interval censoring with 'normal' estimator, which now also works with 'binary' definitions.
 - default plot style updated.
* Version 1.4.3 <2016-04-11 Mon>
 - partial gamma coefficients (gkgamma)
 - Unit tests work with the new testthat version
 - Avoid trying to fork new processes on Windows (bootstrap, sim.default)
* Version 1.4.2 <2016-04-05 Wed>
 - Code optimization and minor bug fixes
 - Travis-CI, unit-tests
 - glm estimator update (censored regression)
 - polychoric correlations (pcor)
 - New utility functions: wrapvec, offdiag
 - simulation: regression design on parameters (see the weibull + variance heterogeneity example in help('sim'))
 - Byte compile by default
* Version 1.4.1 <2015-06-13 Sat>
 - New plot.estimate method
 - Documentation and examples updated
* Version 1.4.0 <2015-02-15 Sun>
 - Linear measurement error model: 'measurement.error'
 - Diagnostic tests: 'diagtest'
 - 'plotConf' updated with support for special function terms (I, poly, ns, ...). Old version is available (not in namespace) as lava:::plotConf0
 - Pareto distribution: 'pareto.lvm'
 - Code clean-up/optimization: 'EventTime', 'stack'
 - 'estimate.default': new syntax for contrast specification (parsedesign)
 - 'regression.lvm' with y,x argument (as alias for to,from)
 - plot longitudinal data: 'spaghetti'
 - Examples updated
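As an illustration of the parenthesis syntax for fixed intercepts (1.4.4) and the 'sim' pre-compilation idiom from the 1.4.5 entry further above, here is a small sketch; the model and the parameter value are made up:

   library(lava)
   m <- lvm(y ~ (-2) + x)            # regression of y on x with the intercept fixed at -2
   m <- sim(m, p = c("y~x" = 0.5))   # store parameter values in the model object once
   d1 <- sim(m, n = 200)             # subsequent calls only need the sample size
   d2 <- sim(m, n = 500)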
* Version 1.3.0 <2014-11-18 Tue>
 - New syntax for categorical predictors (method 'categorical' and argument 'additive=FALSE' with the 'regression' method)
 - Argument 'intervals' added to 'ones.lvm' for piece-wise constant effects
 - Argument 'average=TRUE' now needed for empirical averages in estimate.default
 - Fixed a bug in score.glm (with weights and offset) introduced in version 1.2.6
 - estimate.default:
   - small-sample corrections
   - Default id from row names in estimate.default (used with merge method)
   - iid decomposition also returned for hypothesis contrasts
   - keep argument added to estimate.default and merge
   - labels argument added to estimate.default
 - 'images' function for visualization of tabular data added to namespace
 - 'ksmooth' and 'surface' for surface estimation and visualization of bivariate data and functions
 - 'dsort': Sort data.frames
 - general multivariate distributions in simulations. See example in 'sim'
 - 'or2prob', 'tetrachoric' for conversion from OR to probabilities (and tetrachoric correlations). 'prob.normal': calculates probabilities from threshold model given thresholds and variance. See also mets:::assoc for calculations of kappa, gamma, uncer.coef. 'normal.threshold': returns thresholds, variance, mu from model with categorical outcomes.
 - Multiple testing routines: closed.testing, p.correct, ...
 - 'Missing' method updated with a simple 'suffix' argument
 - Back-tracing updated in Newton-Raphson routine
* Version 1.2.6 <2014-05-07 Wed>
 - New 'stack' function for two-stage estimation (via 'estimate' objects)
 - New 'blocksample' function for resampling clustered data.
 - New function 'Missing' to generate complex missing data patterns
 - Weibull parametrization of 'coxWeibull.lvm' rolled back (ver. 1.2.4). The function 'weibull.lvm' now leads to an Accelerated Failure Time model (see examples of 'eventTime')
 - iid function cleanup (new 'bread' attribute). iid.glm now gives correct estimated influence functions for 'quasi' link (constant variance)
 - Parameter constraints on (co)variance parameters are now possible with the syntax lvm(...,y~~a*x) (corresponding to covariance(...,y~x)<-"a"); see the sketch below
 - Some additional utilities: pdfconvert, scheffe, images, click. confband updated with 'polygon' argument.
 - New function getMplus: Import results from Mplus
 - New function getSAS: Import SAS ODS
 - New 'edgecolor' argument of plot-function
* Version 1.2.5 <2014-03-13 Thu>
 - 'merge' method added for combining 'estimate' objects
 - Adjustments to starting values
 - Function 'categorical' for adding categorical predictors to simulation model
 - Improved flexibility in simulations with 'transform','constrain' (ex: categorical predictors)
 - Added 'dataid' argument to estimate.default allowing different id for 'data' and i.i.d. decomposition of model parameter estimates. With the argument 'stack=FALSE' influence functions within clusters will not be stacked together.
 - R-squared values (+ approximate standard errors/i.i.d. decomposition) via 'rsq(model,TRUE)'
 - New infrastructure for adding additional parameters to models (no user-visible changes).
 - multinomial function for calculating influence curves for multinomial probabilities. 'gammagk' and 'kappa' methods for calculating Goodman-Kruskal's gamma and Cohen's kappa coefficients.
 - ordreg function for univariate ordinal regression models
 - iid methods for data.frames/matrices (empirical mean and variance)
 - Support for using 'mets::cluster.index' in GEE-type models (much faster).
 - plotConf updated (vcov argument added and more graphical arguments passed to plotting functions)
 - Additional unit-tests implemented
 - New 'forestplot' and 'Combine' functions
 - Covariance structure may now be specified using '~~', e.g. 'lvm(c(y,v)~~z+u)' specifies correlation between residuals of (y,z),(y,u),(v,z),(v,u).
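A small sketch of the two equivalent ways of constraining a residual covariance mentioned in the 1.2.6 entry above (the variables y, z, w and the parameter label 'a' are illustrative only):

   library(lava)
   m1 <- lvm(c(y, z) ~ w, y ~~ a*z)   # covariance between the residuals of y and z tied to parameter 'a'
   m2 <- lvm(c(y, z) ~ w)
   covariance(m2, y ~ z) <- "a"       # the equivalent explicit covariance() specification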
* Version 1.2.4 <2013-12-01 Sun>
 - Avoid estimating IC in 'estimate.default' when 'vcov' argument is given.
 - New default starting values
 - Time-varying effects via 'timedep'
 - R-squared added to summary
 - alias: covariance->variance
 - added size argument to binomial.lvm
* Version 1.2.3 <2013-10-27 Sun>
 - 'subset' argument added to estimate.default. Calculates empirical averages conditional on subsets of data
 - Improved output from compare/estimate functions
 - Minor bug fixes (plot, predict)
 - sim: Piecewise constant rates with coxExponential.lvm. New aalenExponential.lvm function for additive models. Functions ones.lvm and sequence.lvm for deterministic variables.
* Version 1.2.2 <2013-07-10 Wed>
 - Regression parameters are now by default referenced using '~', e.g. "y~x" instead of "y<-x". Applies to setting starting values in 'estimate', parameters in 'sim','compare','estimate',... To use the old syntax set 'lava.options(symbol=c("<-","<->"))'
 - Newton-Raphson/scoring procedure updated
 - Search-interval for profile likelihood CI improved (for variance parameters)
 - 'estimate.default' updated (LRT)
 - 'iid' updated (variance now obtained as tensor product of the result)
 - progress bar for 'bootstrap' and 'modelsearch'
 - various minor bug fixes
 - new functions: Expand (expand.grid wrapper), By (by wrapper)
* Version 1.2.1 <2013-05-10 Fri>
 - Optimization + minor bug fixes
* Version 1.2.0 <2013-03-28 Thu>
 - New method 'iid' for extracting the i.i.d. decomposition (influence functions) from model objects (e.g. glm, lvm, ...)
 - Method 'estimate' can now be used on model objects to transform parameters (Delta method) or conduct Wald tests. Average effects, i.e. averaging functionals over the empirical distribution, are also possible, including calculation of standard errors.
 - 'curereg' function for estimating mixtures of binary data.
 - Instrumental Variable (IV) estimator (two-stage least-squares) optimized.
 - New distributions: Gamma.lvm, coxWeibull.lvm, coxExponential.lvm, coxGompertz.lvm. New method 'eventTime' (for simulation of competing risks data)
lava/data/           [directory]
lava/data/nldata.rda [binary .rda data file; compressed payload omitted]
lava/data/missingdata.rda [binary .rda data file; compressed payload omitted]
lava/data/nsem.rda        [binary .rda data file; compressed payload omitted]
lava/data/semdata.rda     [binary .rda data file; compressed payload omitted]
lava/data/serotonin.rda   [binary .rda data file; compressed payload omitted]
lava/data/hubble.rda      [binary .rda data file; compressed payload omitted]
lava/data/indoorenv.rda   [binary .rda data file; compressed payload omitted]
lava/data/calcium.rda     [binary .rda data file; compressed payload omitted]
稛 p ux"Loo{IyǢ n[8zf¤S}|0:Ҟ?tftD aHuh_ONo#lЁ)a`rutCXH> syn.#RW5*/a # 齰0tS xDZ]l6^\"g?cq{ sb|O3d*);Z)uܽ4C)]հ@I864 \g8mao~Γ\4[`ql>+X^ΨϱIg<XښSMnֶൊ¢ 7H>~[j]!Ѹgjxs)OSЕ|bݷ0dwɷ)@KttFsW)9Xd%J IbG\($Ѱ%I_H<̓hzY|cnSᢊ^7@>;lׄU뒀,U N5뀕R4WcR3Si7k`3~ B}bq^M4, r6MJȼ"ݶmF/~dQы`{ kӆss>7ns,vi 4m: kNTUB3c+S]xw*S,ߺ^e,Vls46:$/ J@t86C 8ZF*/,Rp 3\vr+S*;aSv˽$⼓tFLНt-69&ϠG4n +댜H#gg< utM!H{>\Uøc{`^Uq-7#*E=αf#9:l 7yY~+C\U`qQ>=R{K{]D ai3(bߠtr7Q8gWOY70 sӻs`칄ISs'9W5H)dٹCRtwa`D=g8qѪ/G]_hC瀖h_RT |F60~2nYM뚳t!x^&f$HP/],= )8SL›r&852RlG3ShNbUH&.6YAQ`JLwA` |O#dNA QK׉_ddz4?雃E[h,&[^5P}%1 & oYew|Cpmy )~Ow@Q]PG;qvP]زodfÂ[FPhȖ 58ᑾ"tܾ$%3;7򑨸33D(0篕č֓dZGb:Аuw6hOzk 7K ј i{ywq]; K>d7_WrWI\@eq%0V&nopȹGh 6Q^8΍(E~4'_E޾Oͷi(S_|6~}!ɣckuq\<~ \+ j0Zղ6*lu,%K;op!^Q̰&*ڔgס;#$ 1e`ۋ /aA1'b!>fHXden/* D%$~Zcy]>r; Ump KBZ~-?{mi~Hχ6.b[Z-ʅZy%xpi9wC&sۈ uEBi.qn'T$ZVtp̸.9n`2Ŧ'i7Y BkW Rio~@Cj[;9hxDC݊oC+G_PS@x ;LdvpW9Xze3/_#|R!M#_.A;Gt~/c%جiuCK^dMK80L<,3Y`KA^1'mIp7SL֭䪻X {B# qD!-@%.4 2'>mr8bZ{Lr?R)]:scIv%ìK)KL8u h<'v8A](Ft9yeykyar5:I,)-$Z6Ghq7QЯcgOxu.0FX} u CM\fѥa6y=.ZK\H]⦏o(atSO~[u[q%9p2ꎽdGASUʾq$O F-4H.Gy7"Y0 T(# 矞,N3w+$BB񌦊FӿvAzԪ4.H}&A5քNOxX9k 3Ϋm;{x~*: NI$og{ iCW`QLJsOW@br=t/5g/6> W(3ɢ G%$| W2l|J+3B C d6{Ijv-y 4ZzhヌēnWlyv61UL HߦqѕfwOz#NȘZbkJ}З4vt6f16-]s4,-'V=mw{LTy4sYقQ\vj"G\SAW 1E& Ai>q?`M'IboF=Ɛ ҏNC&QyXsJdo?쎀ŕ꾥k+_W~S݄Z޷Ru]Q|kS&OLPKb8et6Ld n}{scyػ-Guvn]z(!o+<ga )Gs;\vn"fiCOb-avYжYNIo {Ԅ`Ud\jvׄ:Bd%C}0ƯyيS-o1;FuG ֚ ۻpeChlU>ڎS62t( =]H\<{'w%O+_㳜{Sv1A/߰ޟX3̙w=[ƨc7u7H@ i'wQwo`ǩ; V+d༁nvl0y7VĽ C^m5caխQh91RxH)~弤g]ŞɵL+ޡq87&MZ\ROkLιGA #$UG+|цW~y¢o#spgm;z.)H`X #_4;1*%1\OrAset(@Ar,^jp5]Ya@K6EUs;aB ^eZ;c_<'xa潽2X;|$|i 1< -pQCH7)xN_Gk)!ob{gMP=Z$+y'B'#m~Xnow"LPt҅$NJU5&kc;N[Q*aɬ;&Ii]  U_cl)cDra.Жw$k8gà\2.G Xoͫd:[ Kbʃ=Э2 &YyU ?͏sBNSmϰ4{XQK  oƐs46gL%Wu89,|8W]Еsqt?oZ%8_-G;x{}vZ$b ۡcwO4ֽ-Y`K~>{KX]ܕO,a ge k\.Bsv xQNSN TC;V2  ~-ڭK Wk$oUZ9θ\f /dͰBtܰ\=?.e㯿m(Yg?i2];P6d5Qb2h/i'8ݻ:l~V'X|Z O_g Ķ*!rviZ}Ni?kח70XVf$PCV3kɽ$v qz; 2lnuʮ};&z_;;-nhbw0wlpJbmP|zz!]#x;"YEjCiVԝ:a;FĆQUCM*%u\O1 g>4a4b`ˬTnm;a'z"d3ZūRבŔH:y-iW#SÙU~W8^)+ufVU,$SSE"UxNcwJa͖=QוBdSL=޳o\ }50zSWK݃kD|'   Gfʂ@;9+һz. +|F5~o<e܄mqc|qBzGƶ1!Nʠw)nDYSгX=7ސba#x~킭F8x/`tZzO|ه.ʪJ%R?\jHluˆΓ!N|lL(k!vjW@f} ׵ '>b*׊'`VsdJAHc1j,+U^4I+0^Krhu?)QX$Q~c\8ZVZkipP\v,= @\m pdG貼!not.Z#'a!-}IXǿkKya[ 6hβ{Qp7WUB|9?^},yog0EFé8Er9&_ D6Ɩ#FwJlS [Ise<`znŻ0sK^B5˼Oƕ+P܁#ki lWYH n OĖ;'m,fo;]ꅩѱ7ⰪC~SO../ Q4aȶ b_|S@ީ##,Zተv;qDZ}Iwj*]\vu Xi͈~c:@pl+\?F40C,9 vŒmD$2+}4ƱR 09] c:t03d#LX*[Oy_[TDž'pefZvF| 2&P&<}ź'[CO~_oD&aD̤% ^0<95"W, P8|,{A2\u86d R()38FzRn"*n3k=]Kb&ϳ}x^+ wҡ#Qc/N+yObہ/?b~^0DuӼ%0UsV>S^ko*?IFYݟA˜UUGgO)b %K[>`ʕo UN "尴ʆfEeNa]fX2G 7=m  ;7OO )nqq8,) ߃¸씨n*4nwzJYM`)|Kjkq(fʥՖ1sE83 K<XO*>t- ;A N4ͅpV vkX[GEa=jBk\F!ԋ]/lF 0!*P6Ŷ)m TPIo oE#ɰ= +KB9kp9'FF%B') X >ښ0 pY,gk3 ):j4Y#]ڲ8Csd622kZ'[!,:_K_R`&ً@𬦌Ïߵ{ [з<ͩ>rԛ֥fP0ӿl?ޮ{rh)R;!\>g=2ŏLRލmzE8=$GdeJ)5I}IADaDI09G}J`NjrUGqOr6蹓Fr^e n;I/OAPc&n a˪qCK=V|ﶠ=:.$n#1ړs,QZW \〙_7+U}E$ഇЪ."')&ʡk.OԱ;0v$'_~ͤeӟebmpTӄ ?b^bN0w*H2~F.( 侒LG=Ɖ|n03j驞%d%fS4v68D.- HE@Yg=ǶRXt.a"!RXԚn *i0UhvO!g7",E6Ox#2y5=ڦ10yYAaDHɾ {$G- G$}Ddm9u#tl|o>and 9mh!tt 5d}" GI #0[†{k_`}\ CCṱt/ᒓ8+0zK4KoNoaFHVWr։)(l i7 .ҙ+8l=h(j>-:D&ɋX$@'7*̒ys϶`cX+Z;oV_Ί-*8<z (7Dvk9NvJ8mwF; ժͅ"&,O~e+ O6ߺ,8x?뮬9P໭L۫c~- , 펿򰥜IZL]ifпLہ-~M9Fߞ7*'%0Of j"®oɭ+&no_窠SM;k˒ {ޞx_ׄwQUu#>;%sꋠkчӌ \mLV[>y%Kop685 nut4B0BP6v;|q$G ?']mڮ7tpHխr&!hS?LG?$`@`[z{/ cP|[fq:2S?xGv0BU^|aU!-a F;޼s&Vbr2-ĈiE^Y6{gEB'ψkxZ0`5ILfKnB?] 0GMHB |+HM)hvR (l;:\pj)`¶G*Ws@Cc; veZ<[8n2p<6MZJXz>o\谌=Щ˸Ii ,w4q-p@wmO8VnCi =J0w-vi\ H*|ϻQ v$j CۙHans$F$B8Lt ؒ畠Ko,i EQo(؜*_.6akp4\$upIOuNC OC"js qs249pn:46}xz.٨6uZF K) ðw*$ve`BŇY? 
{4]U `[Hf6 T/gףc"7r1^H2{ΤO`_,+Hgՠe8p"F#=6l$.O4rI1%0 X5Ŷg_IyCՇ%\t@sSc'qw}UW*ï N򊸰{u0TPJݎOSMO`@eWLN>o$Q)B[??漢og%&<%,r8!i1睨#1vzưeoh#bޯl X55ܷ-ރW?ʀz{x-d?0=ߙ1 mew5ƱsMuH^jtڽo]$ݛIJC03tȑh z0j\jp cJa4O`I5Tr@`U 6WցM(?Dž Y`t߇F1/([-#rwjY(w%0}{f,xl&9/ɚBÆM%rCGR;&3[!nz:]@?z3|ΆE=aBG_hlRյQ\M{B)OѶE2PPFCE! l^َs'q2^9 Vx fHy!dǦc,{:P)6@SG`ʸwY ;S }m[5kU\Z\8+0w)#8{H8²{ ]i":%>' ٶv#"+ -q|a"&N\Ph3"?'~@DV<3^\9hbHx UqߟKӽ5"ka%$}v1ag(NTz#K\oPxU;1q:!¥Uek>bӡ @[c©)Wi=s=.xR[vnA/e55@`!]N0|=1|YK1^V$qQ7oX߭gi;¼q}.Ͽ{ RC J}Y(䅖Υm;ӸN@O*+s-/X8-li≮c]rȧ{tC+*3/iXk(/Ʉu+ByP?.u+iw*. 9\6F~,Dm7~rg$(HyLdu"ŀ鬙 |bPݘj2[kvHB5QOjŃܝXPzQ0Ky9jQ#%8X㧧 =,N7KCo_ȔǗ,aNVDm5EIMksAUd65>!,>83r/opӺ>/zǔAu*ydv6vC]uh'-f>P|-"V1ݶW'3cj Yb,0~d\H%__cspϤZk5(A,ը߯UcDA>MU›Â;3[8l8-3텩ڢOLi,8\(1 r8-3FGznpMdZ*8*0  M) $@EX 0%w]߹ ,j&+˽.y@d}wخՔ= Ǹ k@)Ia"I({! f}Ԙ]z q zV[ X暔^-w^#9{X ]kr/{ze^ ]]a ~y ms+@B҅zF*is=H+RX)vžm+cןZ^MO.4m;^*IO#~ ,0Zs& {{x-T[1^wY2cND46i@L[5މFWaI}&w1_3HMPK::&:::z:&MOt 6 N&ʒlWla_qg#7hF+g'ۿl\Wt;v>=jlava/data/bmd.rda0000644000176200001440000000400713162174023013340 0ustar liggesusersX U~#)ETDDDT"SW@GDʌ#*20[4hcVq7ciKcLcaݯu+wፀƗ|{߿|7V뤖֖ZTkmZsmW١]k7Ep(ab R_)(ܺC{(T[1\bb_H~Q((T8X1VqP8aVLTLRLV8RqhT1icQⷊ3+NP8Iql\ũ++(PX8KѪ8[Hqb\Kmvy O|2 )+.VPt*V*V!~.] *Q<\_o^q?KK5$.ްDM됞& ˥:}[Ht/!- Ht~co^xbjԻ{e%m:=M>cш&JV>Q^yqݲxgbv8ge/j,}p_J]kx$o3l?_o7ɞqW}(F`IԈc?ăW~IӇ%?\wI2; I $u}$WJ ;~")Lp_TIp`v&t%(j9(-=։Wy)>Txa`9Ij+? Y&z9C?gJ:!x: f-g |*ܩ-3^r%>-IMIݩv\Hr̛)fy"OA-폽}"H<ٿ1an{[w7@bw{ n|^%@㣟O~`7u:r?oayjd={xRw^oӓ_ðUdyzZe~ʼKOGƟ7ùm?l:|J2Ú,.Km_0z 1bVo:vOvH}(OH壒A24 cy>/,Z2HzAް$$W1[:`-S'/~jy+N[evZ~z5 u/#⇼5I*훇[[@g t~<9}-d=_sc 9PV5v9.G}pfe>RO-^gc}VCZ=[~fjol'ƼY_~4?٧fZ΢a m)py%9oGW,ܳ_7ևΰ_K۞tʪ!Ov+e7_ا@-+9fWBwQ}+u:Pc8?D?sYǑW%Z O^[/n׮y' Bw% !3}!_^nɇ %G΅̾}xS'iՐ}uO ^ v~Y?:}pê~B3:q{}҃iY; [?A?!s+<7ޖIY a'_"sev?uGl1lϾsSmVյxtQ>5wvLyRϓ{}>)\Uv5wފ܏[:ִ_֭4 ߵ{i_e˖[^6lava/data/brisa.rda0000644000176200001440000003722413162174023013705 0ustar liggesusers7zXZi"6!X>W])TW"nRʟXgqjn]LUKC΍@lmʾ̈&! Q?#Dց/:Ge [8{~C'L"-nJ e[Cbpf'r5n|-&1Tv\iNƒ;Mxk:9!I(*Z+vp|z~7#ENp6/͈CJ pW&=MNj;Ij#j B_$0" &?_vnG6ݺIw[DƆ>Mkɇe4rL{l~[ ʌ7 _fݏ%n=֛bc( gj.Ud<i^Gp/,=w&Yl'L tbHYpQܝ'K+oB\nb1n,m8۫ɀSY--<uD:/udx'5|8=8ѡi)pvgc 8;`Y7Z̉qЅ΀ci22-ίk/h$]ڱU-{Nf{$ԉ0/aе(ĮGӼ{.ֽ bރpS Agik۔8On?EM&:$ޙiH%hEP1}daoqDɛsby׷lt PjLʛ,ړФZno? LW㾸II؛cR 5I$SVYdr*W 8J$R %- j@t!E=^f6l! \ύ} R_Ko aʤޜ rDdeZp/>Hfõ Oo 9%omaRCx*"P)Q1Vf\? ui1Qq Z& J c:H/IFHgX.*Dssx{l/[Debk< a#uwc!Amr+hriɳۜI 7_[fa8kRZLj94`4e(U,@5;Pt15J]6c2U۲aN ޘ&!L[yR7e?D^5cka?D%hgLae[|ly1yYV徖rDsmG-2M<_Q 1e'`gMe?P\YL){Mi䃸G/SFbN˦c'$l>=`k^=}~?Q͓M TF_IɱH@;ixmWx4yx\](N-x ty|2 y7l .l8jGrzWǗL(w1.Wmot8Nhn,63a nmiɡ(Ң(;.pmvXYt*S1Vgn DmFh slM(75&ܵ!:.o ck=d+WAFeCa ѭhْe/=> =]F.Lw%ey{J lUe^ { V :'__:M40*xu9 FT0/LȔ NHAS>U.ꎺ dˡe p;B$)}#J^_V:`TS4yRK,mՃ&2%<ݭ> 84#j,ppF+&{Ԣ-U}`SNwC,eiעv蝦'7$ 4y0$,jw :xG! R>5hBC87q:8e,W*ekN6'#r<1xCuV=}FX }aO5:5͈ ȸ\.&ԒsdR Zv EtS^;<v}q~`nGp*jÖ ќ ]79Xp cuN'qWlUCkOl t_tŧ(YLpwΠ^S)WCToyp 5}$o5kOG!Jn$hU*{1@ ܊ ks^S-ˀ6Ժ߮T*iczBKlU[l*ʋIVvƋ^فpԅєShս\A.agwa BhhkQEɡö%!H=(!guTٸ(:6x{}c_e{xLOwX qAc:iL6p 2R,EWDћ&F-(1~6"t48;ĔNͮN8u PFjAix~2*y#qٴ{Y '@ [DD3-;׽ b&rk@!" Ô*D$n̏>bN>.2}< vE;u]Dzq@)zlEctekD=*L Wb>T롒?Θ wD pPcC0:+.qVd2hggʪtIĐ~_GpiL"ۯ1S''qSCҒXPbf>CQW}}Y "#t+zݟeK2d負7?J2մ H? 92Ҭ&h,?QDQfl rk]T^;"а~L'dRL FRκ7MVn{툝$]?)wfM1g ! ]='"salV:1i=F4`0)6V)њZ!Yw)}l敡89 Ig?ۼl{o"(ZvMxQ|ZRڈ;d>l؜]p9zㅰ1a4,++7$Q7֮N&u؋¨2)"ridws:f0 QVEC?x{jt0/j ֬'~4d7`vcLf&Ac~a ߉. 
)+=~Lܩk \;%q!l8UOWI\ GT)9V4"v3\uMl 18]5`вdsk'9ªʿha?Erwj\&ΐà[4F}{7z摯 ?B3+J׵_Z\֞]$84P`DGygCh#S¤Œ:uB=葱" t z )1vv4?D%A=4Վ8]4 x]f Ѳ %(nZw6'Kɫ'e0=2nEFf䑤]yءP<3i\x'߀'f[:ǯnZ>94/8cwm7׺(WCQeD) |9\Ftf!x OjРur 6dBT9=0TWa FdPiD~uU:Z 5\ۓe n)(5Gb7'`m-B T?zv$^w̴3vA=bD}%}+ryZlKq WΒ3٫m~ ->t@I_ҩiE|R/@o$E)S;7+EXO˗w~IsooJ k\rx*PnABYQ*!yEJYaSqe̖ue: ԿŅ1 qp Nnl2z/zA{^ ]_lN7pm8p'͸t^{6fTh-m玊; {~ʙvZW*'QS).g/$eekݍ`]Mov:C#=w?& L ,Z;:VeF4oIu qݢÍ}㰯}rтzԺrBR$ kWeMQtӉ_Gi_94~#7|)Νl׈uIƳ'&3eR9 SԬej- ކy,̕7*yZHɤ^(#R#4r1ZQ%KԼ2̙ _ة)jI(K3 X Χ@.NհgwgsƸ _Y+'59+QrÜGLaXj^?LpAزCqifz_虇y1CYdSAM6R 'PmW܄*0: - >D;0OE~gX~7AڈÅp_2r & Ed;'d s.1Qi`F'*(gSeJN#3']*Ώ9ޥW|7~qYw Wh?мp_fBVT oti6m;MDI=t L!d]xpgÚWaH ȣ.ȉdϱd"٘xǴC_zc qip㳁ߕ@ůU͈gG敖 q =CAQdԤvY/eEOH@_"eu׹{\wgOեO dsS|%0{J<*}QMYYU`1նO:F[ouò/7i3-+2ץLFJҩK̶rS4N F UUE$\K]lViU^S[9$tOeC}Yj^2o @ f~FS)B~oG\ЀtOPk VL3n8ńpksR{ϯW ѭtK;aIסDW A Ⱦr[u /[cVMq 7ܶ(Q- zpiV)W!dYxY[_udR=S:Q~Aa 5*'Porp.G\"M( 7H_%Y1fq[_ Q7p&iǭB+%w$Hd>IJ}jBNǢ#i7VP g}y;E:Vf:fْMbvhQAMoѷ@HMx Tv@Bn疳J `P)f8/90)-8ۄZr[Ȇ*&)Gv`Ϗ?^7\FkoKC!|4=XtBjYs tAZ[r^ɭqo(6@wΧk, %RUK"]b0)y__EΌZM4PAW?\$tn(VFkIYӯWSKV~{p>D S* f(|fOX&:TssQ$ȷsvK؞='0" Q ҳ? ~]h5WV ~ W=̮FKkfkqGwUk(4mL"A?Dx ~r{µT'9Z[wAJҊzBT]s8Kv(]w"Q1-8n(;h(E0B]O{er q4p|;%,\:|¥W|olr ϹQjQoYnil6/6ZY ے4{ZE)Q*119—=QU,wHmTԼUr177M $r.LgX|J uWɧ}՝Bu_ltk+@O4J'C4Фf.e-kcRoHRFz͟N'(x9U³zs>q|7{8 {.F)2T6Qk}^@1bZcoNW&@MUjt+ *<ش幑^ֆ嫐0(=b9|犀7k0U 9jxS))tEa.9 2n o>";Nz/$ӥ2oD̾| Ǧ>f)MSx,*^q:v:k_6ֺD!lΥ}g=,O8" ZJ:mHaeD<ΡJ~# JWڗE{]Lk;k_3 eLM9O8fJ_* n]ip#5;7dazsx_/E.5. QG{(Qy>KIUk\zP?C)Gٺ)9?&m `u} nV)ݨ@&aWR)\S]q?9z|zgl1#+>\UwA IqD>m[gJjUg)Wv ِ% JgڱKPW_W,u N:WQ1xݾe5cͻ!ciGWIN'5L2)qL ɪ ĶHԕ/P*30y&yVF F4XpYҕ{'G`oߝVjҷ- {{ц Ĕʵ=\@x_DUT/*E yB*LyOl{?WQ(~{quF Q3CZ9G$A(EP2HNw8\` >pq@[xL PBYtxCӿ݄'<;ÚCգu ̘l+H"ٔ4@}t^'(DWXKNBbA̐Y Tؤ#CֆiTh6=O9zSwf2;4Ȅzp1ihۜ_oX#=V?#ҒA>.Wַ? YDž%&JSq;iCTSsd*5#[A~z0v4,+|x9b)k,LxWi_E$ʱNE84ZI=+gH+d*Sfc/؃6 $zqOāa}z$_]'?^zbAh|&UYD"zdGm066(CZXN$g^)Ex6Tchd䳐i+/ƛ;tڌX<]jK3ObD[laa^3-!.ّ7o,CEgHw>N<]ZH!BC^̘,/Ds"gVUaI+ȕ)[vL,+I lIOL+0~)i;7%@ 0Xe(|C76Yi>*f2'֐ :X{RQ+sOϳm*X<"GcV BK,6*6Zj9ؐ6 E+IRߝG7{"sH=NvIMEշ*hѪtfg{>ɷ/BЛDJ0zcjunӒj?]..WR?8n?P4[l78ĦjΏ_4Y*v,L{ڢ-6!'k4TmӢMt-@ХRwmv\~C$TF#cXÜ4wj90`8לdD&ecw%jN䪟˵;wwtB3BI2܍eQ!_r hS@TSAruSr+&c.XˌX:/tD\lAW%<~pQ^V &2][*EKG3 J*CS>z^VY@ )~, ID+>0vgi95n;l'9Lb\T9f.keITbk{iEդlj2M_P p}>WfD$XTm u6+ &%{u]A}V+4VI>ח;̜&,W# y4x9Ũ]ߘQup*9cazm2>>(U:: ]S:կڧ GXĎb$[wTd;KmS~ѭW,Z5nlg?>6sxε o" 2 4+'oğֻ;NP`jodS{݃*V!249q\spmw2 hTU<+~)9úff3EocXJli 6.иN6PnJp:pu6\jt.=VݽhTIRl!Ԅ&i`74\vA͌]ò\K;fnA޹떹߃sQ|lL$6Z˄-8Eǝ4-[Q}"}jҏv:VnfKB2M5A 0HÆ՞/Cӂ%vzPhb/mÿ~=A_ O=G:OLHEkWh K>jJrؓ{O B:IgW2ܷe9ʔLP$׫?͘Sd6>x֘izκץ46K@{>ZoBm2Pz.DQ`G;+xmhHzkXvql5gYU #b(IHQ(B`"{EyW٬OVXv}ۃcŜ9{KizRfrnB5zi隴8 A^ߣ?1ʎlg_M㼚l,6欴`֧H¥M!`wJϼ#Eon؝]?V >1N_7.ZK`}O%o%%_ooCRD My78jӲ:E)̓pZk@KҸ<4%D5a̝qGAwF@rak.93Rk$ܒj݊S# n6( :SfIheUWE/*Ŏ4}k mGgMqNaT lN@ﶩʨa3'Tr'?C*RO B֌aF9w.f;9Kя2>(!uȶf65׃-j~1#Zafp\:̖ A5` ϦPFwUdގ4{i#l2K}> wdn*qğLc3EA*UgiAYs:Xۻ!m[ 8űj2j@\k+:]~Oz|^0f/Qsܦsq_{钝T#e§P-?oZ7n%_L8셜+Mǡ(Mޒ[K8Xłlt&F4n19?`=FW*UP!Qjee{VՂL9I=l⒓(y6i+ I//T}8r (zy ?36f^߰B5@%.|Fg:-s"<ӶL-~z.{`bZWHs^F\Z) iU`n|A-K=qU8=Je +od>Bw)݄kUv۬`lvS8 sʉr0K4hgc],b(A6VьUiDz ȿFX;OAFNop~CڙG2 E*(űFi!6 6 ā21)J>Cˡ[l /ѯ2pKJ[?2|i=={?3cz' ^gC%-en)|q60 i sFv0k{zB"lZc;p#,HL`c.7Mܿi9u-6Yyw QFvC[\PLaG,IA-t|lSq _^R ȣiҘ$ӎeӶ%S'L' 1//dĉ>H .z"ms5Emmj'?OR,Wm<6*ctڻ\&?ׂڑ r3T`TRLpp|-1:%T<2n?Mf}*!sPqޱ'8ޞw4MIgȋX4u0Z=__4N9"ZF;#G0?uŬ^'4롭֌>ӘTif"&AUd~o8' C b4;# qI E)q3De;X-x=T+AdQҟMcqrǨBc0oZF24tGp\X5eAB֐,!^rlB:- c7\N{f h<S8h9MSt'j.&,BNq#OL3 v4qLZ@-ADIy}oy7-<|vWБ(2 4N͵^deow[YSI!-e*)/pa ^fÉm==|Ċ cMuKӢ^2>!#$E^ALN#atpPtG]@QoTH |+*%e&+ψ)uI]ds@7,ߙ6{ǐY<}&ļh8} UTc9g h뼏cH^$ѝcM 1Xh拉S Icwho|w6RQk%SXbTI/`vxTM{O>j=_ Ly$7^h:(Wb}9,'=r@e,pGgqp vEWl1d=!(S/ 
+2FPr8|[0y|Qf|·X{\`>0 YZlava/data/twindata.rda0000644000176200001440000016302013162174023014412 0ustar liggesusers7zXZi"6!X<])TW"nRʟXgqjn]LX clKX-:~47LrD9dL{L.tP$cةA(k,7_G} ZNa6XXuQ"5 |xKR3ZoP:CyPWu fP~6ݡJ)x{-g8xHUGQI=GWd9)#Vj,AOEXPVGe|Wa_G S.ojbAlb9@9!|Z՚ڞxaGĴr NS/_)G\ 0}1j~ J ؍ooť/Q{6!6Tь-ϭƷCrWC*wf>IF+}n;7!p>V~=iCIq0CTdÝ;.d~g,ry]\ϖ\z@A)W%*x;a€sA(į0$Z6ka ooz\}p4d׹sԏ ^4#S"b[fKc'FHvU+b0&7F4^|Z%Y] #o%7pHˆ(tԠ24ByMV"w3lK*B:Xz$N%]Q/qd47̿ti@K1_CzKo}4ѵSqM `g{@e, cJrل 86 " 3sY=_D܉zoܔcn\\{JGhim~b3,_ D&kAN ; nKY^7 g;uQ#Ŕ Mvw\Xn>#DJ(TǷ]=|Hr dx+yVdzR.SؐFiq`d[ca=HW5Cڈ}{>pcؽћ ;6N٨G/\uJQ9S|xͧ9;VtZ.W_@^;ޘϠM/^(ct萩asT%p " жА<2'f3{:r6aEU?JJxxPCic`yw{6#?2V`,S {M "'ba`2S[Ƴ q7z\EJ/8j@G"U;wAY x"dx+ì8+3;BĄ"s59Dh'甆I_]~9ϲӄ[ltSu/#J83TOMi;.h#$o.Jv|͙IbREA3c:qo6|89? ?ѡ.y[{N&Cq_ Yp4,uc~eG XI=|'_P`j8 j"+%)#4\@3!%8׵oѨT,Cc8)n7Ob]~ԡB2Hhg$_ ۚoQ)y;7v<:ez{{o&LHۢ.mZD2XR&r"֟O*=b.{ٓ}Rnn(IrJXP @vmcZrl,XM;#5Dw;(e+@)[m-ȹJϨ͵1SB7Ex|ut*YqNa:c l#垂 ٗ+_ۡOufS%>$)&wKZ|(r7u[Q)ffA|)yA/ xYZ JLj8z |ũĩ iQ{7c*ͫ*ͽ"]mF%:ǚd6N~w;Ueۮ"É. šM+X*|7}bL,pÁ$J{qMqJ^^aΕ6b;}=l6ՇürFCfZ tkHmWbyxNkIrFL4fYLz ~!/Jed DcrmxohLU!pzx1$d ynAw & P"*Bl?IV \ÖadDM>sYnZ 69wM7b {o`o/5QQ73o]w-a:aP.^96"oJ,F& ~*4Un'k+g" 21h?͠*\cP~Wa8DFa0eĩOMݿ\AP3sJ oUB 56YOz2wOp4E\R}D-כ,Rܨ}^ǀG9a{Ovk7 Wujh6CG޵UI+:̍%YBBL€en?{cTH,MQ}Iy?9G2#d+uMiFzeT'%8aZ.z6@/d.p$5cu U)JOTz,j4[)g2x !I Cvna+V}P7~g\ ۦcLO_] .FK^1ǣ_6]Om]pP/XO(D#֠s>O]4 ;,e>hg]Wγ~^>_R^_V0hͺ;-4cvVQtx|=DtxPsQ S*JjF|'^M +0~aEMD >zj cŒanl}, Q¡^ Z 9Ni25\qmr >I(BJܱaԘHK/JӸ2ou'tlc6Yٽ?(:Zkl$k*3]џ6w#|`aZzZ53N,5k#ϔ_ǁ- ;oˮ |ђ!̽8!˻s松b!ёkO0S=l3b`i0S[! 9ivďg)Q=w 嚜z2TQ>N^ѝS˧&Nfҍ/]1"})BGGtY̋W,o0 &0s9F: ;C5ҝJcԺi4tn:lS/ē2;:zޢp"ynj\*F*vm)H6SϸP "́Rvv^ j7%Q5˖v;Hyn.~[G7?T Qm=x)kˉ>8T+~. \i#֌mۍ|y _~VYMwgDTaD EacFݠjM|yܔg/v pl.a/Gb}[7q׏T ^rVrLzn'U3v- I SB([8 Ҝ= WҒGj+mNQKFVϦZP+͙Ɖ϶ >4-2'ӥ}xi^m&6-y;YQG ]VL]`O]_$-t7x_}罝SW)0]VըΏ$s9sk7s DӉUJBL5Xwuz9JIջ7[ߕHr\{Ia/|Ʌ2p$[L)6Ԥ%f#%H:< [ 5zL|r#U_!em&gV-JlrQZl.MEa9<^HE;yna78ɺJތrӪ+\RL7Og͸^զJpte)>&\TWQG{o\rω6;f-WMO-@}$>&* Q?!\!h.:N7Pb.mZ۾7re@}9*x eQޕ]@6 H&G^TJq@o)G*{d=w"&>2aR%XnbL {7b.YYaZ$*^j=!_"qOx1F"vP?E& )Kya%ZĀDkE,}!Л(9ʙyզM=z摷u^ʐ6K26g[wni-x6ecZ@WI"ʠ$)z*c zN|x|bJW>7'a]&_ی+AwsV4 gDNN+"wT&Y#&㡶Y1f{kv@kKGA5=}d ҲFҊ;._rjg42& 5w}̭'uUS03$.Q@6~8Pj^ޢ\ އΜMt7C]9:s*1CMP{%_T 3w9W5o1 gڀVZgڶ=eX3l]YR\P;Cs*e3F>4V0%5NZv SHYla"& D rތE,{ ^jg4 nɈIR9O,"#!4%>V򎅌JqՀh٪ (Q;8æ7PNmخޕi!Qx,gh6{_6݄f̼q/5N^f+}6?I3CA%ZRĝjfd6i)7;_ .n+ ?W (nup\~lJS7[X?|˽nP,)oYѡ~mXal*jXsa\af@OZ}@:0 !Y!3to0Fר"+6ٙ6I6"F/2~Y&:2)`Opm }, t>Z\5}coۑKh/F 4BF36}է⢴nHwy284Y %I*pLڹHo|.5TǐZDvm6X_1SP{wYߎ55#ӿ7cϓQ.2(gU0l*sR6έql7PиةF!L+paR:O)\m #pXD-7P#dQ+We*ͫR_gBH)]p/z1 R8 aV*9>=+whDPm]}a(xo}14.8&p 35C:s5dvE<-6=LR[W^N:ґ `'|d;Ok~/+B+9J_ r7Z@d}5 ._laxیIz5l'R۟9`r2ccQXN!x/vx .Jf># F@0~ߥra]׎I^=%xߙ罹N'Coq1ӝ=nBsbܶ[kښY]r/G؟j #38BV 3&\w;;[Z/~ƥmhV0aX3heR؉5o)BVbŧx*ÞԀlb j{j}W~1֫", ^a萣)[Vb[ܢP*[+ެt| ,¶*rWi ҝl籘YNit 8THqh@N'\LfDQy䞤˿Yr#9>:SC-pല{pse&k1B/ɭ bā = {>Qdw%x8 1AL S@E 遉j֎d0 E,V=B>QN\;TܯoPH횯ъD3MtCV18.Izux ;i }6 0KU߆GwkWqxd5.P!r&`J3!SCD7NzXaU-c8QD 'k4R?ؘȴi fPP2O)a|[ĆR,nvK$T$ͯNj5BaIxfKI &5b#(iڽFj 7>l# D{Z#v8$aBr/] G;%P Jލ_|$І! "x{έ7KFEQ~,ب MASxVnD!d 1 r[eXH psppN(3Tvyɋ2L„)~Z>zI>0b<#o'5Q~ ƹVj oS"()":vl;n>|MG k<*T-GsAhi5 z^JVi⬔'(4,måڋlj4 UWZʼnJr'B~uzG6a0eGd~Þ`O0jD-M )̪l:* ~*ahmQb~H)S)a@)ք+leA°׹a}fk91($Xp >Nj рStu\ep43yT 7Qȃ) XĂ"Zɾ3'l >6t8?-)Yso90nӱL:{w8ΜlXy@#( ·DCkOnQtSPH#`7d$̰.$b\|_LS8[6J?4v\b"]s{]ĵ:f YaD8$A' YG9%mxE.nV1Ncʀl M`}jUTYAWb%0o+doVk/S‰9'h^siө1xKG,1^ ]=X54.{LNeJ0l>ڳRP`An|A!3U2ߐGNJE{Qi-lՅpƾELs'*Xʁvqo=v9 䝛@t[ Tv&.;La*\GƳy]w=sI >QHA "wƬ{,Wt80mo3ЅvF@m.3/Ք =)X ! 
)(x S^o~2OJ %hǶi?.Lx>e˻om# U&1:DnBg[*5:ܘ '#͆)QTMI3=hAo{ȸt3i"N} [B(jttǽ2#\zaJ灟#5IJ[iS,};1b}@Iubi|#d֮5yr[INAmal)A(3X ̨ޡ )Q}Tы.na`4&,wȠE@\Qll02+pױ%Q~D3;ʰǦ`݉*kthZQ,}캎xe.l4(,9C@5ϥuvH8gzyXN+V{Ϣ%F ,Y@fl9EhHF%7AohŘ a-N`h3rDh^LYZvzrȀ b4Ŗvּ:4D{M^lX$ɵ,66~h&+*NEKAaKC'Ͳ*\l2W/9R}RcF&r*@323GZO'Ξdʨx% j("w2 5oʺF)T~Hеu"KDŽoc4zcx4ip'zUs*ɉU1VoFAü5,`+L.кf|g0v^ @,8MC79w-@BVmrquY9Iۭ]O~&尉wz- tUzS!յWN~p׾,oaT:K t-R Hy͉GlHAVdͽCQh֘*"˒l5qaUb bPض0pid߿kk{` %O.Pj-IBGo.)К%Lvк9 1W+nʯ`֓d{{5LIqQld(*UxxfQltaH-9oej-i-^٫G*_ݣל  oʝkAȅ_6+l ÌۛARWzE*3CVc{%vk&FeS&mUHb!hx,/ׯ&Z0]^oʯ 6cc*!/f;,pW& 'əԺpց\Wm 3hNwNT7MZh IQjGkKA&r1:SFϓ`oH]jMrc6d>r³fl;`|&]JYAh9 ']TP)?b+12oV]4Бߐ]`Wmm$q2JaN9C2-@Bo~BQ" n,J,p-/ ]XLh*|:`/H+$#k89'ZWtW%D~ 4vD6'%A h0G' #0)'FUQsj/cwo=B}.i3',NP~9|E B0(fV5#Pɪ@{TY#ܣC+.e$a'Y$#̗Eu&Slo5i6NJ?$[|uRs$6OżZ3a=:sSLɽsF|0l/γV^XYc{5xnk=ÆF<F8.V|/{›3Xf0Z[;W::5~P>h֡B>"Ϋ&ͯY_YN@ ~B}2zqEGƿ pk|wD iwqm=?2>A5 /"^ 1y8\E4j[۽9,H b^5皺! y0h˞V[j>BޅŇQB(G6y'Wg3)6< YIϻg3uSԨ{b^cBv738>)ѳĦ`\GJ`ߣ_9& E#ћrY4B: !W};p;GkuWkѠ D (TŸHtXG R;uAe jGYRdϊGRʐ9=)΂K݈\_Ff"@$ϋp7;$Hx9Kc*HcFn~DTZ|+!,yB;sVT`u=̽Ƣ|!"QJ*,v5LBqAOǓ MSCIS^Yg4W3qϓP 8WiQeTezz/x`!Qo_ihN֨q?!jY(3C-Wn]A<ȉ'K_Opn Ɓ8vFb{ b흮@hCUgf&S 0vEHIA 3S!Tp~L_'*jyz3Fp{~KcZS-|$} .FAS@xIlU̒ d}dceJ`zK(uw0ڤN 0*@`f`aUΈ. h3Ze0ZWe 'ITnk[%l2K{B:NNY@WT2=]>r,ـlͮtiUAWϮR, 9MRf5@vZN$n=$WZ{xORV] EBQ5N-V%Z%l?(CX>֭CtU8pWlk ]zu:H% y98[SeSYKnaiOfEog+Yس@[t#n;lD#=G" .1rOvRJdPmvē)^Ō$}>9ފfk=XJ FgtHP~ >뷒FybէXC6|YZzK"q>bujdұa\s9VN:k2s !'jYiƥe!- Ơ#1Ƶ3"-OfSf*PN{'Tʽ9FВ՟KvJ M $ܯ,&S{Fxm;?ZEp+ȎO@-+QhG܂K")D, 3P};Idq7^gu0t5`IvDo.k1!cKe%|Wy8o{,~*dqRվB~7oAǸ6ЙqX)@c$v/(?.f2`)UCP\s'D(,%>Fy]|YСT?I"ў,g{fH` ~?Y~O7'XڸԂP!2ۙk0BMZwh=MW!7ͦ{@i@6 '݃<ԞL8`:vƾWvGe*@c^NUI0li ;팚?I#q)PA_ǞR0AC2[.zI}bTۈ Ṱ 00ǏIݠlp+ڌ~Cy0El-Y`ܧCODԞ(~2Y’SSaE,p ']YfkB3זokM\e`{30;.*'?"2s?q$Hr9"K6t'&t~I /m. =Ul؈h^y%䅿+IÅUi$HŎ-yvZ9MqxlO E/_l5(&Ǘ:ȼ u@'R9pu|1<ϰ%#Ā(\f149MbX2‘|hBRI8.s!9 ob.{#ѥnY%(iMC'W-ʏ]WăJTo1Z6 %ȸL IyD[%I+a>4“@fqp }a1v`u"G}BNhtf#. Ě9ov,vCup(4衑l&' 8U1f$恼jvrl( Xq ! u٠ |?y,]e#jmF #}^ L4ܟsE=TP)['%_'@~=˩[F\XO,kl?B ߏUKI5@CY4k '3N`Y_lTD]MKLh@MoPDi3dZ={\X !D|_YDo.o̿0T>'ocG#} ' rU!|-g6Tv&DWC|6 ^JEl\Gl8,n!uʹƘ5(bw9r^V8)@*=12Ggy&͓YЗ}O 7bX$0khНP4N)'Tq;bCGxnO4]QF'NjccךF90c= ^x6Y/ȳ[m0mkX-}H#cZW> ,]9#e0#EN}CbsXVLkPGϤQDm>gˁYrNڊ WV\܀Ź2$3Xv׷,5FQ=_^VOgP0\0l{u<_~L3Gw~4Shނmb:_imēܴ5*9LJ6aQN[#p>+O`WF3/G.w˼gWJIlH$o- fA͙0:%<{_pwNCq1)S'# 0IūNnws^CyeRMр aԠ6~4>@UR!b$ dƟK!P="mSDW `TzU2"˹.9ık^72t?lVem( 9xEsЛ=e67<8.# ɪ0+]tG~Ɣpx G:^.=D_Ԋcsċe Xq&5 S޸f2)Ժ(e|;ortpi`ǿ{d*;+1Z)%eusHcvpы:7X8D|\|jg30J#т&ig8> V6ԀDZy@%6/۽YZXől' hrKŚNZ/>[b> /ʛ-ke:Kl7~_:Q~g;श#JC3f 3WĐ) BRpy KY8.gj}@ҰV}fT $Gog8drµ4מҒE}b} fF4PÙzbWy\q ߑLfr߲o ᣸quUbNX/ Ϣ ̃H+UIVQdtD8a(0GPO@չI9?V7^Y) t zŤ=f`FVQ߶6H0)}@s@6(ۇβI1{{7 3';tћ3b_esB'MT5BlRh8e =:IP]=OD{6_uq&'Z}$1NEJB24GKJDg-o=^`ݞNl 'hS6UA["i#_Vsk8:Xc$)}p`l8ߌv=cNTGlMT Zdo75Hrmw`wӼ1PM=m=Ob5֛;㺹`ƏEg=Y9iаl86攺B#&ĆTjBnD#jqty jp% ܗXR$<+1J"pK|\s\!L %+* N{xʵ`mդ{?4~[1~5o1jg<6 kђr:2b;xB%ޣQY&DӔީ~yAucJ_s*}rc#{A_j|"94x"6~Ӂ%G *m)7%6DS3  \)e}q Gkuo_')v.U^ϔL_Iء|I+*bFEF<)˒u{yq9:z.B;t+@ `(-2'(ސX!aaZbTh+t -R{ |=ɝ$(^Qلȧ=ҧ/7NFeqTʵICUv#K4*YdK| +^dpTTrFDvaUKpqgKa%—Uu1O!'agzn+N+;e3gdL__rFCk}(t1O_ߚB!#~ӝ޼}Ы`^: ,zmF/8ؽ~T"ħ>$$%H|L:z& i '`pUCNk8l,Ĺ;nAtWg*m pfAYw"݌O(4ĎeԈݖr*7؝:ć\`H&RN%p]r5&4R鼕t6T*d(ݸ=#R=D`1hIxML~m^#F󽷭4"ο73='bRBV&ƹj~'g)ٝƒC7dijF4.aa }Ĝd)YV'"{m WǗg'FDe}:a< RDkR8IU6k^_% =ί@d=`s.l6E8Q1\]k1n8D,]fهӷN&n 2!]PHFg #&$#nIeYEw#B u[&;gaA>:{}PeS΂_]|_f̭81Aox3A9?w !hAH/lH]׳Ġ!XeQ<]+? 
iMeʇH&K|9edeoW87P5o8\vMh :\e-Nj=$ֈZ{=ۚ)?OA( &IaVq7tG{ "r󣧫@cjgsMkF[ DҀFiJ%2"߳PGz̼κnHĨGcQC vN'^{rDaITc#rP'Ǯ`(ZBHTˌ 0;n{ =c q:9tσ`?l>{' _*1,vn5u¼CL S{c@!%\'ՅJp=Y 0rEw,)|jYBZEIt)IpV[(?Ce/١a°)jۗZPQC K!se 4,i( ^]d^L+;uӶ*KB%cFb-&՚T>TG& W' Cad0>KLgإMٞF!U TVː5SbACJաۑ!ArC phGr+rA(q@F!W5@J>rvb\E7xrq2,nҹ!2_>-wEh !Wٮa"8%A( 3UZ;,bx7&OM xsF?h#*O]$3o-r:L?'&ZsXZ;pWVs^O8z̀~ST*/p ذXa0t#knfb\u IfTgkC-Ѐ7wwlP#`Nzztos"`!mJ@To᥊~"aun,qaL#W&gA 2NrYY2nU9,$X_QRn`1/o P8_,L%` $$%Cғ3mg};^uSJ pëꪠH%;썈Vu}`U0dkl-&z OcdIEm#\eZbnQ?NX5=,w,̸J*YVdgKns"ƓXE_.uu!qT:qd#Rp>[076-ˆؤ^H{4 6" "Fo7ocN} U2J/Me'7oJ0CTHkr=mHjBptZ6}+W%)꼜/K0̰TY1ey=:?a)hMU"k֡YЧ՗J/,_gzW/%oݔH3%9P+`_:WS3zM,|cqfjMU8Jq=/$aHF5G2<*S} ">y$.epMce#T"/?*M_vOڳJ\dSW:A& "͋qJ*j>= =D' P0*F0\@ȬܻЈ݉)}8if`N_e2*!N.QxmcS *.>Ih$ 3@ 5b{[[ Ylf8.: %ygLQrJ.KZST; ӏdam_ϿYoI4xxXCM=&%62 HפOdԁ\"mr⠊% i{s>ͷ!"  LNZZQdFCA]Y5rL;^d'oXrykkz=_u`心M˻0 jƠo/p RϯfFi! S6L[Z|]9B8U˔=&6+dkK{_ˑVmf!fKN׽ Z@|' jZn` eZqmo%Op^%2.Z䥦j1pɦi -*ݟgªOOhĈ,B~KCa`"LbOC /%7θ' q*vyMPB]I]/$GVjf$tnGG_)mv(Ӷۣaԟm! AVQ7;軷S-T'kإ`1#~.r2 [%d놾E͓l5$ϩ78₡9&=7~՟2>"Jň?0f6;yDD Q3OU}J7?"E@ݺJ}{`SUI(snXV; Ji.t15K WԹ@y, j3(Bn+pE_M'ٺݰR$MKiZZ\μw#=ᠢofY!J4Q˵5w3чX1OG1IQ |00z)2E*Y \<+g ȣ»j?B'H$Y1N:O1$)oT٬3 K 4&0ǤB9 DM͂|9AZ7B"<%kk ӎߘelaNZ.F:;;ms/g$8F [VD7mc?kR5f[Uhx ^y$%^;/vrglf2s,/=.QD{8"Evo|2uv7n/x:U& 4 s:3*k~U -n?҄cXۆ?K_XB:`} 򄾰ˀZ}S}t׸.[J9ޠ8lq p9xXG#3bl* Eߨic{ +{ҫOvFm>??X93P2Kw_Ľiq5 Y00YO=-;s բH/); 3"XCS9l΄~zK(x0.+ 81Q9 !yu8#kc(I=f Е_d2vFO*_ssn+q sP(vD :L>v ڜak$qV KfO0;{! Pմǐ) s å?9}wFx5OU9"He6RkzWjBK}W ?`Io.G d-]}NY޵;&R03ƢIS|+^O'ٷY(8cqވ17k3h*9L&P8,gZ2`sL6@. `Vo͉R{3$NppŪ=:J}w^,,S'鈧Pk5m7)\e/w4U ;JPVϴy-zoن!Rl{Oi 1z1J{;n(Lh>IU{jpdXXK:`e5R&S E^F5Ԯz:xsL>lGԯWFngCoZ/ |z3g0A/8*mmY%wbCY0BWQ^CYқ<}@qi hsGoXE 7>u.Dy@`[s K =JHS{Н]k<{˃ruN|hȋ&^GnR߬$r^زl 잖ﶭ̈#;\T*kE?btJdG2rе^CdaXd1az p^_P1ö%D $H: $#6exIȉC;BNWs<^n9sW"8ŽùUkiy:jfp%mJrѬ:=&+X9.J]<Ζ[n4Ŭ۰aUJJS|c?'NPBhp bIw6R%&f̒1iрaCta\c쒚% ᗐe>L{ WI4t 5;@uz_㳓^xf >gZ1&0)kёi:{rLB.yu>+5,i󘼹;݀45?fJ 4&Vjt!v|(uj 1]tFn2UJ) S dgY6v1T%u }%U'6HX`LKPLDJϹEl餄$􋞣!L*򕻙' Kn rcֺ|K:n9s(Z77xy+X˕S"n鶉\9 q!,msM`pW(2BW!-2eFsRè{n,k˗VORet(z<`K2J T t^aM,V b_z)"_Rz Y.pkwj_k]2oKmHHr5w Q ՒL:T@NNN]M:3&/Z)A#F#sRijjC >8{V]ͅ;/joz듉ޏOu3L.AOD$r+ԜP5/_gB*3_t(ws;B b`cW\a?.{ُM~c#Lck tL8vWhg>3Z7X*feUJi!a ΍5͑uI(gc`*ۨhꉆYX".Mo3٤;F (ԇ*d+|+ $CW%|jI> B=X}jW/D!jtu3zk!t9K֨fgE@QqDoeR wQc[Z[$5d]A~%7&jg: i'f,w]&7S:zDZ- -Eh|eώ cDTWO`(E -jz?kj:qTDby ;)˭>&VtwE.z tU_oX3H/ma4`eSilph 0sM<S91~.zmauűwU7t  qk"UsBt]U&o9>rlH h0AmWo 7ߺ.ڄ_sS W|/^eszOak_+@^4Bt}YE{jT~WIDzGrޜIqP=a1W.Y@u(fi CTdžHsrI3X{NؤGō>8b=-SM"7-0iGD& yr'PފhxN.V DFNlZW8t06Ѐff |Hs[- r|@ z[t{ , muGkmW<;Wv* (`Hm׉+eVIſ.r yzˆ2悬b6vAœ|OBăӏ T*&2)]G&д6VT 7hJvtdrG&('?3`}o8/ J$Co 6դ bYOz땶m҆edu RuGdPfbfק5@qYv஀$|jjutZ#ɰg^iB,AlЀWPقppXu;]e ,dj{!r`}ktb_߄ б$mDK+a :s0<`(NMޯ+:ОtCtW'A3p㳾$|o9|BWK$/=>BV o1Cxl;Tj]սͅz%W[8R#3Ii;m73~m,P˥Y->Ob/WrF2s=nTg24-Z Lͭ!_c!VAs!s%&k.H8,װ!7r>hף TFY U`Q7):m'&F\q1~ZDz&욀.wŖz$hM쒒/q>2h3ci 7>(YG\JngI1{7ykUjsg1V$Hl{8jK`rM"@򻘺 >Jߨq $qUr 2LzͶ3st:mfyi"GbmâP%ECZ-nE7Pyv=M~ "|lhgDV3هFy0t,{gxteH",Z΍,oݯee*O &Ә6a˫hN(,L{뻑>F`)JL"~]9|ѼEY_ЄlV0rgPti|V*tʜ-'yvھEŲ1b=*X1%eGȐK`' Rر~cJ"^D%-O/>Hh5}>.7E~;<^( *xO0Ɂ4xYwAOH ~Ԙ=]tle;]jMsY47  BAɵxG;̮, ʞ]Ƥ*|qdsّUnPG|( ~=Z$euR5=x;-V!}缚NM {~.Lfg >M H.;7#A *&P#xSWebDZ0oȘF=biYcSǹ 4WMqYF8&RīXIv@t>0)tTʣdxk(1ڡ3Z*f%..!@e82*y5ub/p쪿č*hQɥw.A )_2Yp$SH?mUNCV".A|,3JWѧQϏ5k|B\24 ucqdtu`y\aDt?uhaTwJy|T$5F?RBq]Vm1{dQABVOPSQe/s3T}W!vQd`~@`к%*45O H|_4`F?1h"=%};Z r N1m^qFd;&;AY1^;͸>ipEmZEZ ^7_W"Ry)%?]-RsH|} exlǏD~F' une$mL)$.B= &c7U= ؛k+>{ *:Vuߗu`r=Z|jSZV.Y J|41<nRч6~G!дl`\eH&y ``đ%Sva>2MXӢHpG5# 5?0{Q@Xi|L?>]Qv0sQ& Cc]o\4l'22y1t{A|ٺCxZGl(H^3Sl]bZ WǟMTw50_ӂgsǼ8s( KLk"-NvLH 58-K1ն x䉌Yjg<:H_& w_6鎓v #JZ]< 
X`+>iSI,r-ߪkBz_hĔD}׏ &*NA~֬UJT_ tkHц 1x4E ߡT*cm$U&)"'3r'EFAԎ4s!2)4o8xWxqy,J^;f^37MP/slMyrMEUP6ryͱFkh'՘|[VNwC)yeKbpF I]/qEg!ڀYە?@Pџ;5)|!"d{C URuf%2KVNGRw ɩA?>e/Plu}ǣs;cfR`~GQ.Q = +,T$&ou QzS [@ j 疒B,o|r,hEPf0E'xo+/ "? 59'?(| %g6 u]c)x=w(ûSk XZS9 1']?BoFD1YQ{L*-/ UC a!_9w[Ysg9Rm{S }p !{ڄszWEYTH+GJK_;~!S riq5և 4kh@/,NW$a]b^%HИК jI}/C꽡j~RODT\ݜoD7N`V{(&a|OM_~ obt pODn FCփHz쯬'HܪV% ` x>5m{gl5HdQ /6Wht1clg}CEfgw^U+uLl?X9 wf "(Ujٛ|oHQ؟)W}_@#]$Fjvu8IW+ߠF3*rsrF@ˮ7c4pCauf|"p+Cxn ."} vL/Fnyߟ 9l?5듑j4sD'"3ƎmP+LGes&[v"S#ZN(*u_ gai~w Rķ3' )TL4Je8GoM<\x"1P4"(g Z.40]>#Rym%r+WVO>^qL9Q =`΀Rqt2pSNI^9MPEVVhfK,v)pQbC(9bI?i/,3I"j7OwEKZ1H1f){md"3Jg=e :I+élr!wnf5ـ5ۼ[S-5;4#g uII34|h 6xp9h3!jWTHBSb=:/`M2_%7e3-G[dȰ`*yHW_q+VI-̎:^K@Bq.@p̩Vj_4⬅8iҌi5>ӂegL1~qڂVA9ڊn6 ˝! [jo{>gvVv;(K#`y;E/Zugi7C?aZ3g mDkɛr P :ٺqo;y3<$k 92yuQ|D *j mi!Ui/Y2]՗: `@Al^PRJnIߩTZvQՒ}3J݆4,n){|<š _|r:I) bA'Xz~dfi3XRG @s2RFUr͟parnـgs+#$%IWC5в}L\5Մ քEO`VZ?ΌCv,VeDi')RJB0>Fmaz2ҝ.ruYK1Sō.WWH$*ĆIy1XӦUKsQzj(ȶj?qdL[2$Ed6:ݝlwIz-` ` ltkPMfTuM?2[X~\VѳEa\Ϲ VNzεaD,紑$GZbK2 ZOE .RiG&[\bZZ>x/nr (U,Hמd(㸵VXl&}lHrry 9O ^,qsoksgE 5*HZr=S8Gna'X&nM, Pak6npQ۠b7OG1VT"Q`f_XNgbb'^êm?l;J('*x0|)d B u=YQqן0[fmْ}g^?D`=JH5!*8m!(??Jɱj}Jշ м9@]oihe9z#A#{-%gԎryɫ8 ?>#p&Al<).{uJCfKw.j]^_+d!H `ɪcʾSr;\𨐞@cTi47Q֌P V#C, Kuo|vaj03Qw\_r Kxi.Q;X.n}ryt,=FXH- : mP7lJefBFN~.[=L8B=[C1{ӫX&L]ǐwT2\Êyhk5x5V!sX ~XiOT/hD%6 #YQu|tf5*- ,xw%E/d$ipyMA@,1i$VFDֵѻv?8 iaҙ ܥ&߲0?PzED %dU:ϗKв7p&cZZ_)jkR͹K2j\&61v}s(]t;6TtPobI,ȯW>*CQVtRPc̢CHtutV>I p<#Tipץ,6ܺ% pQL2KiU92l֍nӴfP~/{q͡ui9*kF喗ųɗ5`G=RxL=9;&}~^$|ՑEŜJ q#ַAH5Xi?n%[0lx#"gX䦇ǭƷ* yGohKkukw'5_[Z 4n!%iԬ(Zx-Uݧ) .jBroӖ sYdώ-Do;V[Ta{ Z$@AxWotc0cWW RY&W52$! 4N f Bt v3:uPy;Ccm3`OZYf!Ad$K{Ȋt”;<77J-KxAH/(+c{UgGtr6IL!aIN5@z'ijGНN2ߠyK}nno?UwO"cYSӔZFWD<ڱH!18^*]T1P"۬ [S8YaI eeh5զ7|MtYmkJ+\gpc0}c1cpDĕՊЌ9}9}')vcQcAPD 14f`gu|~L33FeG(i'LLh)7T+ʁxxp!'"o֩˕T \q֌жHtO3?}oo 8mM8ReAjzJ$R } EzϫAƯ+wn}vd{Xe$' ޥt'~%LE%rHP~XRN_3G;O?e$;ĞF/R_9Tn^Qa%T|7%q!p3Ft>ZȚpxUӖ%V xêjdEBSYeg#- 7O,f`\!{`r{qU=bg-%od *ϑ#PSI"}IT/nfNKi3Vݨb=li_b8Hc^)TyQԧ>m. _5ae6 LY H9|Zh$>U:>´kYε+'3ǣP][P;YT>+hf*C s`"y~N4XEjQ!͉]zpa9laPʈ},.Rm8gg5sKo]{mߥ XWS(-C_ ԬPUCZNy]y}:pRGywX e6v݅ g-ʭ]Ѯ0n8.Ӣ H1(W8v)׵oNq9*zTǞY~Ë ޑ}u̱'iQ@464-=ry<=p^FC2L" ꗆeT8*HeWX8>7[,X%\{4WDF8-Set@t(?(¤+.Tprl ן,2TXdIetf~]uE1 `8-aqX{LԬs˧ARqB;0/*1F :P:#7ym6ݽ AC7+VtPp_bX}j}FeXpoD׺rfN]/E;3&~:2u\|lv嗪X miL]enYKPCZT;L{*_sJ>s ISjq=?Uɘ]^68 HH$?W}w)`tb55eDwI4׻Rb\\ HSpXȁxY$Z6w4ƵѲrKΡ4JܖZ#t}LCPӾXR8*t:ig:;kFfUEϏeHp#FLnjc>?z2ͯC~۶LuXКڢUw&Ǥ/NZ$b]-{;)\dP)XfD ÀS;[5pSΑjTĖj;=ZdrC7b^I"fBLP`W<+8ԍ$[&dxk'ZSy]V9%# ~2}Մ)A֠}/&@kҰyU<>Hv+7``C9 ?Rv2~6=qg2e, U1zJqLylL Ov=ZJf8vc %9 o+I* p8Tz>@xMT)s&Cco0#jwؐk/s2<b$"eڢG~~}5VVܵ1[f#{X35slÆhUG", ~w!S~k1gG:*p̪K6!gm qibF:B1ފvdD±c\_SHW;XELHj8 JO4 _hw:F(4eo:v)A-t1C N:XANtsiJ\ kZ_|ǖݩzD G7? iuHڒ(]P5GtU{K %օԌ35)dBG3? tud a;^홇/DuVYv9s3P Jﮪ LAPKH8ɜ2[GB3\RBr'u$'sKq9vShgBu.]K'6S;ȕEͬ'R0].wXP"$;# c} ǴS /ϱ '\?u0>UA%rS| >K=z֖H$n_QD`XڎcOwHNR|Xb, rz}7wi%oE'2C"!`CDshDPo ɅxN\wmw$N# Nl&5LtӧG2664vBjG8?ƾQ2L"ȁG;)/3. K%WlAZSnPD+x;.8b:((_CIVhNAKu=PWBkUT|:;/Ht6gpB(Y}<+fp4%"RNmufYNVR[Z疽m#1X\MC2i10Wx}h8BvigN`MP$szC]i1jiEF#xP$P[-~\cԕ)Eauw  z<^2&ԉ‡(no8U#b>=̴[PoN|rڀLm@amSʊrЯX62'*%zX ͢J=Q%̷b\@jo{g̳A !^?xjߛ·p#vvqs ,mcz虇0҉'NWL,H:cis|; avJe 2tc%ߘ"]4!"J KE D|Pלz|,UyȤzsP^=$l}bꗦ~O۵: 6 -t7RsB}ϐ;!$OL! 
v)m]z{'X<ʼn8pH uU<6L)TG'&XYMobǿ$%/iO!x0T-hbZj}QzϏmi*ӢP, dIR3aofVkpdY?ɞ+ H |, 78:#MU 2&zڤ NjaDCN"R/[q6~g#8(Ӧ8tn"@ p}PpBd} ͖ԝr*oe3v# #"֦6"6nj]Uc B6~;S.$JwݑrĠ3R2ݺ'_lTZ wwftK_R`9,M~hWWڬ 9^ۓcs5(ԏcR 6XocȾb&ܰbհf%|lseweeo~ @:W521-SoէQp"P\@: #cSsG/i53à4mcyވrsGzViTkAn)lSzf4s.QP[Ų3γu)_v }oEH|9x6p9KSֿ˗f&vl7)[Ty™8&4+jzi9/v nAQo_4qK7yz[C2Ap3sr-Ltb`]/k?o%6AO)0m#O?3X]l::DxOm|?&Rv&<1%SD: FlwE,N()4ԙ8m_TX IAj2=ǐ]q>5rlzaH \Xp$=p l}̈@Rq6g$&IIܔӋ~ \ 4wI=6Z4`@k/PԢBXՌ4`: CFN6=w.Hd07^WUE˧&C LbULF N.J_mF(\X㺩H'-g*['VIl~$; pw%RIG {/i yL{tYyʹxhX2, BN |\jLeݶa~35nqZ3Ft̑hǐ~_rp8-$3^Bt7QfDn9Գ1A;*WG2H(&lG`KlhmaI(ӊ5;wS>&rYuaaW Hi B92tR*o fGi ø<D?)=P- SD91B rmovtIאE7R YpXτ=huFV!d$:5IgSZ%""ypr 3Yڒ8ΘKQ(0-'CÃm{t%N@ywq;]Ή:| u&J_=2M(2W@iƁ\(5!ޭ4`طv 1ۦ=ɽ!P"k~/7/nW@ 5W匨X˫j{ ݴZس2&<.>Ih[Lm:/+TUG:V3lhMnoݯ6ISnK~̥m֚[NtnK  l Np0E@<?46BQiN3ZȦ$Ra3T ې}tO 3,56'%¥ K3 j k}aWs`|d &9;aY!Жv{hy2yDE՞sј/(Z2!I>9nxY" enκQHQI/= w[eRIfzj4ߡJdeA2h,I8H]p^~1znn-.]qO *uуuI~.gǪ.k2\cBC"{q|eU^z@|jA´Eȍ 2=@]!ʴ8jmun]j52YAJwEThi o+Q'Ui:@ b"V eWK l_ %@lhkz,$Jj? Z3{DAt[ v_aw>?Րx ~{?`ߩwjHaH m@Pqe*~ʲ\QNho*йDؐ+!?6z~c#˔# Q,ɏc2oCb[sȂ)}$[[-:X tݴcˀ yұmĤ޴V2Ż/밷5,_K*ZogB-Eר״,K^`1uϖMi +^U~Iԯ 8؏6lYs,#Ut0y_F\·}EI}c8:02{Xmn!L/(~2Cna4oB)l˯o dw; P6 6{+j`-x0 gJ`f?$>&i͢\?6 \/LFuBmpUlE] 3TJg-[Mc<9,<)k<4]}G>6c}M{6`˔KVuש5mJ)omEd՞U:M$;nY^_NsΎiNA13AF#ʙҾܰ\ʘSwwhaçK"#D K* |eJ'`qaT Og<{ؿL+\@ ë+C3vBɚ 6H>dzXH5[ÖOqxزәwcKOZPQ$^-NQ"c!鳍%+09Xor\eC&|L6Aht *͎%X:Y |RG<к_27GdB=Jѵ!U_P[[9E}o?"kO!փ{HۼĆFxcFWv HP)\/5Pjb@\2d$-ؿN#:b#aIΣ\UA INU~`{y R #pNfرvB*˳5?e(c JSxO!܈>3[}.O$'Qk?BJ́Ik )籹E"yF F)~+8k(جv75Fi9H\p["nq8gЂĖMGUdx^G:? ^@AleKd!!jsem͹abYh*+23Hp@Rbܽvէjk*;[q2(c& ٣!z9JT)0oڙG;c9vs;F#еQzw_UG߇J06^#HcxTC]&Hx>l%EuߦPa^t~6ǘ۷}E` Ur$!0q<ؔ:>~:qa |:? 5P. 8 k_ȮFi!{iM P~p:1ӄ.~ؼ<9NW'P¥t;9,{pؾO`eBsU˕}l?tCkNZy!" d5@ .SV̘9nUwi#N`g,&[.PhP]KQZ+bߥX"À-յFbY&2E{mC%b!Q=lI8;Y0U0hNㅄמ.8hVgy&pz uo,!m[|2P5 / RNC3(f%cu =VP()_cGXf]2n"؊, u2k7tQ}ata1HG<mᄷAYt'YrɑO|*A9x#}U-]R㳥&_dY9>bXT *td$rK4.>}^ 1P5o0G/^;:e3}i{*}Ϧo 'Op-9@nyX+xke[x#&9τt~̧ xXow*˔E`o.^fKzX-Gfp((նC ipZ֔_!~+ԏ.M-U0Ƞ>]upTVb[u"uB<\!|KC,!CT-Vn13R8Cץ}pg66&!Ī2Kv_C^mLJ(kb؇aAX)!yϿ8$ˍL4 dfxMW~Ö1LA`؉CtK™tl`]Rnʭ2Wyl2'MIq.LɞλXo$+ h#AI@G͊\-0 85Js>Vb^#Tg ;O`>lCpفNtJjGϫzݓ w vCJpDdD՟ר^F`|{HrHKk@ìI"1,6d vЃ JdUt1 Z6U.O8o2\eB"dڊ, eR6}aw sм{s]pNVI1rc$ja$}LϹ\E \3P 7%_/[iTbfp=Dw=Ձ~[O $/qwV'9氌{㮃q _ 7YDXQv%Jw ݦVcj 8 fI_Y89 ZԾoN{`f`X/T̛}eT θ`'Hc6(aGUó)Fr;6 =$H( F^6a2D1'^zxw5eV,reo p;iJU`(̾Få9$ܛ<J\\$ ƞXRWnЂʋyhy"I,=:1Cd?H|n֍hk[<bm}G]\; 9:oUw^ORT+(r1aQSUcQ.29†{\⒫W@,a<4I2L\6CV//Xh^ˉEChiUj*V/sgxvYYbqP־DH`!O"guLgkMUaE *Gx*eYu4V4vXP3,Lh^ʊnۈ{We9+ڦY40ڻ=hE phK"~K+VdLu~5چuϧ\Ru^fGnE+7ܓL׊!fp,_\8Uޚa/$mz~4p0zo6+ߟ.:8v}9V# 5\-a[ ]Uds 3G "P89}Àb5Mk,Cbjk\0MkW;T<*ӧ.3 ƞAGP .KM)``i;CRokhMelę{;{8/iüt{~[g[ Te^nOu=j3mH¬^c>)"\9h>;}Ι|dP?[ ART_uj=K|S6-2oDH S+qAoC?(XUfBK(":ԸGW7V3{`&MH Y |Ehswja= +eCS0<@(>>;pqDqǨG0sJ;_7fCjŨIwYTtTJk$F>2DɟO=w\HQ?F&x0Q|bgTP|m:=OŠAH첢,=iD@=ДmO)U%_&FH 0##ɉS-B)-kβnpqOCn#d9ӱ.̯4>+]uV1X7vK^PX`oeJwV!$ t'tϊ168?zzv+[;6J]vNK>fl 4 d>S 0!Ͱ",8 ,O%Px[5Ka+1%-"+5̸8 ü˞Zrq(ƩOelnvaܤf.fɪ IJFYtjn'{æ^ h=JW=ݵ+"%Iq_ۃ{S.BS_ڂFɊ&Cuڔd\|wi 懾RQ`0n_~!70bF< (JXnWGC uof- hU[Ś+xӸ~a`P̪Ģ]'șW:j=szViV:D6XB qZcI2#3WuQ*O*Frm$l@QItChȷ=;~ٌN>UeV_Z0I$,#_c1R5>ZH ?$5=B J& " ކ_LފwdbTBFL4jYu<} ==R?oBj9aV3'|rVr7IrqV$vx t"⹬(QnPEYA߫2D@M:~ݤ]9ԅ]HFF$%J( 9!.ߍ.RE%.MI&w{/^]>ŏ> SH]q|cӌ>EQF1ZLA(b)8g4\ѭH\uLrnMt;~x4^2&^܊,50!/ 1Mj]5NKՐBɖ^I339*Qr#bm!x#|`\2<7gTtcg,*IZ'!@ p9, &V qW2py%{쾐%Q) $ݐ(Q 57 xХSt?e0w5O|`"nW O^.[ bZ7-rsyH-ϭhxG,]I} 1ޱ4C;䞌(zVAw5TJs˹6q֥C; Ԟ&v,A,H7%iq.-_O%]6YAx¾ TK糆ed'Lv%c,|m9d F'~K}G̗*jޯcp_ì*El%T)^7=$YlVWz 虘ڑ0}<|!ťbvX&HM:@lk7>NhnP*K(#N.L5'^to;S‡q>^T Pkmb7ZXi0sF˅+|C`aY#~E‡Nkn;ȥ@N i\F>һA`,bbvuyQIQ4XAh!Q4tҌ~ ahN9t['s 
ڗǪbf({ղx)r6BK*;̶N0ҍOH~ޙ-!s.e7DyYPe[,`CU.)&.E2 uЕyueRq%כJV*ذQԄgLW V~o~-, KFh H,RFNc9%cb1@=azBׁAX36B}fFFČPgwG;'@|f&Uz+72b;bP '(LZyyȚw~ɢ'+ɓP<5nt8rB嚭]3[)Ao+E4.d0p[Hx$E1_:wuw}'bZ؏c4j@ fHe#VZq`0"޽֢q/FK-yNĮ2?JiY4tr?F.yu%ٍ0KM{ūU^~w G O޻q-GXһʃglbtǙ#vBN>o,ɥn|GEl@qM=dufl~^6! J6#k&] ?O }pKBt 40.c,0?oI|a~Pa'dNF*>t(;iܵ)@<']aMu 䄛k7F-+\/خ}MؽB$ȸ!'DmQ;1c [()!&|sԐ`~C$32T`a\k^گdMrxBWv z|AluhS>aqjiީmI paʭ_I#I5vvϝ1OU|ΤR`5$dRN} w@'`V3tYE|b4Hth2RbE㗏 )YKҌOtMwе< X}=J.dJ1V^ nT<Ȕ AR;0潟,6wcgs3I2ؽ#j~.#.f0/ D@^`pުC̼\@BkƑA1?3-8rpZn%s"UNn %[yLcICsyGu!@kxJ3ȫwGBtrYfgY(v5`BWlw{RQ-w|#-*맲HWy87&#:J"h-ٴb2`{SCFN"|̶E)e W AYFƜ Vt恍`&_r씟CB j3gfKA9A)ݜYAb{hb=zA˃9#CkZzC l!q>_|>hV/3\vz6>QJxqQJyxPLK䥥 kyys>24 4(l}E,d]ZI+HFFDG:&v,m~?'X@+rK du>f Dr{A9T_##[F$gI2aYxBZ=55"{37-R ql rz+f3az{RtAJn *k_L7kK ngWYaQ_pla>7~&<& pȏx,NɞhD}3ҜҬaT$4Q%i FjMEyy[FJU`,~F H+؞&mQ7mF mg55P ڭ- ??gwg({5²/4H f^Ҭ%n.C( 'uE?ac9ZAD>hF"ml3T[a(9)ݤ5gVSl݀  ԑ)S"gM:z]yjEe@$7`wsG9Di4bǿeːL^$%Ku}>H,<^{SMMҾQ?ebI!qz;'xxΰ;؈Ԋ6l({YEЛFNq9+CXE}/9B˼J2FyLoVS"&S /b3Jeӈj;V}#ypEǼmYeN!^mxOHn\` 9'19 I8~~ @(t[R684,%$j`<`|žn_$I^D3uWn.vZI^ry-C죐4hO*'X7~Z1ޔimrKfvW/!VQ˘Z} ىf4iKҊ< Q޸rswB߸Cs_j;pZ@@M9`>0 YZlava/data/serotonin2.rda0000644000176200001440000006747513162174023014722 0ustar liggesusersT.6EDD@ DD (&DDAD1 * ""AI&TKFr9g{^={?RoԬ:YOH$2EG"SV_ɫ?H$՚k7^U$(|=\};dx '̄}*J xGk| cL_YMDIɃ>Fǟ{! A~Y9p'IʶWS"5܉ux\klk߿9n8Ft4g#7=H>㫸EFb&+8 .uc&qŕ? S/gN~$8wZ<~\/M>}KNHsU÷NrP,R_ms𾫙"HyDݵ vZE[+8(>I!u>l)ۧn!!f^,lq*}m=b滴sdF?~V4\[x"Z{.ig8z>#A+ 99:j24; gyY7궝\g}c#;İq'Ó!e' ~r̂q9bD|kx!~۹% W<HDR'_ $H+de#}uy?KJA\GJP/]țoKae"ZvȿqvO_߉/I&kx KgT K<zpW1y-J݈r)&<Ko)F 97q5)(sVK0'ܫ{J'ꭅ*|0! ;ȈH< ܍$n0w`'ұZ)0_H lg]Μ_쇋 $IܘГ+ǜYHA:8Klշp-"zLp 4繳Eْ7>DshMA a6}{VR]b~DjZSWKTzB(;6BKryn/ѝiF X) .㎃] Hy܀ïo2 ~C#o!7]!\jסɐ"Ad1X>Ml_:JLk%֨ -C'@ v#hXGX O4zb1a|΄\}2 mL7S _Rz15c|D;bwW][h`h6q /\SUt` oIʷ1o`[u_1xHے} 54 lӹ+mIV: 3q^U/(]xl R'CaYGځ`}db*VU o=yDaƦycD/X 0Pط6^.~z%AvX!|=y^2-BĹ%Z;ZHm?† g^A]nU1~S;}ͽ'"uc^Agԫ^Ê]9 :=#Hn½!neQy9,=_3TR u7hi`{Ƅ 8KxVJk[=s}2<68}Po() 2ޖl~ D|_jIץ=!dIu ǽ}K]3} rk_^x&et~){caV)KyfvzxŒOwLAOZ=şqO{F^x[$W~Gjwx2W"ȿj!fUw-Upm'8qݍ$Yxx+qK1,6d _Q*LzVK]"'#:3y(+U ֧ʇⒾDÈ5X+(,1i³[Sc&$弍EXC1{]w3hy'2pVVݜ2>^lj.ĺ@ģߜZ&%z#'ۏ`WL+gr@`6 ]byGwPqZW{3C{ K뚐iI?)+-,DESDI?Cqtӯ\7ǭNu ?Pi^d[qV#ߓ܃̈KpD.ͺV{GR`j|wX̥O`*IMϽ_k<{kL;d,NSVVɬV5l9Ԧ|r*qI;MsʭŁթH] 'ӟM-a{c9(p T|L; NszT ށ2?S>yu3uok+Wq0 mZs [h"&d#D2#`54$$`}ZT9c W^ELQeIyB!t 괷q=u;>)"/%po!1~Mr «|yU6`J-ŚAf*a8vjAp8'a>d g |Z; R؞/PͦLXurXnT) n}aCgK-}y[;J?QN߲rnOƳhǹY|iF<uTjzvcZi<ڤ$1wU}%ѪZ\wjCfBpI<]DrVC%o4~nb cMQ";XȮKC-^NC66w:Due ވU,"|zޘ [*d [B->I$wy ۡK;`ۋr+縳ԍg%1,MDb5N:q1% >AX詭܅8vĽQ$7}E ~ 1V {͉ܻ) -o!M';_ .+naӢB7%367l^='PT6XRp7_ى8tNT Rnb@;o/D׃ ^n#\$^o_z]ׅl2yG [G,ĪvJP[gE;m#vj΂j\fx+eG+\s Si(sLKߒOV*%υ lEH.e:❰&%1C/8~Nߌq7YαknsUOEbZ7QԺ)̈z=BrԖVA8n L/ҙܱ^*m>xDCW2м%nqihvU ݈)݁}< f7%nm :w~yay T;}z;n;eLԍ`TEkܿf-9 MG^j%0՚uf'xh1A>,0Ɉ{k*=!6Y ờ :j剏 SNEȁ|9^ZQx<4h!/wdN^D٣LL6z ήuJer+A$`+FQ WE:^#(}a=TP}K-rgd6N۲"uۂcsV&,^ڌ3Uga\+RGqpxE浧0x(>"ߺv}6ْnTZKKHAdo|N7A)@? oy vv{3ZsՍ u7Dwa.g|:untPɐ\<"b>j+SkکŮj+XO8DZ6LX 3#魻Lg^ѡ19Iui>sl=sAO=/ӥWN9`%$~9VDQ(Α;gy}v10Gc8SΘbqX؜oHٌ܇a"İǽ&P9=X Zq)V<-D_$Xu--%&I0_.L;2G若,qވ!G$|Z1 khаz^4FY:JZ`=H/B69N'90:M1i{íFKHU ,FobB9n>W tIZcWG0A7n3zbh$kDU_p 3qzL3rI7jB lۗ-mt804[@ɜ! 
&2T`!\4b}\ GϺ6qy'#S cJw3/w5T @SgF(H.3p9(u3GKr/N~>)4Օ5Ǔ1P{m9pY mx5z ߊnDk;3n>$XCd|Btj>&@0\<:>^kڨz%{G8 soWt`5"klwAf5̅:_WRUޜ]sAxb1pa Th^߈'uI!ҩ{9 z#L,yMC!wmfL1.08=ԘzMCC\o2y&8٢`b=A,oNk[@;$QtP = /9?L~:[S>I#5c5fZV%Έ,Iy[%Թ=+rf4_nl睵Oƃy~0c{Iќj?{y^(SWzn*-pȫ5cFG)Gik&Č7AlC'h@UX _*S:[-"c]6l`GU<ٿ|䞊m2Qƃ?z8Q-h#Ze6%)AKtxT1\<:/rPGNn8;6-{Bt뙼r}xYOtq-6u6̬3y% ?fEe=q>|xf <#0bḽz P*IҌojj>kY=Lj-AgZhgѸPn'Szϯ{YP*:HIRR1A1ZEyɝ0'K-' ?s"py؁qՎMx(ٶ~aP?iTƝbOTԩ`D|v21I{;ǿW$_IA2Lt\I2(l#TcqV>n3zY^SڏkH}^O/4ӿ,+MEdMh$m7 C4Q\K⛯"vƣ=GmiBGvݬ2u`׾to*/M ?Z+) "ՆkQ=hx}q+s2 9 xϽK}B:tq,SM;݃o7ߝIچEgc nGgsi+-cGܶ?LtEi7ajZR=")'i1ak,5 I C#QD&=mV$@ xb-憆3wsPѫN]wyỺ =YMɍL{pokTO_Oma C1<0].-"`n߻sQLL;bcu:} g_XTo4Y a4d=<؋wL~s@y%z| Zy"_c%`vz!z+Da~tϙ~5 ;ן$Xu]V\$G5@aQ?´^3q 3ӬTqڳG+~0~w'ܙwuAK @d$Kj';*q~{niqM#`=L߯׎G*ȑ+GL^j/?sD‡^s0ՁhL=|9Sy*#iI^x}\b̫GFL*]G' p# %7y_@R`2&}-b?zPj$H}϶tŘ+R9پ؇~C18<M6|qYU7XS-`ԫ*2x)!SY> 1 q}O.uGO#kTW'KpCUkxFbĞx8R{1.g|(9;B#bOp.{,y%tYb/.N/5/юq5>TbyɚmN^[]a8}Prr2ya''1AWyWD.")0PTN_aϚ&Юv$S"_o#Nn70SuP,e7 -{뿹£&`zd#=y<gnʿގ',JkrB겜Z.Wj7e܃ A%0q2JmXu5ܓ[Y,KupW6k;1D])Ǖkx-\&#:xpɭ51S-8_'=Vʡq"gLfϥô&Smط;9MǞ\h_LMٵMխd U0#6V|O;ko2Y\dìavrhUw>1:up0}e"|-mb_O΅p&o=SL&%^gZz@3;=Yy2MGሁb)MMF4l`. La+k 7乧p|h5jb:JZм.0}LyjHu^͑!u2Fb>`Ֆ?n/| -FJ]ImChi gsnM_* Sf]CtH5YZmǥaBDfcyp1y}o/j\-{1Q<$zJiװ+yyxPp+ ކ;43S$~ BQUʂ9_&K\l9xz:43"7[pcrַgv;50(sS;$g,$-R}N´wL>j' UfǧxSTVJzs Gag;`mpVؖgʎrǢkW-~T>cgZ797$aj/62LU DގTC+뼞eP3P48k)0i7_Jr{S1.הɧs}C Iy70Y-P-.%*q*_!<@J,V߃l6-^vBݦ o⾿H0/){Ƶѓc>`vH-MР!<56__; =%n?`bnP'ko;6 }, Lw* `똙z2K`N*s:ʻh9ר]"wJEӺk$W vZ 5v^YߪtjDW8#jΰ"fŃKI092ML ⿼R6󹶫;.0<u9.DV8`dǐRṽnNyKgY R?@zЭag\^eN?0?lu³X?gnÊ SERD)_o&qi;BYpC!;^I(γ7ۛ(_H -OŒHרTD/k7k)3ϟtq\b[L>ZˌH-fU 0| .,K=xz Β>0CZq nj[ z.6Fti֗3ǕtsT.yz3j'?qU>\;_2m%/#J7lko4C!]0O4D{G(rI9;nxr2i),]NzC7 4I7=NP~s1e#*+]qD.I8ZcMU/\%nMtLMCi jt? 3S)$B nOA|m4O%Ij˿VW޵10e<mիamݐ/)MPjJBs`|6(k./2I: ˣQMf;[l) 92)I:0mA[|e>{בO38%C8E9x9uMSSA8UpF$c87B %;}ΤkNmOýGɱ!:ܽK`:L:W~gnpæ1v~dT#zwi^[ lc^=(έ ޳ϔk%˛C66rCˬ+4 yN܇ TIxǢ]@teP wuy)'" N%zR#؜hP{=NwE"tax2S6$sHJgi/TwʑxdI\ b,t0WgoXwk&\gMigAYp=~lYcYzFM0ULiFϗpD^fE2w֠C+Oe~ě :$a<}K GF&pQ5-d2Z42;W (j$Lб:WmץՐmp(.iL+%8*bHl߃N}^ KB$Ϳv ryDFHl*'?܀sx[ɌGQ> -5 I.Xpm.жԭT V]7wmpo7mX*kob51l)(."蓥U4ѩ{ m?}y#@rx2s4%)#-?4DƇfEx|DCbR]c 1\m9 -Lu`,0ߏ/h5/26h6ɝxКeObISymo7|^ïӽ% S9D=;I%& Yl'|2fo;DZc?,dnuKsˣ_h4TTj!?72!t|4/ʁL_t\{-!n;x}ַQaQi+j~jw^ qNW$ Dۼ>##L<^5qg?VUJR@E!ݿtn G. 1voe7uW u=pOC8\s[ZL[.P+vy3'N &|47EL k M񔨿pz[{=44C.xF,غ  Ffm>i$Rq #{K,ԶT6Ck;e?#P|7W=-ޝX'Bk3# ;qཤ,xčR/.[-Vf6nw9M|XNٵ6O*'CQGU@:ӶWœq VtKwz'X2'O=ߦ nxa5G̃mv;F`tfiLlF~XXDƗq? Ztز`⢙]qS:+r;D6 OR>L6kMF8\;N"+"e `B'âK4{Zp{fDofԘLlb0KJ~ Tg+wFI;k[%@U ,К~0K j?o ߦ9w(``WBRYvV [6z^khuSLZs7_O|uNO- Y8 \ `bpu³x:63ӟ5dXwne_lf6\…Vo9`B%&hilc7(̮hs >m6!޷cCFq'%3غa#|ϳ>aA>G -x8%/ni'Siy#x,v+X;are;({.|U*AWKLNљLCA32]e q_zQbcY=i`Ԩ]Ox\3a}}9ؑbn3Z"ǎ_?OZώ[]q,OՑaf׸sˠU2 ?oyyUxQRvL]'$ ڷ"sSyٚ8SMjS|'A _;-d|F 8J me.ns7\ #(wpʗ].gwM|k1$*}R YZKi0wexX_{CĖwѫ+3L:yo#xRf;<" ~"`|緋K:*ŷzl ѷljURLb>]ˑ=/کِᮤqٵeWT ֓XLp;WdL6]7\==-֏?"TɁU:Qy],ԉ^@PQs7yfrׯgUkyw}ЯB9 l7i2f2O-g%w8#6qg"WhO[㹋'b7"W,Œ5"GcWIdlnձGS~\eF{?,tʏB{sx¿1n%,pru9ڿ9{2"W" 0~00̖ӧܛ BvH vc o` 1[`{@j<~E '6ZُíƑcB c5qc"`+_wy=N:i6+ǥG6-TpX4)0<O]7Q'UU}jϦ(f"8Nq]zkvä |0![z[~ z.ON+21;%^'T 7v^}ڑdJ"f>#c9pUO)ϩvs%OljB`LoJu;P#Fa˸r/A ^R=+&{K6(8עU)$a)iK^Jy5+?]mwx:'W܏:j@W"ğLr@t'? 0C{ۮx"B׈WU0-6&ۣLW4p44Fvv_ XsD KPFT0ncV[gSWt{=a6,(8`J|n91qFN}u6K9Ytm> `\k}5d:F }9[cdQ6ֱ шx~q5O{KVV'^nԡ#_u8tx7R^BN"k!Fz JB(2-AAӪQ⹧}T8=v]UB,5g# ޡCD!B1Zwa6ǮJx>ʀf>PuKvTŸ^\¤;âQ.ģEF?6K %OxeaG,hɊ5ycbtsg' ɸ? 
[binary data omitted]
  if (!missing(p)) {
    if (sum(p)>1) warning("'p' sum > 1")
    if (is.logical(all.equal(1.0,sum(p)))) p <- p[-length(p)]
  }
  if (missing(K)) {
    if (!is.null(list(...)$labels)) K <- length(list(...)$labels)
    if (!missing(beta)) K <- length(beta)
    if (!missing(p)) K <- length(p)+1
  }
  if (!regr.only) {
    if (missing(p)) p <- rep(1/K,K-1)
    pname <- names(p)
    if (is.null(pname)) pname <- rep(NA,K-1)
    ordinal(x,K=K,liability=liability,p=p,constrain=pname,exo=exo,...) <- X
    if (!regr) return(x)
  }
  if (missing(beta)) beta <- rep(0,K)
  fname <- paste(gsub(" ","",deparse(formula)),seq(K)-1,sep=":")
  fpar <- names(beta)
  if (is.null(fpar)) fpar <- fname
  parameter(x,fpar,start=beta) <- fname
  val <- paste0("function(x,p,...) p[\"",fpar[1],"\"]*(x==0)")
  for (i in seq(K-1)) {
    val <- paste0(val,"+p[\"",fpar[i+1],"\"]*(x==",i,")")
  }
  functional(x,formula) <- eval(parse(text=val))
  return(x)
}

##' @export
'categorical<-' <- function(x,...,value) categorical(x,value,...)
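The fragment recovered above shows what the categorical modifier adds to an lvm object: the covariate itself is generated through ordinal() on a liability scale with the supplied level probabilities, and level-specific effects are attached through functional(), with one parameter per level named after the formula (e.g. "y~x:0", "y~x:1", ...). A minimal usage sketch follows; since the head of the function (and thus its exact signature) is lost in the omitted binary block, the call pattern below is an assumption pieced together from the fragment (arguments K, p and beta), not a verbatim example from the package.

library(lava)

## Sketch under the assumptions stated above: start from a model with an
## edge from x to y, then declare x as a categorical covariate with K=3
## levels (P(x=0)=0.5, P(x=1)=0.3, P(x=2)=0.2) and level-specific
## contributions 0, 2, -1 to y (stored as parameters "y~x:0", "y~x:1", "y~x:2").
m <- lvm(y ~ x)
categorical(m, K=3, p=c(0.5, 0.3), beta=c(0, 2, -1)) <- y ~ x

## Simulate from the model and inspect the generated levels of x
d <- sim(m, 500, seed=1)
table(d$x)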
lava/R/equivalence.R0000644000176200001440000001072013162174023014021 0ustar liggesusers##' Identify candidates of equivalent models ##' ##' Identifies candidates of equivalent models ##' ##' ##' @param x \code{lvmfit}-object ##' @param rel Formula or character-vector specifying two variables to omit from ##' the model and subsequently search for possible equivalent models ##' @param tol Define two models as empirical equivalent if the absolute ##' difference in score test is less than \code{tol} ##' @param k Number of parameters to test simultaneously. For \code{equivalence} ##' the number of additional associations to be added instead of \code{rel}. ##' @param omitrel if \code{k} greater than 1, this boolean defines wether to ##' omit candidates containing \code{rel} from the output ##' @param \dots Additional arguments to be passed to the low level functions ##' @author Klaus K. Holst ##' @seealso \code{\link{compare}}, \code{\link{modelsearch}} ##' @export equivalence <- function(x,rel,tol=1e-3,k=1,omitrel=TRUE,...) { if (missing(rel)) stop("Specify association 'rel' (formula or character vector)") if (inherits(rel,"formula")) { myvars <- all.vars(rel) } else { myvars <- rel } if (length(myvars)!=2) stop("Two variables only") x0 <- Model(x) cancel(x0) <- rel e0 <- estimate(x0,data=model.frame(x),weights=Weights(x),estimator=x$estimator,...) if (k!=1) { p0 <- coef(x) p0[] <- 0 p0[match(names(coef(e0)),names(p0))] <- coef(e0) S0 <- score(x,p=p0)[,,drop=TRUE]; I0 <- information(x,p=p0) T0 <- rbind(S0)%*%solve(I0)%*%cbind(S0); names(T0) <- "Q" } s <- modelsearch(e0,k=k,...) relname <- c(paste(myvars,collapse=lava.options()$symbol[2]), paste(rev(myvars),collapse=lava.options()$symbol[2])) relidx <- NULL if (k==1) { relidx <- na.omit(match(relname,s$res[,"Index"])) T0 <- s$test[relidx,1] } T <- s$test[,1] Equiv <- setdiff(which(abs(T-T0)tol) if (omitrel) { ## Don't save models including 'rel' keep <- c() if (length(Equiv)>0) { for (i in seq_len(length(Equiv))) { newvars <- s$var[[Equiv[i]]] if (!any(apply(newvars,1,function(z) all(z%in%myvars)))) keep <- c(keep,Equiv[i]) } Equiv <- keep } keep <- c() if (length(Improve)>0) { for (i in seq_len(length(Improve))) { newvars <- s$var[[Improve[i]]] if (!any(apply(newvars,1,function(z) all(z%in%myvars)))) keep <- c(keep,Improve[i]) } Improve <- keep } } eqvar <- ivar <- NULL models <- list() if (length(Equiv)>0){ for (i in seq_len(length(Equiv))) { xnew <- x0 newvars <- s$var[[Equiv[i]]] for (j in seq_len(nrow(newvars))) { exo.idx <- which(newvars[j,]%in%index(x0)$exogenous) if (length(exo.idx)>0) { xnew <- regression(xnew,from=newvars[j,exo.idx],to=newvars[j,setdiff(1:2,exo.idx)]) } else { covariance(xnew) <- newvars } } models <- c(models,list(xnew)) } eqvar <- s$var[Equiv] } if (length(Improve)>0) { for (i in seq_len(length(Improve))) { xnew <- x0 newvars <- s$var[[Improve[i]]] for (j in seq_len(nrow(newvars))) { exo.idx <- which(newvars[j,]%in%index(x0)$exogenous) if (length(exo.idx)>0) { xnew <- regression(xnew,from=newvars[j,exo.idx],to=newvars[j,setdiff(1:2,exo.idx)]) } else { covariance(xnew) <- newvars } } models <- c(models,list(xnew)) } ivar <- s$var[Improve] } res <- list(equiv=eqvar, improve=ivar, scoretest=s, models=models, I=Improve, E=Equiv, T0=T0, vars=myvars) class(res) <- "equivalence" return(res) } ##' @export print.equivalence <- function(x,...) 
{ cat(" 0)\t ",paste0(x$vars,collapse=lava.options()$symbol[2])," (",formatC(x$T0),")\n") cat("Empirical equivalent models:\n") if (length(x$E)==0) cat("\t none\n") else for (i in seq_len(length(x$E))) { cat(" ",i,")\t ", x$scoretest$res[x$E[i],"Index"], " (",x$scoretest$res[x$E[i],1],")", "\n",sep="") } cat("Candidates for model improvement:\n") if (length(x$I)==0) cat("\t none\n") else for (i in seq_len(length(x$I))) { cat(" ",i,")\t ", x$scoretest$res[x$I[i],"Index"], " (",x$scoretest$res[x$I[i],1],")", "\n",sep="") } invisible(x) } holm <- function(p) { k <- length(p) w <- 1/k ii <- order(p) po <- p[ii] qs <- min(1,po[1]/w) for (i in 2:k) { qs <- c(qs, min(1, max(qs[i-1],po[i]*(1-w*(i-1))/w))) } return(qs) } lava/R/describecoef.R0000644000176200001440000000174513162174023014144 0ustar liggesusers##' @export describecoef <- function(x,par,from,to,mean=TRUE) { p <- coef(x, mean=mean) if (!missing(from)) { st1 <- paste0(to,lava.options()$symbol[1],from) st2 <- paste0(to,lava.options()$symbol[2],from) st3 <- paste0(from,lava.options()$symbol[2],to) pos <- na.omit(match(unique(c(st1,st2,st3)),p)) attributes(pos) <- NULL return(pos) } res <- strsplit(p,lava.options()$symbol[2]) var.idx <- which(unlist(lapply(res,length))>1) ## Variance parameters rest.idx <- setdiff(seq_along(p),var.idx) res[rest.idx] <- strsplit(p[rest.idx],lava.options()$symbol[1]) mean.idx <- which(unlist(lapply(res,length))==1) ## Mean parameters reg.idx <- setdiff(rest.idx,mean.idx) names(res)[mean.idx] <- paste0("m",seq_along(mean.idx)) for (i in var.idx) attr(res[[i]],"type") <- "cov" for (i in mean.idx) attr(res[[i]],"type") <- "mean" for (i in reg.idx) attr(res[[i]],"type") <- "reg" if (missing(par)) return(res) return(res[par]) } lava/R/nodecolor.R0000644000176200001440000000246413162174023013512 0ustar liggesusers##' @export `nodecolor<-` <- function(object,var,...,value) UseMethod("nodecolor<-") ##' @export `nodecolor<-.lvm` <- function(object, var=vars(object), border, labcol, shape, lwd, ..., value) { if (length(var)>0 & length(value)>0) { if (inherits(var,"formula")) var <- all.vars(var) object$noderender$fill[var] <- value if (!missing(border)) object$noderender$col[var] <- border if (!missing(shape)) object$noderender$shape[var] <- shape if (!missing(labcol)) object$noderender$textCol[var] <- labcol if (!missing(lwd)) object$noderender$lwd[var] <- lwd } return(object) } ##' @export `nodecolor<-.default` <- function(object, var=vars(object), border, labcol, shape, lwd, ..., value) { if (length(var)>0 & length(value)>0) { if (inherits(var,"formula")) var <- all.vars(var) object <- addattr(object,attr="fill",var=var,val=value) if (!missing(border)) object <- addattr(object,attr="col",var=var,val=border) if (!missing(shape)) object <- addattr(object,attr="shape",var=var,val=shape) if (!missing(labcol)) object <- addattr(object,attr="textCol",var=var,val=labcol) if (!missing(lwd)) object <- addattr(object,attr="lwd",var=var,val=lwd) } return(object) } lava/R/onload.R0000644000176200001440000000126113162174023012774 0ustar liggesusers'.onLoad' <- function(libname, pkgname="lava") { addhook("heavytail.init.hook","init.hooks") addhook("glm.estimate.hook","estimate.hooks") addhook("ordinal.estimate.hook","estimate.hooks") addhook("cluster.post.hook","post.hooks") addhook("ordinal.sim.hook","sim.hooks") addhook("color.ordinal","color.hooks") addhook("ordinal.remove.hook","remove.hooks") } '.onAttach' <- function(libname, pkgname="lava") { desc <- utils::packageDescription(pkgname) packageStartupMessage(desc$Package, " 
version ",desc$Version) lava.options(cluster.index=versioncheck("mets",c(0,2,7)), tobit=versioncheck("lava.tobit",c(0,5))) } lava/R/iid.R0000644000176200001440000001004113162174023012261 0ustar liggesusers##' Extract i.i.d. decomposition (influence function) from model object ##' ##' Extract i.i.d. decomposition (influence function) from model object ##' @export ##' @usage ##' ##' iid(x,...) ##' ##' \method{iid}{default}(x,bread,id=NULL,folds=0,maxsize=(folds>0)*1e6,...) ##' ##' @aliases iid.default ##' @param x model object ##' @param id (optional) id/cluster variable ##' @param bread (optional) Inverse of derivative of mean score function ##' @param folds (optional) Calculate aggregated iid decomposition (0:=disabled) ##' @param maxsize (optional) Data is split in groups of size up to 'maxsize' (0:=disabled) ##' @param ... additional arguments ##' @examples ##' m <- lvm(y~x+z) ##' distribution(m, ~y+z) <- binomial.lvm("logit") ##' d <- sim(m,1e3) ##' g <- glm(y~x+z,data=d,family=binomial) ##' crossprod(iid(g)) ##' iid <- function(x,...) UseMethod("iid") ##' @export iid.default <- function(x,bread,id=NULL,folds=0,maxsize=(folds>0)*1e6,...) { if (!any(paste("score",class(x),sep=".") %in% methods("score"))) { warning("Not available for this class") return(NULL) } if (folds>0 || maxsize>0 || (!missing(id) && lava.options()$cluster.index)) { if (!requireNamespace("mets",quietly=TRUE)) stop("Requires 'mets'") } if (folds>0) { U <- Reduce("rbind",mets::divide.conquer(function(data) score(x,data=data,...), id=id, data=data,size=round(nrow(data)/folds))) } else { U <- score(x,indiv=TRUE,...) } n <- NROW(U) pp <- pars(x) if (!missing(bread) && is.null(bread)) { bread <- vcov(x) } if (missing(bread)) bread <- attributes(U)$bread if (is.null(bread)) { bread <- attributes(x)$bread if (is.null(bread)) bread <- x$bread if (is.null(bread)) { if (maxsize>0) { ff <- function(p) colSums(Reduce("rbind",mets::divide.conquer(function(data) score(x,data=data,p=p,...), data=data,size=maxsize))) I <- -numDeriv::jacobian(ff,pp,method=lava.options()$Dmethod) } else { I <- -numDeriv::jacobian(function(p) score(x,p=p,indiv=FALSE,...),pp,method=lava.options()$Dmethod) } bread <- Inverse(I) } } iid0 <- U%*%bread if (!missing(id)) { N <- nrow(iid0) if (!lava.options()$cluster.index) { iid0 <- matrix(unlist(by(iid0,id,colSums)),byrow=TRUE,ncol=ncol(bread)) } else { iid0 <- mets::cluster.index(id,mat=iid0,return.all=FALSE) } attributes(iid0)$N <- N } colnames(iid0) <- colnames(U) return(structure(iid0,bread=bread)) } ##' @export iid.multigroupfit <- function(x,...) iid.default(x,combine=TRUE,...) ##' @export iid.matrix <- function(x,...) { p <- ncol(x); n <- nrow(x) mu <- colMeans(x,na.rm=TRUE); S <- var(x,use="pairwise.complete.obs")*(n-1)/n iid1 <- t(t(x)-mu) iid2 <- matrix(ncol=(p+1)*p/2,nrow=n) pos <- 0 nn <- c() cc <- mu for (i in seq(p)) for (j in seq(i,p)) { pos <- pos+1 cc <- c(cc,S[i,j]) iid2[,pos] <- (iid1[,i]*iid1[,j])-cc[length(cc)] nn <- c(nn,paste(colnames(x)[c(i,j)],collapse=lava.options()$symbols[2])) } colnames(iid1) <- colnames(x); colnames(iid2) <- nn names(cc) <- c(colnames(iid1),colnames(iid2)) iid1[is.na(iid1)] <- 0 iid2[is.na(iid2)] <- 0 structure(cbind(iid1/n,iid2/n), coef=cc, mean=mu, var=S) } ##' @export iid.numeric <- function(x,...) { n <- length(x) mu <- mean(x); S <- var(x)*(n-1)/n iid1 <- t(t(x)-mu) structure(cbind(mean=iid1/n,var=(iid1^2-S)/n),coef=c(mean=mu,var=S),mean=mu,var=S) } ##' @export iid.data.frame <- function(x,...) 
{ if (!all(apply(x[1,,drop=FALSE],2,function(x) inherits(x,c("numeric","integer"))))) stop("Don't know how to handle data.frames of this type") iid(as.matrix(x)) } lava/R/parsedesign.R0000644000176200001440000000574413162174023014036 0ustar liggesuserssumsplit <- function(x,...) { plus <- strsplit(x,"\\+")[[1]] spl <- unlist(lapply(plus, function(x) { val <- strsplit(x,"\\-")[[1]] val[-1] <- paste0("-",val[-1]) setdiff(val,"") })) res <- c() for (st in spl) { st <- gsub(" ","",st) st0 <- gsub("^[-0-9\\*]*","",st) val <- gsub("\\*","",regmatches(st,gregexpr("^[-0-9\\*]*",st))[[1]]) if (val=="") val <- "1" val <- switch(val,"-"=-1,val) res <- c(res,val,st0) } return(res) } ##' @export parsedesign <- function(coef,x,...,regex=FALSE,diff=TRUE) { if (!is.vector(coef)) coef <- stats::coef(coef) if (is.numeric(coef) && !is.null(names(coef))) coef <- names(coef) dots <- lapply(substitute(list(...)),function(x) x)[-1] expr <- suppressWarnings(inherits(try(x,silent=TRUE),"try-error")) if (expr) { ee <- c(deparse(substitute(x)), unlist(lapply(dots, deparse))) } else { ee <- c(deparse(x), sapply(dots, function(x) deparse(x))) } if (!expr && is.numeric(x)) { return(do.call(contr, list(c(list(x),list(...)),n=length(coef),diff=diff))) } res <- c() diff <- rep(diff,length.out=length(ee)) count <- 0 for (e in ee) { count <- count+1 diff0 <- FALSE Val <- rbind(rep(0,length(coef))) if (grepl('\"',e)) { diff0 <- diff[count] && grepl("^c\\(",e) e0 <- gsub(" |\\)$|^c\\(","",e) ff <- strsplit(e0,'\"')[[1L]] } else { ff <- sumsplit(e) } for (i in seq(length(ff)/2)) { val0 <- gsub("[*()]","",ff[2*(i-1)+1]) val <- char2num(val0) if (is.na(val)) { val <- switch(val0,"-"=-1,1) } par0 <- ff[2*i] par0int <- as.integer(char2num(par0)) if (!regex) par0 <- glob2rx(par0) if (is.na(par0int)) par0int <- grep(par0,coef) if (length(par0int)>1) { diff0 <- diff[count] for (k in seq_along(par0int)) { if (par0int[k]<=length(Val)) { if (diff[count]) { Val[par0int[k]] <- val } else { Val0 <- Val; Val0[] <- 0 Val0[par0int[k]] <- val res <- rbind(res,Val0) } } } } else { if (length(par0int)>0 && par0int<=length(Val)) Val[par0int] <- val } } if (diff0) { n <- sum(Val!=0) if (n>1) { Val0 <- Val ii <- which(Val0!=0) Val <- matrix(0,nrow=n-1,ncol=length(Val)) for (i in seq(n-1)) { Val[i,ii[c(1,i+1)]] <- Val0[ii[c(1,i+1)]]*c(1,-1) } } } if (any(Val!=0)) res <- rbind(res,Val) } res } lava/R/zgetsas.R0000644000176200001440000000242213162174023013200 0ustar liggesusers##' Run SAS code like in the following: ##' ##' ODS CSVALL BODY="myest.csv"; ##' proc nlmixed data=aj qpoints=2 dampstep=0.5; ##' ... ##' run; ##' ODS CSVALL Close; ##' ##' and read results into R with: ##' ##' \code{getsas("myest.csv","Parameter Estimates")} ##' ##' @title Read SAS output ##' @param infile file (csv file generated by ODS) ##' @param entry Name of entry to capture ##' @param \dots additional arguments to lower level functions ##' @author Klaus K. Holst ##' @export ##' @seealso getMplus getSAS <- function(infile,entry="Parameter Estimates",...) 
{ con <- file(infile, blocking = FALSE) inp <- readLines(con) close(con) nullstring <- 0 linestart <- 1; lineend <- length(inp) ## mycmd1 <- paste0("grep -n \"",entry,"\" ", csvfile); a1 <- system(mycmd1,intern=TRUE); ## linestart <- char2num(strsplit(a1,":")[[1]][1]) idx <- sapply(inp,function(x) length(grep(entry, x))>0) if (sum(idx)==1) { linestart <- which(idx) for (i in seq(linestart,length(inp))) { lineend <- i-1 ## if (inp[i]==inp[i-1]) break; if (inp[i]=="") break; } } else { stop("No match or duplicate entries!") } subinp <- inp[(linestart+1):(lineend)] con <- textConnection(subinp) res <- read.csv(con,header=TRUE) close(con) return(res) } lava/R/cluster.hook.R0000644000176200001440000000463413162174023014147 0ustar liggesuserscluster.post.hook <- function(x,...) { if (class(x)[1]=="multigroupfit") { if (is.null(x$cluster)) return(NULL) if (any(unlist(lapply(x$cluster,is.null)))) return(NULL) allclusters <- unlist(x$cluster) uclust <- unique(allclusters) K <- length(uclust) G <- x$model$ngroup S0 <- lapply(score(x,indiv=TRUE), function(x) { x[which(is.na(x))] <- 0; x }) S <- matrix(0,length(pars(x)),nrow=K) aS <- c() ##matrix(0,S[[1]] for (i in uclust) { for (j in seq_len(G)) { idx <- which(x$cluster[[j]]==i) if (length(idx)>0) S[i,] <- S[i,] + colSums(S0[[j]][idx,,drop=FALSE]) } } J <- crossprod(S) I <- information(x,type="hessian",...) iI <- Inverse(I) asVar <- iI%*%J%*%iI x$vcov <- asVar return(x) } ## lvmfit: if (!is.null(x$cluster)) { uclust <- unique(x$cluster) K <- length(uclust) S <- score(x,indiv=TRUE) #,...) I <- information(x,type="hessian") #,...) iI <- Inverse(I) S0 <- matrix(0,ncol=ncol(S),nrow=K) count <- 0 ## J1 <- matrix(0,ncol=ncol(S),nrow=ncol(S)) for (i in uclust) { count <- count+1 S0[count,] <- colSums(S[which(x$cluster==i),,drop=FALSE]) ## J1 <- J1+tcrossprod(S0[count,]) }; p <- ncol(S) ## adj1 <- 1 adj1 <- K/(K-1) ## adj1 <- K/(K-p) ## Mancl & DeRouen, 2001 J <- adj1*crossprod(S0) col3 <- sqrt(diag(iI)); ## Naive se nn <- c("Estimate","Robust SE", "Naive SE", "P-value") asVar <- iI%*%J%*%iI } else { asVar <- x$vcov } diag(asVar)[diag(asVar)==0] <- NA mycoef <- x$opt$estimate x$vcov <- asVar SD <- sqrt(diag(asVar)) Z <- mycoef/SD pval <- 2*(pnorm(abs(Z),lower.tail=FALSE)) if (is.null(x$cluster)) { col3 <- Z nn <- c("Estimate","Std. Error", "Z-value", "P-value") } newcoef <- cbind(mycoef, SD, col3, pval); nparall <- index(x)$npar + ifelse(x$control$meanstructure, index(x)$npar.mean,0) if (!is.null(x$expar)) { nparall <- nparall+length(x$expar) } mycoef <- matrix(NA,nrow=nparall,ncol=4) mycoef[x$pp.idx,] <- newcoef colnames(mycoef) <- nn mynames <- c() if (x$control$meanstructure) { mynames <- vars(x)[index(x)$v1==1] } if (index(x)$npar>0) { mynames <- c(mynames,paste0("p",seq_len(index(x)$npar))) } if (!is.null(x$expar)) { mynames <- c(mynames,names(x$expar)) } rownames(mycoef) <- mynames x$coef <- mycoef return(x) } lava/R/pdfconvert.R0000644000176200001440000000274513162174023013702 0ustar liggesusers##' Convert PDF file to print quality png (default 300 dpi) ##' ##' Access to ghostscript program 'gs' is needed ##' @title Convert pdf to raster format ##' @param files Vector of (pdf-)filenames to process ##' @param dpi DPI ##' @param resolution Resolution of raster image file ##' @param gs Optional ghostscript command ##' @param gsopt Optional ghostscript arguments ##' @param resize Optional resize arguments (mogrify) ##' @param format Raster format (e.g. png, jpg, tif, ...) 
##' @param \dots Additional arguments ##' @seealso \code{dev.copy2pdf}, \code{printdev} ##' @export ##' @author Klaus K. Holst ##' @keywords iplot pdfconvert <- function(files, dpi=300, resolution=1024, gs, gsopt, resize, format="png", ...) { if (missing(gsopt)) gsopt <- "-dSAFTER -dBATCH -dNOPAUSE -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4" if (missing(gs)) { gs <- names(which(Sys.which(c("gs", "gswin32c", "gswin64c")) != "")) } cmd1 <- paste0(gs," -r",dpi," -dBackgroundColor='16#ffffff'") if (missing(resize)) { resize <- paste0("mogrify -resize ", resolution) } for (f in files) { f0 <- strsplit(f,".pdf")[1] f.out <- paste(f0,format,sep=".") f.pdf <- paste(f0,"pdf",sep=".") mycmd1 <- paste0(cmd1, " ", gsopt, " -sOutputFile=", f.out, " > /dev/null ", f.pdf) mycmd2 <- paste0(resize, " ", f.out) cat(f.pdf) system(mycmd1) cat(" -> ") system(mycmd2) cat(f.out, "\n") } } lava/R/utils.R0000644000176200001440000002647313162174023012674 0ustar liggesuserschar2num <- function(x,...) { idx <- grep("^[-]*[0-9\\.]+",x,perl=TRUE,invert=TRUE) if (length(idx)>0) x[idx] <- NA as.numeric(x) } ###{{{ substArg substArg <- function(x,env,...) { if (!missing(env)) { a <- with(env,substitute(x)) # a <- substitute(x,environment(env)) } else { a <- substitute(x) } myclass <- tryCatch(class(eval(a)),error=function(e) NULL) if (is.null(myclass) || myclass=="name") { # if (is.null(myclass)) { res <- unlist(sapply(as.character(a), function(z) { trimmed <- gsub(" ","",z,fixed=TRUE) val <- strsplit(trimmed,"+",fixed=TRUE) if (val[1]=="") val <- NULL val })); attributes(res)$names <- NULL return(res) } return(eval(a)) } ## g <- function(zz,...) { ## env=new.env(); assign("x",substitute(zz),env) ## substArg(zz,env=env) ## } ## h <- function(x,...) { ## env=new.env(); assign("x",substitute(x),env) ## substArg(x,env=TRUE) ## } ###}}} ###{{{ procrandomslope procrandomslope <- function(object,data=object$data,...) { Xfix <- FALSE xfix <- myfix <- list() xx <- object for (i in seq_len(object$ngroup)) { x0 <- object$lvm[[i]] data0 <- data[[i]] xfix0 <- colnames(data0)[(colnames(data0)%in%parlabels(x0,exo=TRUE))] xfix <- c(xfix, list(xfix0)) if (length(xfix0)>0) { ## Yes, random slopes Xfix<-TRUE } xx$lvm[[i]] <- x0 } if (Xfix) { for (k in seq_len(object$ngroup)) { x1 <- x0 <- object$lvm[[k]] data0 <- data[[k]] nrow <- length(vars(x0)) xpos <- lapply(xfix[[k]],function(y) which(regfix(x0)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix0 <- list(var=xfix[[k]], col=colpos, row=rowpos) myfix <- c(myfix, list(myfix0)) for (i in seq_along(myfix0$var)) for (j in seq_along(myfix0$col[[i]])) regfix(x0, from=vars(x0)[myfix0$row[[i]][j]],to=vars(x0)[myfix0$col[[i]][j]]) <- colMeans(data0[,myfix0$var[[i]],drop=FALSE],na.rm=TRUE) index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) object$lvm[[k]] <- x0 yvars <- endogenous(x0) #parkeep <- c(parkeep, parord[[k]][coef(x1,mean=TRUE)%in%coef(x0,mean=TRUE)]) } # parkeep <- sort(unique(parkeep)) object <- multigroup(object$lvm,data,fix=FALSE,exo.fix=FALSE) } return(list(model=object,fix=myfix)) } ###}}} procrandomslope ###{{{ kronprod ## ' Calculate matrix product with kronecker product ## ' ## ' \deqn{(A\crossprod B) Y} ## ' @title Calculate matrix product with kronecker product ## ' @param A ## ' @param B ## ' @param Y ## ' @author Klaus K. 
Holst kronprod <- function(A,B,Y) { if (missing(Y)) { ## Assume 'B'=Identity, (A otimes B)Y k <- nrow(B)/ncol(A) res <- rbind(apply(B,2,function(x) matrix(x,nrow=k)%*%t(A))) return(res) } rbind(apply(Y,2,function(x) B%*%matrix(x,nrow=ncol(B))%*%t(A))) } ###}}} kronprod ###{{{ izero izero <- function(i,n) { ## n-1 zeros and 1 at ith entry x <- rep(0,n); x[i] <- 1 x } ###}}} ###{{{ Debug `Debug` <- function(msg, cond=lava.options()$debug) { if (cond) print(paste(msg, collapse=" ")) } ###}}} ###{{{ categorical2dummy categorical2dummy <- function(x,data,silent=TRUE,...) { x0 <- x X <- intersect(index(x)$exogenous,colnames(data)) catX <- c() for (i in X) { if (!is.numeric(data[,i])) catX <- c(catX,i) } if (length(catX)==0) return(list(x=x,data=data)) f <- as.formula(paste("~ 1+", paste(catX,collapse="+"))) opt <- options(na.action="na.pass") M <- model.matrix(f,data) options(opt) Mnames <- colnames(M) Mpos <- attributes(M)$assign A <- index(x)$A F <- regfix(x) count <- 0 for (i in catX) { count <- count+1 mnames <- Mnames[Mpos==count] kill(x0) <- i Y <- colnames(A)[A[i,]==1] if (length(mnames)==1) { fix <- as.list(F$labels[i,]) fixval <- F$values[i,] fix[which(!is.na(fixval))] <- fixval[na.omit(fixval)] regression(x0,to=Y,from=mnames,silent=silent) <- fix[Y] } else { x0 <- regression(x0,to=Y,from=mnames,silent=silent) } } index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) return(list(x=x0,data=cbind(data,M))) } ###}}} ###{{{ procdata.lvm `procdata.lvm` <- function(x,data,categorical=FALSE, na.method=ifelse(any(is.na(data[,intersect(colnames(data),manifest(x))])),"complete.obs","pairwise.complete.obs"), missing=FALSE ) { if (is.numeric(data) & !is.list(data)) { data <- rbind(data) } if (is.data.frame(data) | is.matrix(data)) { nn <- colnames(data) data <- as.data.frame(data); colnames(data) <- nn; rownames(data) <- NULL obs <- setdiff(intersect(vars(x), colnames(data)),latent(x)) Debug(obs) mydata <- subset(data, select=obs) if (NROW(mydata)==0) stop("No observations") for (i in seq_len(ncol(mydata))) { if (inherits(mydata[,i],"Surv")) mydata[,i] <- mydata[,i][,1] if (is.character(mydata[,i]) | is.factor(mydata[,i])) mydata[,i] <- as.numeric(as.factor(mydata[,i]))-1 } ## mydata <- data[,obs] ## if (any(is.na(mydata))) { ## warning("Discovered missing data. Going for a complete-case analysis. 
For data missing at random see 'missingMLE'.\n", immediate.=TRUE) ## mydata <- na.omit(mydata) ## } S <- NULL n <- nrow(mydata) if (n==1) { S <- diag(nrow=ncol(mydata)); colnames(S) <- rownames(S) <- obs } if (na.method=="complete.obs" && !missing) { mydata0 <- na.omit(mydata) n <- nrow(mydata0) mu <- colMeans(mydata0) if (is.null(S) && n>2) S <- (n-1)/n*cov(mydata0) ## MLE variance matrix of observed variables rm(mydata0) } nS <- is.null(S) || any(is.na(S)) if (na.method=="pairwise.complete.obs" || nS) { mu <- colMeans(mydata,na.rm=TRUE) if (nS) { n <- nrow(mydata) S <- (n-1)/n*cov(mydata,use="pairwise.complete.obs") S[is.na(S)] <- 1e-3 } } } else if (is.list(data)) { if ("cov"%in%names(data)) data$S <- data$cov if ("var"%in%names(data)) data$S <- data$var if ("mean"%in%names(data)) data$mu <- data$mean n <- data$n S <- reorderdata.lvm(x,data$S) mu <- reorderdata.lvm(x,data$mu) ## if (is.null(n)) stop("n was not specified"); } else stop("Unexpected type of data!"); if (nrow(S)!=ncol(S)) stop("Wrong type of data!"); return(list(S=S,mu=mu,n=n)) } ###}}} ###{{{ reorderdata.lvm `reorderdata.lvm` <- function(x, data) { if (is.vector(data)) { nn <- names(data) ii <- na.omit(match(index(x)$manifest, nn)) data[ii,drop=FALSE] } else { nn <- colnames(data) ii <- na.omit(match(index(x)$manifest, nn)) data[ii,ii,drop=FALSE] } } ###}}} ###{{{ symmetrize `symmetrize` <- function(M, upper=TRUE) { if (length(M)==1) return(M) if (!is.matrix(M) | ncol(M)!=nrow(M)) stop("Only implemented for square matrices.") if (upper) { for (i in seq_len(ncol(M)-1)) for (j in seq(i+1,nrow(M))) M[i,j] <- M[j,i] return(M) } else { for (i in seq_len(ncol(M))) for (j in seq_len(nrow(M))) if (M[i,j]==0) M[i,j] <- M[j,i] else M[j,i] <- M[i,j] return(M) } } ###}}} ###{{{ naiveGrad naiveGrad <- function(f, x, h=1e-9) { nabla <- numeric(length(x)) for (i in seq_along(x)) { xh <- x; xh[i] <- x[i]+h nabla[i] <- (f(xh)-f(x))/h } return(nabla) } ###}}} ###{{{ CondMom # conditional on Compl(idx) CondMom <- function(mu,S,idx,X) { idxY <- idx idxX <- setdiff(seq_len(ncol(S)),idxY) SXX <- S[idxX,idxX,drop=FALSE]; SYY <- S[idxY,idxY,drop=FALSE] SYX <- S[idxY,idxX,drop=FALSE] iSXX <- solve(SXX) condvar <- SYY-SYX%*%iSXX%*%t(SYX) if (missing(mu)) return(condvar) muY <- mu[,idxY,drop=FALSE] muX <- mu[,idxX,drop=FALSE] if (is.matrix(mu)) Z <- t(X-muX) else Z <- apply(X,1,function(xx) xx-muX) SZ <- t(SYX%*%iSXX%*%Z) ## condmean <- matrix( if (is.matrix(mu)) condmean <- SZ+muY else condmean <- t(apply(SZ,1,function(x) muY+x)) ## ,ncol=ncol(SZ),nrow=nrow(SZ)) return(list(mean=condmean,var=condvar)) } ###}}} CondMom ###{{{ Depth-First/acc (accessible) DFS <- function(M,v,explored=c()) { explored <- union(explored,v) incident <- M[v,] for (v1 in setdiff(which(incident==1),explored)) { explored <- DFS(M,v1,explored) } return(explored) } acc <- function(M,v) { if (is.character(v)) v <- which(colnames(M)==v) colnames(M)[setdiff(DFS(M,v),v)] } ###}}} Depth-First/acc (accessible) npar.lvm <- function(x) { return(index(x)$npar+ index(x)$npar.mean+index(x)$npar.ex) } as.numeric.list <- function(x,...) 
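## Note: only the final lapply() expression below is returned; the 'res' and
## 'asnum' assignments are unused. Elements that parse as numbers are
## converted, all other elements are returned unchanged.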
{ res <- list() asnum <- as.numeric(x) lapply(x,function(y) ifelse(is.na(as.numeric(y)),y,as.numeric(y))) } edge2pair <- function(e) { sapply(e,function(x) strsplit(x,"~")) } numberdup <- function(xx) { ## Convert to numbered list dup.xx <- duplicated(xx) dups <- xx[dup.xx] xx.new <- numeric(length(xx)) count <- 0 for (i in seq_along(xx)) { if (!dup.xx[i]) { count <- count+1 xx.new[i] <- count } else { xx.new[i] <- xx.new[match(xx[i],xx)[1]] } } return(xx.new) } extractvar <- function(f) { yy <- getoutcome(f) xx <- attributes(terms(f))$term.labels myvars <- all.vars(f) return(list(y=yy,x=xx,all=myvars)) } ##' @export getoutcome <- function(formula,sep,...) { aa <- attributes(terms(formula,...)) if (aa$response==0) { res <- NULL } else { res <- paste(deparse(formula[[2]]),collapse="") } if (!missing(sep) && length(aa$term.labels)>0) { attributes(res)$x <- lapply(strsplit(aa$term.labels,"\\|")[[1]], function(x) as.formula(paste0("~",x))) } else { attributes(res)$x <- aa$term.labels } return(res) } ##' @export Specials <- function(f,spec,split2="+",...) { tt <- terms(f,spec) pos <- attributes(tt)$specials[[spec]] if (is.null(pos)) return(NULL) x <- rownames(attributes(tt)$factors)[pos] st <- gsub(" ","",x) res <- unlist(strsplit(st,"[()]"))[2] if (is.null(split2)) return(res) unlist(strsplit(res,"+",fixed=TRUE)) } ##' @export decomp.specials <- function(x,pattern="[()]",pattern2=NULL, pattern.ignore=NULL, sep="[,\\+]",perl=TRUE,reverse=FALSE,...) { st <- gsub(" |^\\(|)$","",x) # Remove white space and leading/trailing parantheses if (!is.null(pattern.ignore)) { if (grepl(pattern.ignore,st,perl=perl,...)) return(st) } if (!is.null(pattern)) { st <- rev(unlist(strsplit(st,pattern,perl=perl,...)))[1] } if (!is.null(pattern2)) { st <- (unlist(strsplit(st,pattern2,perl=perl,...))) if (reverse) st <- rev(st) } unlist(strsplit(st,sep,perl=perl,...)) } Decomp.specials <- function(x,pattern="[()]") { st <- gsub(" ","",x) st <- gsub("\n","",st) mysplit <- rev(unlist(strsplit(st,pattern))) type <- mysplit[2] vars <- mysplit[1] res <- unlist(strsplit(vars,",")) if (type=="s" | type=="seq") { return(paste0(res[1],seq(char2num(res[2])))) } unlist(strsplit(vars,",")) } printline <- function(n=70) { cat(rep("_", n), "\n", sep=""); } lava/R/multipletesting.R0000644000176200001440000000410713162174023014753 0ustar liggesuserspzmax <- function(alpha,S) { ##P(Zmax > z) Family wise error rate, Zmax = max |Z_i| if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") k <- nrow(S) z <- qnorm(1-alpha/2) 1-mets::pmvn(lower=rep(-z,k),upper=rep(z,k),sigma=cov2cor(S)) } ##' @export p.correct <- function(object,idx,alpha=0.05) { S <- vcov(object); if (!missing(idx)) S <- S[idx,idx,drop=FALSE] f <- function(a) pzmax(a,S)-alpha uniroot(f,lower=0,upper=0.05)$root } ##' Closed testing procedure ##' ##' Closed testing procedure ##' @aliases closed.testing p.correct ##' @param object estimate object ##' @param idx Index of parameters to adjust for multiple testing ##' @param null Null hypothesis value ##' @param ... 
Additional arguments ##' @export ##' @examples ##' m <- lvm() ##' regression(m, c(y1,y2,y3,y4,y5,y6,y7)~x) <- c(0,0.25,0,0.25,0.25,0,0) ##' regression(m, to=endogenous(m), from="u") <- 1 ##' variance(m,endogenous(m)) <- 1 ##' set.seed(2) ##' d <- sim(m,200) ##' l1 <- lm(y1~x,d) ##' l2 <- lm(y2~x,d) ##' l3 <- lm(y3~x,d) ##' l4 <- lm(y4~x,d) ##' l5 <- lm(y5~x,d) ##' l6 <- lm(y6~x,d) ##' l7 <- lm(y7~x,d) ##' ##' (a <- merge(l1,l2,l3,l4,l5,l6,l7,subset=2)) ##' if (requireNamespace("mets",quietly=TRUE)) { ##' p.correct(a) ##' } ##' as.vector(closed.testing(a)) ##' closed.testing <- function(object,idx=seq_along(coef(object)),null=rep(0,length(idx)),...) { B <- diag(nrow=length(idx)) e <- estimate(object,keep=idx) combs <- pvals <- c() for (i in seq_along(idx)) { co <- combn(length(idx),i) pp <- numeric(ncol(co)) for (j in seq_along(pp)) { pp[j] <- compare(e,contrast=B[co[,j],,drop=FALSE],null=null[co[,j]],...)$p.value } combs <- c(combs,list(co)) pvals <- c(pvals,list(pp)) } pmax <- c() for (k in seq_along(idx)) { pk <- c() for (i in seq_along(idx)) { cols <- apply(combs[[i]],2,function(x) k%in%x) pk <- c(pk,pvals[[i]][which(cols)]) } pmax <- c(pmax,max(pk)) } return(structure(pmax,comb=combs,pval=pvals)) } lava/R/assoc.R0000644000176200001440000001434613162174023012640 0ustar liggesusersnormal.threshold <- function(object,p=coef(object),...) { M <- moments(object,p=p) ord <- ordinal(Model(object)) K <- attributes(ord)$K cK <- c(0,cumsum(K-1)) breaks.orig <- list() for (i in seq(K)) { breaks.orig <- c(breaks.orig,list(M$e[seq(K[i]-1)+cK[i]])) } breaks <- lapply(breaks.orig, ordreg_threshold) names(breaks) <- names(K) ii <- match(names(K),vars(object)) sigma <- M$Cfull[ii,ii] list(breaks=breaks,sigma=sigma,mean=M$v[ii],K=K) } prob.normal <- function(sigma,breaks,breaks2=breaks) { if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (ncol(sigma)!=2 || missing(breaks)) stop("Wrong input") P <- matrix(ncol=length(breaks2)-1, nrow=length(breaks)-1) for (i in seq(length(breaks)-1)) for (j in seq(length(breaks2)-1)) P[i,j] <- mets::pmvn(lower=c(breaks[i],breaks2[j]),upper=c(breaks[i+1],breaks2[j+1]),sigma=sigma) return(P) } assoc <- function(P,sigma,breaks,...) { if (missing(P)) P <- prob.normal(sigma,breaks,...) Agree <- sum(diag(P)) marg.row <- rowSums(P) marg.col <- colSums(P) Chance <- sum(marg.row*marg.col) kap <- (Agree-Chance)/(1-Chance) gam <- goodmankruskal_gamma(P)$gamma inf <- information_assoc(P) res <- c(list(kappa=kap,gamma=gam),inf) if (!missing(sigma)) res <- c(res,rho=sigma[1,2]) return(res) } ################################################## ### Risk comparison ################################################## ## or:= riskcomp(x,scale=odds) // OR ##' @export riskcomp <- function(x,...,scale,op="/",type=1,struct=FALSE) { val <- c(x,unlist(list(...))) if (!missing(scale)) val <- do.call(scale,list(val)) if (!struct && length(val)==2) { if (type==2) { return(do.call(op,list(val[2],val[1]))) } else if (type==1) { return(do.call(op,list(val[1],val[2]))) } return(c(do.call(op,list(val[2],val[1])), do.call(op,list(val[1],val[2])))) } outer(val,val,op) offdiag(outer(val,val,op) ,type=type) } ##' @export Ratio <- function(x,...) riskcomp(x,...,op="/") ##' @export Diff <- function(x,...) 
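## Ratio() and Diff() are thin wrappers around riskcomp(). With two arguments
## and the default type=1 they return val[1]/val[2] and val[1]-val[2]. A small
## sketch (hypothetical probabilities):
##   Ratio(0.2, 0.1)  ## risk ratio: 2
##   Diff(0.2, 0.1)   ## risk difference: 0.1
##   OR(0.2, 0.1)     ## odds ratio: odds(0.2)/odds(0.1) = 2.25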
riskcomp(x,...,op="-") ################################################## ## Odds ratio ################################################## ##' @export odds <- function(x) x/(1-x) logor <- function(x) { c(log(prod(diag(x))/prod(revdiag(x))),sum(1/x)^.5) } ##' @export OR <- function(x,tabulate=FALSE,log=FALSE,...) { if (!inherits(x,c("multinomial","table"))) { val <- riskcomp(x,...,scale=odds) if (log) val <- base::log(val) return(val) } if (inherits(x,"multinomial")) { M <- x } else { M <- multinomial(x) } pos <- M$position if (ncol(pos)!=2 & ncol(pos)!=2) stop("Only for 2x2 tables") orfun <- function(p,...) { list(logOR=sum(log(p[diag(pos)]))-sum(log(p[revdiag(pos)]))) } estimate(M,orfun,back.transform=exp) } ################################################## ## Information theoretical measures ################################################## information_assoc <- function(P,base=exp(1),...) { P.row <- rowSums(P) P.col <- colSums(P) H.row <- H.col <- H <- 0 for (j in seq_along(P.col)) if (P.col[j]>0) H.col <- H.col - P.col[j]*log(P.col[j]+(P.col[j]==0),base=base) for (i in seq_along(P.row)) { if (P.row[i]>0) H.row <- H.row - P.row[i]*log(P.row[i]+(P.row[i]==0),base=base) for (j in seq_along(P.col)) { if (P[i,j]>0) H <- H - P[i,j]*log(P[i,j],base=base) } } I <- H.row+H.col-H return(list(MI=I,H=H,H.row=H.row,H.col=H.col, U.row=I/H.row,U.col=I/H.col,U.sym=2*I/(H.row+H.col))) } ##' @export information.data.frame <- function(x,...) { information(multinomial(x,marginal=TRUE),...) } ##' @export information.table <- function(x,...) { information(multinomial(x,marginal=TRUE),...) } ##' @export information.multinomial <- function(x,...) { estimate(x,function(p,object,...) { P <- object$position; P[] <- p[object$position] information_assoc(P)},...) } ################################################## ## Independence tests ################################################## independence <- function(x,...) { if (is.table(x) || is.data.frame(x) || is.matrix(x)) { x <- multinomial(x) } if (!inherits(x,"multinomial")) stop("Expected table, data.frame or multinomial object") if (length(x$levels)!=2) stop("Data from two categorical variables expected") f <- function(p) { P <- x$position; P[] <- p[x$position] n <- nrow(x$iid) k1 <- length(x$levels[[1]]) k2 <- length(x$levels[[2]]) A1 <- matrix(0,ncol=length(p),nrow=k1) for (i in seq(k1)) A1[i,x$position[i,]] <- 1 A2 <- matrix(0,ncol=length(p),nrow=k2) for (i in seq(k2)) A2[i,x$position[,i]] <- 1 P1 <- A1%*%p P2 <- A2%*%p I <- P1%*%t(P2) Q <- P-I # Q <- sum(n*P*(log(I[1,1])-P1 sum((P-I)^2) ##V <- sqrt(sum((P*n-I*n)^2/I/n) /(n*(min(k1,k2)-1))) V <- sqrt(sum((P-I)^2/I) / ((min(k1,k2)-1))) return(V) sum(n*Q^2/I)^0.25 return((sum((P-I)^2))^.5) ## V } ## M <- P*n ## O2 <- colSums(M) ## O1 <- rowSums(M) ## M[1,1]-O1[1]*O2[1]/200 ## M[2,2]-O1[2]*O2[2]/200 ## sum((M-I*n)^2/(I*n)) ## sum((P*n-I*n)^2/I/n) ## sum(Q) ## sum(Q^2) ## M <- P ## chisq.test(M,correct=FALSE) return(estimate(x,function(p) list(cramersV=f(p)),iid=TRUE,...)) e <- estimate(x,f,iid=TRUE,print=function(x,...) { cat("\tTest for independence\n\n") cat("Test statistc:\t ", formatC(x$coefmat[1]/x$coefmat[2]), "\nP-value:\t ", x$coefmat[5],"\n\n") print(estimate(x)) },...) 
return(list(p.value=e$coefmat[5])) ## Q <- sum((a$coefmat[1,1]/a$coefmat[1,2])) ## df <- nrow(a$coefmat) ## res <- list(##data.name=hypothesis, ## statistic = Q, parameter = df, ## p.value=pchisq(Q,df=1,lower.tail=FALSE), ## method = "Test for independence") ## class(res) <- "htest" ## res } ## independence(x) ## chisq.test(table(dd)) lava/R/bootstrap.R0000644000176200001440000001675713162174023013555 0ustar liggesusers##' Generic method for calculating bootstrap statistics ##' ##' @title Generic bootstrap method ##' @param x Model object ##' @param \dots Additional arguments ##' @seealso \code{bootstrap.lvm} \code{bootstrap.lvmfit} ##' @author Klaus K. Holst ##' @export bootstrap <- function(x,...) UseMethod("bootstrap") ##' Calculate bootstrap estimates of a lvm object ##' ##' Draws non-parametric bootstrap samples ##' ##' @param x \code{lvm}-object. ##' @param R Number of bootstrap samples ##' @param fun Optional function of the (bootstrapped) model-fit defining the ##' statistic of interest ##' @param data The data to resample from ##' @param control Options to the optimization routine ##' @param p Parameter vector of the null model for the parametric bootstrap ##' @param parametric If TRUE a parametric bootstrap is calculated. If FALSE a ##' non-parametric (row-sampling) bootstrap is computed. ##' @param bollenstine Bollen-Stine transformation (non-parametric bootstrap) for bootstrap hypothesis testing. ##' @param constraints Logical indicating whether non-linear parameter ##' constraints should be included in the bootstrap procedure ##' @param estimator String definining estimator, e.g. 'gaussian' (see ##' \code{estimator}) ##' @param weights Optional weights matrix used by \code{estimator} ##' @param sd Logical indicating whether standard error estimates should be ##' included in the bootstrap procedure ##' @param silent Suppress messages ##' @param parallel If TRUE parallel backend will be used ##' @param mc.cores Number of threads (if NULL foreach::foreach will be used, otherwise parallel::mclapply) ##' @param \dots Additional arguments, e.g. choice of estimator. ##' @aliases bootstrap.lvmfit ##' @usage ##' ##' \method{bootstrap}{lvm}(x,R=100,data,fun=NULL,control=list(), ##' p, parametric=FALSE, bollenstine=FALSE, ##' constraints=TRUE,sd=FALSE,silent=FALSE, ##' parallel=lava.options()$parallel, ##' mc.cores=NULL, ##' ...) ##' ##' \method{bootstrap}{lvmfit}(x,R=100,data=model.frame(x), ##' control=list(start=coef(x)), ##' p=coef(x), parametric=FALSE, bollenstine=FALSE, ##' estimator=x$estimator,weights=Weights(x),...) ##' ##' @return A \code{bootstrap.lvm} object. ##' @author Klaus K. Holst ##' @seealso \code{\link{confint.lvmfit}} ##' @keywords models regression ##' @examples ##' m <- lvm(y~x) ##' d <- sim(m,100) ##' e <- estimate(y~x, d) ##' \donttest{ ## Reduce Ex.Timings ##' B <- bootstrap(e,R=50,parallel=FALSE) ##' B ##' } ##' @export bootstrap.lvm <- function(x,R=100,data,fun=NULL,control=list(), p, parametric=FALSE, bollenstine=FALSE, constraints=TRUE,sd=FALSE,silent=FALSE, parallel=lava.options()$parallel, mc.cores=NULL, ...) 
{ coefs <- sds <- c() on.exit(list(coef=coefs[-1,], sd=sds[-1,], coef0=coefs[1,], sd0=sds[1,], model=x)) pb <- NULL if (!silent) pb <- txtProgressBar(style=lava.options()$progressbarstyle,width=40) pmis <- missing(p) ##maxcount <- 0 bootfun <- function(i) { ##maxcount <- max(i,maxcount) if (i==0) { d0 <- data } else { if (!parametric | pmis) { d0 <- data[sample(seq_len(nrow(data)),replace=TRUE),] } else { d0 <- sim(x,p=p,n=nrow(data)) } } suppressWarnings(e0 <- estimate(x,data=d0,control=control,silent=TRUE,index=FALSE,...)) if (!silent && getTxtProgressBar(pb)<(i/R)) { setTxtProgressBar(pb, i/R) } if (!is.null(fun)) { coefs <- fun(e0) newsd <- NULL } else { coefs <- coef(e0) newsd <- c() if (sd) { newsd <- e0$coef[,2] } if (constraints & length(constrain(x))>0) { cc <- constraints(e0,...) coefs <- c(coefs,cc[,1]) names(coefs)[seq(length(coefs)-length(cc[,1])+1,length(coefs))] <- rownames(cc) if (sd) { newsd <- c(newsd,cc[,2]) } } } return(list(coefs=coefs,sds=newsd)) } if (bollenstine) { e0 <- estimate(x,data=data,control=control,silent=TRUE,index=FALSE,...) mm <- modelVar(e0) mu <- mm$xi Y <- t(t(data[,manifest(e0)])-as.vector(mu)) Sigma <- mm$C S <- (ncol(Y)-1)/ncol(Y)*var(Y) sSigma <- with(eigen(Sigma),vectors%*%diag(sqrt(values),ncol=ncol(vectors))%*%t(vectors)) isS <- with(eigen(S),vectors%*%diag(1/sqrt(values),ncol=ncol(vectors))%*%t(vectors)) data <- as.matrix(Y)%*%(isS%*%sSigma) colnames(data) <- manifest(e0) } i <- 0 if (parallel) { if (is.null(mc.cores) && requireNamespace("foreach",quietly=TRUE)) { res <- foreach::"%dopar%"(foreach::foreach (i=0:R),bootfun(i)) } else { if (is.null(mc.cores)) mc.cores <- 1 res <- parallel::mclapply(0:R,bootfun,mc.cores=mc.cores) } } else { res <- lapply(0:R,bootfun) } if (!silent) { setTxtProgressBar(pb, 1) close(pb) } ## if (!silent) message("") coefs <- matrix(unlist(lapply(res, function(x) x$coefs)),nrow=R+1,byrow=TRUE) nn <- names(res[[1]]$coefs) if (!is.null(nn)) colnames(coefs) <- nn sds <- NULL if (sd) sds <- matrix(unlist(lapply(res, function(x) x$sds)),nrow=R+1,byrow=TRUE) if (!is.null(fun)) { rownames(coefs) <- c() res <- list(coef=coefs[-1,,drop=FALSE],coef0=coefs[1,],model=x) } else { colnames(coefs) <- names(res[[1]]$coefs) rownames(coefs) <- c(); if (sd) colnames(sds) <- colnames(coefs) res <- list(coef=coefs[-1,,drop=FALSE], sd=sds[-1,,drop=FALSE], coef0=coefs[1,], sd0=sds[1,], model=x, bollenstine=bollenstine) } class(res) <- "bootstrap.lvm" return(res) } ##' @export bootstrap.lvmfit <- function(x,R=100,data=model.frame(x), control=list(start=coef(x)), p=coef(x), parametric=FALSE, bollenstine=FALSE, estimator=x$estimator,weights=Weights(x),...) bootstrap.lvm(Model(x),R=R,data=data,control=control,estimator=estimator,weights=weights,parametric=parametric,bollenstine=bollenstine,p=p,...) ##' @export "print.bootstrap.lvm" <- function(x,idx,level=0.95,...) 
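## Summarises the bootstrap replicates: for each parameter the table printed
## below holds the original estimate, the bootstrap bias (mean of replicates
## minus estimate), the bootstrap standard error, and percentile confidence
## limits at the requested 'level'. If standard errors were bootstrapped
## (sd=TRUE) a second table summarises those.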
{ cat("Non-parametric bootstrap statistics (R=",nrow(x$coef),"):\n\n",sep="") uplow <-(c(0,1) + c(1,-1)*(1-level)/2) nn <- paste(uplow*100,"%") c1 <- t(apply(x$coef,2,function(x) c(mean(x), sd(x), quantile(x,uplow)))) c1 <- cbind(x$coef0,c1[,1]-x$coef0,c1[,-1,drop=FALSE]) colnames(c1) <- c("Estimate","Bias","Std.Err",nn) if (missing(idx)) { print(format(c1,...),quote=FALSE) } else { print(format(c1[idx,,drop=FALSE],...),quote=FALSE) } if (length(x$sd)>0) { c2 <- t(apply(x$sd,2,function(x) c(mean(x), sd(x), quantile(x,c(0.025,0.975))))) c2 <- cbind(c2[,1],c2[,1]-x$sd0,c2[,-1]) colnames(c2) <- c("Estimate","Bias","Std.Err","2.5%","97.5%") cat("\nStandard errors:\n") if (missing(idx)) { print(format(c2,...),quote=FALSE) } else { print(format(c2[idx,,drop=FALSE],...),quote=FALSE) } } cat("\n") invisible(x) } lava/R/finalize.R0000644000176200001440000001646013162174023013330 0ustar liggesusers##' @export `finalize` <- function(x,...) UseMethod("finalize") ##' @export `finalize.lvm` <- function(x, diag=FALSE, cor=FALSE, addcolor=TRUE, intercept=FALSE, plain=FALSE, cex, fontsize1=10, cols=lava.options()$node.color, unexpr=FALSE, addstyle=TRUE, ...) { g <- as(new("graphAM",adjMat=x$M,"directed"),"graphNEL") graph::nodeRenderInfo(g)$fill <- NA graph::nodeRenderInfo(g)$label <- NA graph::nodeRenderInfo(g)$label[vars(x)] <- vars(x) graph::nodeRenderInfo(g)$shape <- x$graphdef$shape Lab <- NULL for (i in seq_len(length(x$noderender))) { nn <- unlist(x$noderender[[i]]) if (length(nn)>0) { R <- list(as.list(x$noderender[[i]])); names(R) <- names(x$noderender)[i] if (names(x$noderender)[i]!="label") graph::nodeRenderInfo(g) <- x$noderender[i] else Lab <- R[[1]] } } if (!is.null(Lab)) { ## Ugly hack to allow mathematical annotation nn <- names(graph::nodeRenderInfo(g)$label) LL <- as.list(graph::nodeRenderInfo(g)$label) LL[names(Lab)] <- Lab if (any(unlist(lapply(LL,function(x) is.expression(x) || is.name(x) || is.call(x))))) { graph::nodeRenderInfo(g) <- list(label=as.expression(LL)) } else graph::nodeRenderInfo(g) <- list(label=LL) names(graph::nodeRenderInfo(g)$label) <- nn ii <- which(names(graph::nodeRenderInfo(g)$label)=="") if (length(ii)>0) graph::nodeRenderInfo(g)$label <- graph::nodeRenderInfo(g)$label[-ii] } graph::edgeDataDefaults(g)$futureinfo <- x$edgerender$futureinfo graph::edgeRenderInfo(g)$lty <- x$graphdef$lty graph::edgeRenderInfo(g)$lwd <- x$graphdef$lty graph::edgeRenderInfo(g)$col <- x$graphdef$col graph::edgeRenderInfo(g)$textCol <- x$graphdef$textCol graph::edgeRenderInfo(g)$arrowhead <- x$graphdef$arrowhead graph::edgeRenderInfo(g)$dir <- x$graphdef$dir graph::edgeRenderInfo(g)$arrowtail <- "none" graph::edgeRenderInfo(g)$cex <- x$graphdef$cex graph::edgeRenderInfo(g)$label <- x$graphdef$label for (i in seq_len(length(x$edgerender))) { ee <- x$edgerender[[i]] if (length(ee)>0 && names(x$edgerender)[i]!="futureinfo") { graph::edgeRenderInfo(g)[names(x$edgerender)[i]][names(ee)] <- ee } } opt <- options(warn=-1) var <- rownames(covariance(x)$rel) if (unexpr) { mylab <- as.character(graph::edgeRenderInfo(g)$label); names(mylab) <- names(graph::edgeRenderInfo(g)$label) g@renderInfo@edges$label <- as.list(mylab) } if (intercept) { ## mu <- intfix(x) ## nNA <- sum(is.na(mu)) ## if (nNA>0) ## mu[is.na(mu)] <- paste("m",seq_len(nNA)) ## mu <- unlist(mu) ## x <- addNode(mu,x) ## for (i in seq_along(mu)) { ## print(mu[i]) ## x <- addEdge(var[i], var[i], x) ## } ## x <- addattr(x,attr="shape",var=mu,val="none") } allEdges <- graph::edgeNames(g) regEdges <- c() feedback <- c() A <- 
index(x)$A if (index(x)$npar.reg>0) for (i in seq_len(nrow(A)-1)) for (j in (i+1):(ncol(A))) { if(A[i,j]==1 & A[j,i]==1) feedback <- c(feedback, paste0(var[i],"~",var[j]), paste0(var[j],"~",var[i])) if (A[j,i]==0 & x$M[j,i]!=0) { g <- graph::removeEdge(var[j],var[i],g) } if (A[i,j]==1) regEdges <- c(regEdges,paste0(var[i],"~",var[j])) if (A[j,i]==1) regEdges <- c(regEdges,paste0(var[j],"~",var[i])) } varEdges <- corEdges <- c() delta <- ifelse(diag,0,1) if (cor | diag) { for (r in seq_len(nrow(covariance(x)$rel)-delta) ) { for (s in (r+delta):ncol(covariance(x)$rel) ) { if (cor | r==s) if (covariance(x)$rel[r,s]==1 & (!any(c(var[r],var[s])%in%exogenous(x)))) { newedges <- c() if (A[r,s]!=1) { g <- graph::addEdge(var[r],var[s], g) newedges <- paste0(var[r],"~",var[s]) } else { if (A[s,r]!=1) { g <- graph::addEdge(var[s],var[r], g) newedges <- c(newedges,paste0(var[s],"~",var[r])) } } if (r==s) varEdges <- c(varEdges, newedges ) if (r!=s) corEdges <- c(corEdges,newedges) } } } } if (length(x$edgerender$futureinfo)>0) { estr <- names(x$edgerender$futureinfo$label) estr <- estr[which(unlist(lapply(estr,nchar))>0)] revestr <- sapply(estr, function(y) paste(rev(unlist(strsplit(y,"~"))),collapse="~")) revidx <- which(revestr%in%graph::edgeNames(g)) count <- 0 for (i in estr) { count <- count+1 for (f in names(x$edgerender$futureinfo)) { if (count%in%revidx) { g@renderInfo@edges[[f]][[revestr[count]]] <- x$edgerender$futureinfo[[f]][[i]] } else { g@renderInfo@edges[[f]][[i]] <- x$edgerender$futureinfo[[f]][[i]] } } } } allEdges <- unique(c(regEdges,corEdges,varEdges)) corEdges <- setdiff(corEdges,regEdges) for (e in allEdges) { dir <- "forward"; lty <- 1; arrowtail <- "none" if (e %in% feedback) { dir <- "none"; lty <- 1; arrowtail <- "closed" } if (e %in% varEdges) { dir <- "none"; lty <- 2; arrowtail <- "none" } if (e %in% corEdges) { dir <- "none"; lty <- 2; arrowtail <- "closed" } arrowhead <- "closed" estr <- e for (f in c("col","cex","textCol","lwd","lty")) { if (!(estr%in%names(graph::edgeRenderInfo(g)[[f]])) || is.na(graph::edgeRenderInfo(g)[[f]][[estr]])) g <- addattr(g,f,var=estr, val=x$graphdef[[f]], fun="graph::edgeRenderInfo") } if (addstyle) { g <- addattr(g,"lty",var=estr,val=lty,fun="graph::edgeRenderInfo") g <- addattr(g,"direction",var=estr,val=dir,fun="graph::edgeRenderInfo") g <- addattr(g,"dir",var=estr,val=dir,fun="graph::edgeRenderInfo") g <- addattr(g,"arrowhead",var=estr,val=arrowhead,fun="graph::edgeRenderInfo") g <- addattr(g,"arrowtail",var=estr,val=arrowtail,fun="graph::edgeRenderInfo") g <- addattr(g,attr="fontsize",var=estr,val=fontsize1,fun="graph::edgeRenderInfo") } if (is.null(graph::edgeRenderInfo(g)$label)) graph::edgeRenderInfo(g)$label <- expression() if (!missing(cex)) if (!is.null(cex)) graph::nodeRenderInfo(g)$cex <- cex } if (plain) { g <- addattr(g,attr="shape",var=vars(x),val="none") } else { if (addcolor) { if (is.null(x$noderender$fill)) notcolored <- vars(x) else notcolored <- vars(x)[is.na(x$noderender$fill)] nodecolor(g, intersect(notcolored,exogenous(x))) <- cols[1] nodecolor(g, intersect(notcolored,endogenous(x))) <- cols[2] nodecolor(g, intersect(notcolored,latent(x))) <- cols[3] if (!is.null(trv <- x$attributes$transform)) { nodecolor (g, names(trv)) <- cols[4] } ## nodecolor(x, intersect(notcolored,survival(x))) <- cols[4] myhooks <- gethook("color.hooks") count <- 3 for (f in myhooks) { count <- count+1 res <- do.call(f, list(x=x,subset=notcolored)) if (length(cols)>=count) { nodecolor(g,res$vars) <- cols[count] } else { nodecolor(g, res$vars) 
<- res$col } } } } options(opt) attributes(g)$feedback <- (length(feedback)>0) return(g) } lava/R/combine.R0000644000176200001440000000440213162174023013134 0ustar liggesusers excoef <- function(x,digits=2,p.digits=3,format=FALSE,fun,se=FALSE,ci=TRUE,pvalue=TRUE,...) { cc <- coef(summary(x)) res <- round(cbind(cc[,1:3,drop=FALSE],confint(x)),max(1,digits)) pvalround <- round(cc[,4], max(1, p.digits)) if (format) { res <- base::format(res,digits=digits,...) pval <- format(pvalround,p.digits=p.digits,...) } else { ## res <- format(res) pval <- format(pvalround) } pval <- paste0("p=",pvalround) pval[which(pvalround<10^(-p.digits))] <- paste0("p<0.",paste(rep("0",p.digits-1),collapse=""),"1") res <- cbind(res,pval) nc <- apply(res,2,function(x) max(nchar(x))-nchar(x)) res2 <- c() for (i in seq(nrow(res))) { row <- paste0(if(res[i,1]>=0) " " else "", res[i,1], paste(rep(" ",nc[i,1]),collapse=""), if (res[i,1]<0) " ", if (se) paste0(" (", res[i,2], ")", paste(rep(" ",nc[i,2]), collapse="")), if (ci) paste0(" [", res[i,4], ";", res[i,5], "]", paste(rep(" ",nc[i,4]+nc[i,5]),collapse="")), if (pvalue) paste0(" ", res[i,6])) res2 <- rbind(res2," "=row) } names(res2) <- names(coef(x)) if (!missing(fun)) { res2 <- c(res2,fun(x)) } res2 } ##' Report estimates across different models ##' ##' @title Report estimates across different models ##' @param x list of model objects ##' @param ... additional arguments to lower level functions ##' @author Klaus K. Holst ##' @examples ##' data(serotonin) ##' m1 <- lm(cau ~ age*gene1 + age*gene2,data=serotonin) ##' m2 <- lm(cau ~ age + gene1,data=serotonin) ##' m3 <- lm(cau ~ age*gene2,data=serotonin) ##' ##' Combine(list(A=m1,B=m2,C=m3),fun=function(x) ##' c("_____"="",R2=" "%++%format(summary(x)$r.squared,digits=2))) ##' @export Combine <- function(x,...) { ll <- lapply(x,excoef,...) nn <- lapply(ll,names) n0 <- unique(unlist(nn,use.names=FALSE)) res <- matrix(NA,ncol=length(ll),nrow=length(n0)) colnames(res) <- seq(length(ll)) rownames(res) <- n0 for (i in seq(length(ll))) { res[match(names(ll[[i]]),n0),i] <- ll[[i]] } colnames(res) <- names(ll) class(res) <- c("Combine","matrix") return(res) } ##' @export print.Combine <- function(x,...) { print(as.table(x),...) } lava/R/operators.R0000644000176200001440000000276613162174023013551 0ustar liggesusers##' For matrices a block-diagonal matrix is created. For all other ##' data types he operator is a wrapper of \code{paste}. ##' ##' Concatenation operator ##' @aliases %++% ##' @rdname op_concat ##' @usage x \%++\% y ##' @title Concatenation operator ##' @param x First object ##' @param y Second object of same class ##' @author Klaus K. Holst ##' @keywords utilities misc ##' @seealso \code{blockdiag}, \code{\link{paste}}, \code{\link{cat}}, ##' @examples ##' ## Block diagonal ##' matrix(rnorm(25),5)%++%matrix(rnorm(25),5) ##' ## String concatenation ##' "Hello "%++%" World" ##' ## Function composition ##' f <- log %++% exp ##' f(2) ##' @export `%++%` <- function(x,y) UseMethod("%++%",y) ## ##' @export ## `%+%` <- function(x,y) UseMethod("%+%",y) ##' @export `%++%.default` <- function(x,y) paste0(x,y) ##' @export `%++%.character` <- function(x,y) paste0(x,y) ##' @export `%++%.matrix` <- function(x,y) blockdiag(x,y) ##' @export `%++%.function` <- function(x,y) function(...) 
x(y(...)) notin <- Negate(get("%in%")) ##' Matching operator (x not in y) oposed to the \code{\%in\%}-operator (x in y) ##' ##' Matching operator ##' @rdname op_match ##' @aliases %ni% ##' @usage x \%ni\% y ##' @param x vector ##' @param y vector of same type as \code{x} ##' @return A logical vector. ##' @author Klaus K. Holst ##' @seealso \code{\link{match}} ##' @keywords utilities misc ##' @examples ##' ##' 1:10 %ni% c(1,5,10) ##' ##' @export "%ni%" <- function(x,y) notin(x,y) ## function(x,y) { ## is.na(match(x,y)) ## } lava/R/subset.R0000644000176200001440000000326113162174023013027 0ustar liggesusers##' Extract subset of latent variable model ##' ##' Extract measurement models or user-specified subset of model ##' ##' ##' @aliases measurement ##' @param x \code{lvm}-object. ##' @param vars Character vector or formula specifying variables to include in ##' subset. ##' @param \dots Additional arguments to be passed to the low level functions ##' @return A \code{lvm}-object. ##' @author Klaus K. Holst ##' @keywords models regression ##' @examples ##' ##' m <- lvm(c(y1,y2)~x1+x2) ##' subset(m,~y1+x1) ##' ##' @export ##' @method subset lvm subset.lvm <- function(x, vars, ...) { if (missing(vars)) return(x) if (inherits(vars,"formula")) vars <- all.vars(vars) if (!all(vars%in%vars(x))) stop("Not a subset of model") latentvars <- intersect(vars,latent(x)) ## g0 <- subGraph(vars, Graph(x)) ## res <- graph2lvm(g0) res <- lvm(vars) M <- t(x$M[vars,vars,drop=FALSE]) for (i in seq_len(nrow(M))) { if (any(M[,i]==1)) { res <- regression(res, y=rownames(M)[M[,i]==1], x=rownames(M)[i], ...) } } if (length(latentvars)>0) latent(res) <- latentvars res$cov[vars,vars] <- x$cov[vars,vars] ## Fixed parameters: res$par[vars,vars] <- x$par[vars,vars] res$fix[vars,vars] <- x$fix[vars,vars] res$covpar[vars,vars] <- x$covpar[vars,vars] res$covfix[vars,vars] <- x$covfix[vars,vars] res$mean[vars] <- x$mean[vars] res$attributes <- x$attributes for (i in seq_along(x$attributes)) { val <- x$attributes[[i]] if (length(val)>0) { val <- val[intersect(vars,names(val))] res$attributes[[i]] <- val } } index(res) <- reindex(res) return(res) } lava/R/randomslope.R0000644000176200001440000000434013162174023014044 0ustar liggesusers##' @export "randomslope<-" <- function(x,...,value) UseMethod("randomslope<-") ##' @export "randomslope<-.lvm" <- function(x, ..., value) { randomslope(x, covar=value, ...) } ##' @export `randomslope` <- function(x,...) UseMethod("randomslope") ##' @export `randomslope.lvm` <- function(x,covar,random=NULL,response=NULL,param,postfix,clear=FALSE,zero=TRUE,...) 
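## Called without 'covar', the currently flagged random-slope variables are
## returned; with only 'covar', the flag is set (or cleared when clear=TRUE).
## When 'random' and 'response' are supplied, the slope of the latent variable
## 'random' on each response is tied to the corresponding observed covariate
## by fixing regression coefficients at the covariate values.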
{ if (missing(covar)) { rsidx <- unlist(x$attributes$randomslope) if (length(rsidx)>0) return(names(rsidx)[rsidx]) else return(NULL) } if (inherits(covar,"formula")) { covar <- all.vars(covar) } if (clear) { ## x <- addattr(x,attr="shape",var=var,val="rectangle") x$attributes$randomslope[covar] <- FALSE } else { if (!is.null(random) & !is.null(response)) { if (inherits(random,"formula")) { random <- all.vars(random) } if (inherits(response,"formula")) { response <- all.vars(response) } if (length(covar)!=length(response)) stop("Vectors should be of the same length!") if (!(random%in%latent(x))) { addvar(x) <- random latent(x) <- random } if (missing(param) || !is.null(param)) { if (!missing(postfix)) newlatent <- paste0(random,postfix) else newlatent <- paste(random,covar,sep=".") covariance(x,random) <- 1 for (i in seq_along(covar)) { if (missing(param)) { x <- regression(x,to=newlatent[i],from=random) } else { if (inherits(param,"formula")) { param <- all.vars(param) } if (length(param)!=length(newlatent)) param <- rep(param,length(newlatent)) regfix(x,to=newlatent[i], from=random) <- param[i] } regfix(x,to=response[i],from=newlatent[i]) <- covar[i] latent(x) <- newlatent[i] covariance(x,newlatent[i]) <- 0 } } else { for (i in seq_along(covar)) { regfix(x,to=response[i],from=random) <- covar[i] } } } else { x$attributes$randomslope[covar] <- TRUE } } index(x) <- reindex(x) return(x) } ##' @export `randomslope.lvmfit` <- function(x,...) { randomslope(Model(x),...) } lava/R/children.R0000644000176200001440000000676113162174023013322 0ustar liggesusers##' Generic method for memberships from object (e.g. a graph) ##' ##' @title Extract children or parent elements of object ##' @export ##' @aliases children parents ancestors descendants roots sinks adjMat edgeList ##' @param object Object ##' @param \dots Additional arguments ##' @author Klaus K. Holst "children" <- function(object,...) UseMethod("children") ##' @export "parents" <- function(object,...) UseMethod("parents") ##' @export "roots" <- function(object,...) UseMethod("roots") ##' @export "sinks" <- function(object,...) UseMethod("sinks") ##' @export "descendants" <- function(object,...) UseMethod("descendants") ##' @export "ancestors" <- function(object,...) UseMethod("ancestors") ##' @export "adjMat" <- function(object,...) UseMethod("adjMat") ##' @export "edgeList" <- function(object,...) UseMethod("edgeList") ##' @export adjMat.lvm <- function(object,...) t(object$M) ##' @export adjMat.lvmfit <- function(object,...) adjMat(Model(object),...) ##' @export edgeList.lvmfit <- function(object,...) edgeList(Model(object),...) ##' @export edgeList.lvm <- function(object,labels=FALSE,...) { edgelist <- data.frame(from=NULL,to=NULL) A <- adjMat(object) for (i in 1:nrow(A)) { ii <- which(A[,i]>0) if (length(ii)>0) edgelist <- rbind(edgelist,data.frame(from=ii,to=i)) } if (labels) edgelist <- as.data.frame(apply(edgelist,2,function(x) vars(object)[x])) edgelist } ##' @export parents.lvmfit <- function(object,...) parents(Model(object),...) ##' @export children.lvmfit <- function(object,...) children(Model(object),...) ##' @export descendants.lvmfit <- function(object,...) descendants(Model(object),...) ##' @export ancestors.lvmfit <- function(object,...) ancestors(Model(object),...) ##' @export roots.lvmfit <- function(object,...) roots(Model(object),...) ##' @export sinks.lvmfit <- function(object,...) sinks(Model(object),...) ##' @export parents.lvm <- function(object,var,...) 
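## A minimal sketch (hypothetical model): with m <- lvm(y ~ x + z),
## parents(m, ~y) gives c("x","z") and children(m, ~x) gives "y". Called
## without 'var', all variable names are returned.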
{ A <- index(object)$A if (missing(var)) { return(rownames(A)) } if (inherits(var,"formula")) var <- all.vars(var) res <- lapply(var, function(v) rownames(A)[A[,v]!=0]) res <- unique(unlist(res)) if (length(res)==0) res <- NULL res } ##' @export children.lvm <- function(object,var,...) { A <- index(object)$A if (missing(var)) { return(rownames(A)) } if (inherits(var,"formula")) var <- all.vars(var) res <- lapply(var, function(v) rownames(A)[A[v,]!=0]) res <- unique(unlist(res)) if (length(res)==0) res <- NULL res } ##' @export ancestors.lvm <- function(object,x,...) { if (inherits(x,"formula")) x <- all.vars(x) res <- c() left <- setdiff(vars(object),x) count <- 0 child <- x while (length(x)>0) { count <- count+1 x <- parents(object,child) child <- intersect(x,left) res <- union(res,child) left <- setdiff(left,child) } if (length(res)==0) res <- NULL return(res) } ##' @export descendants.lvm <- function(object,x,...) { if (inherits(x,"formula")) x <- all.vars(x) res <- c() left <- setdiff(vars(object),x) count <- 0 parent <- x while (length(x)>0) { count <- count+1 x <- children(object,parent) parent <- intersect(x,left) res <- union(res,parent) left <- setdiff(left,parent) } if (length(res)==0) res <- NULL return(res) } ##' @export roots.lvm <- function(object,...) { return(exogenous(object,index=FALSE,...)) } ##' @export sinks.lvm <- function(object,...) { return(endogenous(object,top=TRUE,...)) } lava/R/makemissing.R0000644000176200001440000000154413162174023014033 0ustar liggesusers##' Generates missing entries in data.frame/matrix ##' ##' @title Create random missing data ##' @param data data.frame ##' @param p Fraction of missing data in each column ##' @param cols Which columns (name or index) to alter ##' @param rowwise Should missing occur row-wise (either none or all selected columns are missing) ##' @param nafun (Optional) function to be applied on data.frame before return (e.g. \code{na.omit} to return complete-cases only) ##' @return data.frame ##' @author Klaus K. Holst ##' @keywords utilities ##' @export makemissing <- function(data,p=0.2,cols=seq_len(ncol(data)),rowwise=FALSE,nafun=function(x) x) { p <- rep(p,length.out=length(cols)) if (!rowwise) for (i in seq_along(cols)) { data[rbinom(nrow(data),1,p[i])==1,cols[i]] <- NA } else data[which(rbinom(nrow(data),1,p)==1),cols] <- NA return(nafun(data)) } lava/R/covariance.R0000644000176200001440000002250313162174023013634 0ustar liggesusers ##' Add covariance structure to Latent Variable Model ##' ##' Define covariances between residual terms in a \code{lvm}-object. ##' ##' The \code{covariance} function is used to specify correlation structure ##' between residual terms of a latent variable model, using a formula syntax. ##' ##' For instance, a multivariate model with three response variables, ##' ##' \deqn{Y_1 = \mu_1 + \epsilon_1} ##' ##' \deqn{Y_2 = \mu_2 + \epsilon_2} ##' ##' \deqn{Y_3 = \mu_3 + \epsilon_3} ##' ##' can be specified as ##' ##' \code{m <- lvm(~y1+y2+y3)} ##' ##' Pr. default the two variables are assumed to be independent. To add a ##' covariance parameter \eqn{r = cov(\epsilon_1,\epsilon_2)}, we execute the ##' following code ##' ##' \code{covariance(m) <- y1 ~ f(y2,r)} ##' ##' The special function \code{f} and its second argument could be omitted thus ##' assigning an unique parameter the covariance between \code{y1} and ##' \code{y2}. 
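##'
##' For instance, \code{covariance(m) <- y1 ~ y2} adds the same covariance
##' with a free, unlabelled parameter.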
##' ##' Similarily the marginal variance of the two response variables can be fixed ##' to be identical (\eqn{var(Y_i)=v}) via ##' ##' \code{covariance(m) <- c(y1,y2,y3) ~ f(v)} ##' ##' To specify a completely unstructured covariance structure, we can call ##' ##' \code{covariance(m) <- ~y1+y2+y3} ##' ##' All the parameter values of the linear constraints can be given as the right ##' handside expression of the assigment function \code{covariance<-} if the ##' first (and possibly second) argument is defined as well. E.g: ##' ##' \code{covariance(m,y1~y1+y2) <- list("a1","b1")} ##' ##' \code{covariance(m,~y2+y3) <- list("a2",2)} ##' ##' Defines ##' ##' \deqn{var(\epsilon_1) = a1} ##' ##' \deqn{var(\epsilon_2) = a2} ##' ##' \deqn{var(\epsilon_3) = 2} ##' ##' \deqn{cov(\epsilon_1,\epsilon_2) = b1} ##' ##' Parameter constraints can be cleared by fixing the relevant parameters to ##' \code{NA} (see also the \code{regression} method). ##' ##' The function \code{covariance} (called without additional arguments) can be ##' used to inspect the covariance constraints of a \code{lvm}-object. ##' # ##' ##' @aliases covariance covariance<- covariance.lvm covariance<-.lvm ##' covfix<- covfix covfix<-.lvm covfix.lvm ##' variance variance<- variance.lvm variance<-.lvm ##' @param object \code{lvm}-object ##' @param var1 Vector of variables names (or formula) ##' @param var2 Vector of variables names (or formula) defining pairwise ##' covariance between \code{var1} and \code{var2}) ##' @param constrain Define non-linear parameter constraints to ensure positive definite structure ##' @param pairwise If TRUE and \code{var2} is omitted then pairwise correlation is added between all variables in \code{var1} ##' @param \dots Additional arguments to be passed to the low level functions ##' @param value List of parameter values or (if \code{var1} is unspecified) ##' @usage ##' \method{covariance}{lvm}(object, var1=NULL, var2=NULL, constrain=FALSE, pairwise=FALSE,...) <- value ##' @return A \code{lvm}-object ##' @author Klaus K. Holst ##' @seealso \code{\link{regression<-}}, \code{\link{intercept<-}}, ##' \code{\link{constrain<-}} \code{\link{parameter<-}}, \code{\link{latent<-}}, ##' \code{\link{cancel<-}}, \code{\link{kill<-}} ##' @keywords models regression ##' @export ##' @examples ##' ##' m <- lvm() ##' ### Define covariance between residuals terms of y1 and y2 ##' covariance(m) <- y1~y2 ##' covariance(m) <- c(y1,y2)~f(v) ## Same marginal variance ##' covariance(m) ## Examine covariance structure ##' ##' `covariance` <- function(object,...) UseMethod("covariance") ##' @export "variance<-" <- function(object,...,value) UseMethod("covariance<-") ##' @export `variance` <- function(object,...) UseMethod("variance") ##' @export "variance.lvm" <- function(object,...) covariance(object,...) ##' @export "variance.formula" <- function(object,...) covariance(lvm(),object,...) ##' @export "covariance.formula" <- function(object,...) covariance(lvm(),object,...) ##' @export "variance<-.lvm" <- function(object,...,value) { covariance(object,...) <- value return(object) } ##' @export "covariance<-" <- function(object,...,value) UseMethod("covariance<-") ##' @export "covariance<-.lvm" <- function(object, var1=NULL, var2=NULL, constrain=FALSE, pairwise=FALSE, ..., value) { if (!is.null(var1)) { if (inherits(var1,"formula")) { lhs <- getoutcome(var1) xf <- attributes(terms(var1))$term.labels xx <- unlist(lapply(xf, function(x) x[1])) if (length(lhs)==0) { covfix(object,var1,var2,pairwise=pairwise,...) 
<- value object$parpos <- NULL return(object) } else { yy <- decomp.specials(lhs) } } else { yy <- var1; xx <- var2 } covfix(object,var1=yy,var2=xx,pairwise=pairwise,...) <- value object$parpos <- NULL return(object) } if (is.list(value)) { for (v in value) { covariance(object,pairwise=pairwise,constrain=constrain,...) <- v } return(object) } if (inherits(value,"formula")) { lhs <- getoutcome(value) if (length(lhs)==0) { return(covariance(object,all.vars(value),constrain=constrain,pairwise=pairwise,...)) } yy <- decomp.specials(lhs) tt <- terms(value, specials=c("f","v")) xf <- attributes(terms(tt))$term.labels res <- lapply(xf,decomp.specials) nx <- length(xf) if (nx==1) { if(is.null(attr(tt,"specials")$f) | length(res[[1]])<2) { if(is.null(attr(tt,"specials")$v) & is.null(attr(tt,"specials")$f)) { for (i in yy) for (j in res[[1]]) object <- covariance(object, c(i,j), pairwise=TRUE, constrain=constrain, ...) } else { covfix(object,var1=yy,var2=NULL) <- res[[1]] } } else { covfix(object,var1=yy,var2=res[[1]][1]) <- res[[1]][2] } object$parpos <- NULL return(object) } xx <- unlist(lapply(res, function(z) z[1])) for (y in yy) for (i in seq_along(xx)) { if (length(res[[i]])>1) { covfix(object, var1=y, var2=res[[i]][1]) <- res[[i]][2] } else if ((i+1)%in%attr(tt,"specials")$f | (i+1)%in%attr(tt,"specials")$v) { covfix(object, var1=y, var2=NULL) <- res[[i]] } else { object <- covariance(object,c(y,xx[i]),pairwise=TRUE,...) } } object$parpos <- NULL return(object) } else covariance(object,value,pairwise=pairwise,...) } ##' @export `covariance.lvm` <- function(object,var1=NULL,var2=NULL,exo=FALSE,pairwise=FALSE,constrain=FALSE,value,...) { if (!missing(value)) { covariance(object,var1=var1,var2,exo=exo,pariwise=pairwise,constrain=constrain,...) <- value return(object) } if (!is.null(var1)) { if (inherits(var1,"formula")) { covariance(object,constrain=constrain, pairwise=pairwise,exo=exo,...) 
<- var1 return(object) } allvars <- var1 if (!missing(var2)) { if (inherits(var2,"formula")) var2 <- all.vars(var2) allvars <- c(allvars,var2) } if (constrain) { if (length(allvars)!=2) stop("Constraints only implemented for pairs") return(covarianceconst(object,allvars[1],allvars[2],...)) } object <- addvar(object, allvars, silent=TRUE, reindex=FALSE) xorg <- exogenous(object) exoset <- setdiff(xorg,allvars) if (!exo & length(exoset)0) { A[idxA] <- pidxA for (p in ii$parname) { idx <- which((x$par==p)) newval <- A[idx[1]] attributes(newval)$reg.idx <- idx attributes(newval)$reg.tidx <- which(t(x$par==p)) parval[[p]] <- newval if (length(idx)>1) { idxA <- c(idxA,idx[-1]) pidxA <- c(pidxA,rep(A[idx[1]],length(idx)-1)) A[idx] <- A[idx[1]] } } ## duplicate parameters } pars.var <- parBelongsTo$cov idxdiag <- (seq(ncol(P1))-1)*ncol(P1) + seq(ncol(P1)) idxP <- idxdiag[which(P1[idxdiag]==1)] pidxP <- pars.var[seq_len(length(idxP))] P[idxP] <- pidxP pars.off.diag <- pars.var if (length(pidxP)>0) { pars.off.diag <- pars.off.diag[-seq_len(length(pidxP))] } counter <- 0 if (length(pars.off.diag)>0 & ncol(P)>1) for (i in seq_len(ncol(P1)-1)) for (j in seq(i+1,nrow(P1))) { if (ii$P1[j,i]!=0) { counter <- counter+1 pos <- c(j+(i-1)*ncol(P1), i+(j-1)*ncol(P1)) P[j,i] <- P[i,j] <- pars.off.diag[counter] idxP <- c(idxP,pos); pidxP <- c(pidxP,P[j,i],P[i,j]) } } if (length(ii$covparname)>0) for (p in ii$covparname) { idx <- which(x$covpar==p) isOffDiag <- !(idx[1]%in%idxdiag) if (!(p%in%ii$parname)) { parval[[p]] <- P[idx[1]] } attributes(parval[[p]])$cov.idx <- idx if (length(idx)>1+isOffDiag) { P[idx[-seq(1+isOffDiag)]] <- parval[[p]] } if (ii$npar.reg>0 && p%in%ii$parname) { parBelongsTo$reg <- c(parBelongsTo$reg,p) idx.reg <- which(x$par==p) P[idx] <- A[idx.reg[1]] atr <- attributes(parval[[p]]) parval[[p]] <- A[idx.reg[1]] attributes(parval[[p]]) <- atr idxP <- c(idxP,idx) pidxP <- c(pidxP,rep(P[idx[1]],length(idx))) } else { idxP <- c(idxP,idx[-seq(1+isOffDiag)]) pidxP <- c(pidxP,rep(P[idx[1]],length(idx)-1-isOffDiag)) } } ## duplicate parameters idxM <- c() pidxM <- seq_len(ii$npar.mean) v <- NULL named <- sapply(x$mean, function(y) is.character(y) & !is.na(y)) fixed <- sapply(x$mean, function(y) is.numeric(y) & !is.na(y)) v <- rep(0,length(x$mean)) names(v) <- colnames(P) if (ii$npar.mean>0) { idxM <- which(ii$v1==1) v[idxM] <- pidxM } if (any(fixed)) v[fixed] <- unlist(x$mean[fixed]) for (p in ii$mparname) { idx <- which(x$mean==p) if (!(p%in%c(ii$parname,ii$covparname))) { if (length(idx)>1) { pidxM <- c(pidxM,rep(v[idx[1]],length(idx)-1)) idxM <- c(idxM,idx[-1]) } parval[[p]] <- v[idx[1]] v[idx] <- parval[[p]] } attributes(parval[[p]])$m.idx <- idx if (p %in% ii$covparname & !(p %in% ii$parname)) { parBelongsTo$cov <- c(parBelongsTo$cov,p) idx.2 <- which(x$covpar==p) v[idx] <- P[idx.2[1]] pidxM <- c(pidxM,rep(P[idx.2[1]],length(idx))) idxM <- c(idxM,idx) } if (p %in% ii$parname) { parBelongsTo$reg <- c(parBelongsTo$reg,p) idx.2 <- which(x$par==p) v[idx] <- A[idx.2[1]] pidxM <- c(pidxM,rep(A[idx.2[1]],length(idx))) idxM <- c(idxM,idx) } } ## Ex-parameters idxE <- NULL pidxE <- parBelongsTo$epar named <- sapply(x$exfix, function(y) is.character(y) & !is.na(y)) fixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y)) epar <- rep(0,length(x$exfix)) names(epar) <- names(x$expar) if (!(ii$npar.ex==0)) { idxE <- which(ii$e1==1) epar[idxE] <- pidxE } if (any(fixed)) epar[fixed] <- unlist(x$exfix[fixed]) for (p in ii$eparname) { idx <- which(x$exfix==p) if 
(!(p%in%c(ii$parname,ii$covparname,ii$mparname))) { if (length(idx)>1) { idxE <- c(idxE,idx[-1]) pidxE <- c(pidxE,rep(epar[idx[1]],length(idx)-1)) } parval[[p]] <- epar[idx[1]] } attributes(parval[[p]])$e.idx <- idx if (length(idx)>1) epar[idx[-1]] <- parval[[p]] if (p %in% setdiff(ii$covparname,c(ii$parname,ii$mparname))) { parBelongsTo$cov <- c(parBelongsTo$cov,p) idx.2 <- which(x$covpar==p) epar[idx] <- P[idx.2[1]] pidxE <- c(pidxE,rep(P[idx.2[1]],length(idx))) idxE <- c(idxE,idx) } if (p %in% setdiff(ii$parname,ii$mparname)) { parBelongsTo$reg <- c(parBelongsTo$reg,p) idx.2 <- which(x$par==p) epar[idx] <- A[idx.2[1]] pidxE <- c(pidxE,rep(A[idx.2[1]],length(idx))) idxE <- c(idxE,idx) } if (p %in% ii$mparname) { parBelongsTo$mean <- c(parBelongsTo$mean,p) idx.2 <- which(x$mean==p) epar[idx] <- v[idx.2[1]] pidxE <- c(pidxE,rep(v[idx.2[1]],length(idx))) idxE <- c(idxE,idx) } } ee <- cbind(idxE,pidxE); rownames(ee) <- names(x$expar)[ee[,1]] ## Constrained... constrain.par <- names(constrain(x)) constrain.idx <- NULL if (length(constrain.par)>0) { constrain.idx <- list() for (p in constrain.par) { reg.tidx <- reg.idx <- cov.idx <- m.idx <- e.idx <- NULL myc <- constrain(x)[[p]] xargs <- manifest(x)[na.omit(match(attributes(myc)$args,manifest(x)))] if (length(xargs)>0) { parval[xargs] <- 0 } if (p%in%ii$parname.all) { reg.idx <- which(x$par==p) reg.tidx <- which(t(x$par==p)) } if (p%in%ii$covparname.all) { cov.idx <- which(x$covpar==p) } if (p%in%ii$mparname.all) { m.idx <- which(x$mean==p) } if (p%in%ii$eparname.all) { e.idx <- which(x$exfix==p) } constrain.idx[[p]] <- list(reg.idx=reg.idx,reg.tidx=reg.tidx,cov.idx=cov.idx,m.idx=m.idx,e.idx=e.idx) } } parBelongsTo <- lapply(parBelongsTo,function(x) sort(unique(x))) return(list(mean=cbind(idxM,pidxM), reg=cbind(idxA,pidxA), cov=cbind(idxP,pidxP), epar=ee, parval=parval, constrain.idx=constrain.idx, parBelongsTo=parBelongsTo)) } matrices.lvm <- function(x,pars,meanpar=NULL,epars=NULL,data=NULL,...) { ii <- index(x) pp <- c(rep(NA,ii$npar.mean),pars,epars) ##v <- NULL v <- ii$v0 if (!is.null(meanpar) && length(meanpar)>0) { pp[seq(ii$npar.mean)] <- meanpar v[ii$mean[,1]] <- meanpar[ii$mean[,2]] } A <- ii$A A[ii$reg[,1]] <- pp[ii$reg[,2]] P <- ii$P P[ii$cov[,1]] <- pp[ii$cov[,2]] e <- NULL if (length(x$expar)>0) { e <- rep(0,length(x$expar)) fixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y)) if (any(fixed)) e[fixed] <- unlist(x$exfix[fixed]) if (nrow(ii$epar)>0) e[ii$epar[,1]] <- pp[ii$epar[,2]] names(e) <- names(x$expar) } parval <- lapply(ii$parval,function(x) { res <- pp[x]; attributes(res) <- attributes(x); res }) ## Constrained... 
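## For each parameter defined through constrain(), record the positions it
## occupies in the regression (A), covariance (P), mean (v) and extra (e)
## parameter matrices, so that the constraint can be evaluated quickly later.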
constrain.par <- names(constrain(x)) constrain.idx <- NULL cname <- constrainpar <- c() if (length(constrain.par)>0 && is.numeric(c(pars,meanpar))) { constrain.idx <- list() for (p in constrain.par) { cname <- c(cname,p) myc <- constrain(x)[[p]] xargs <- manifest(x)[na.omit(match(attributes(myc)$args,manifest(x)))] if (length(xargs)>0) { if (!is.null(data)) { parval[xargs] <- (data)[xargs] } else parval[xargs] <- 0 } val <- unlist(c(parval,constrainpar,x$mean,e)[attributes(myc)$args]) cpar <- myc(val); constrainpar <- c(constrainpar,list(cpar)); names(constrainpar) <- cname if (p%in%ii$parname.all) { if (!is.null(val)) A[ii$constrain.idx[[p]]$reg.idx] <- cpar } if (p%in%ii$covparname.all) { if (!is.null(val)) P[ii$constrain.idx[[p]]$cov.idx] <- cpar } if (p%in%ii$mparname.all) { if (!is.null(val)) v[ii$constrain.idx[[p]]$m.idx] <- cpar } if (p%in%ii$eparname.all) { if (!is.null(val)) e[ii$constrain.idx[[p]]$e.idx] <- cpar } } } return(list(A=A, P=P, v=v, e=e, parval=parval, constrain.idx=ii$constrain.idx, constrainpar=constrainpar)) } ###}}} matrices.lvm ###{{{ matrices.multigroup matrices.multigroup <- function(x, p, ...) { pp <- modelPar(x,p) res <- list() for (i in seq_len(x$ngroup)) res <- c(res, list(matrices2(x$lvm[[i]],pp$p[[i]]))) return(res) } ###}}} matrices2 <- function(x,p,...) { m0 <- p[seq_len(index(x)$npar.mean)] p0 <- p[with(index(x),seq_len(npar)+npar.mean)] e0 <- p[with(index(x),seq_len(npar.ex)+npar.mean+npar)] matrices(x,p0,m0,e0,...) } ###{{{ matrices, to be superseeded by above definition matrices.lvm <- function(x,pars,meanpar=NULL,epars=NULL,data=NULL,...) { ii <- index(x) A <- ii$A ## Matrix with fixed parameters and ones where parameters are free J <- ii$J ## Manifest variable selection matrix M0 <- ii$M0 ## Index of free regression parameters M1 <- ii$M1 ## Index of free and _unique_ regression parameters P <- ii$P ## Matrix with fixed variance parameters and ones where parameters are free P0 <- ii$P0 ## Index of free variance parameters P1 <- ii$P1 ## Index of free and _unique_ regression parameters P1.lower <- P1[lower.tri(P1)] constrain.par <- names(constrain(x)) parval <- list() if (ii$npar.reg>0) { A[which(M1==1)] <- pars[seq_len(ii$npar.reg)] for (p in ii$parname) { idx <- which((x$par==p)) newval <- A[idx[1]] attributes(newval)$reg.idx <- idx attributes(newval)$reg.tidx <- which(t(x$par==p)) parval[[p]] <- newval if (length(idx)>1) { A[idx[-1]] <- parval[[p]] } } ## duplicate parameters } if (ii$npar.reg==0) { pars.var <- pars } else { pars.var <- pars[-seq_len(ii$npar.reg)] } diag(P)[ii$which.diag] <- pars.var[seq_along(ii$which.diag)] pars.off.diag <- pars.var if (length(ii$which.diag)>0) pars.off.diag <- pars.off.diag[-seq_along(ii$which.diag)] counter <- 0 if (length(pars.off.diag)>0 & ncol(P)>1) for (i in seq_len(ncol(P1)-1)) for (j in seq(i+1,nrow(P1))) { if (ii$P1[j,i]!=0) { counter <- counter+1 P[j,i] <- pars.off.diag[counter] } } if (length(ii$covparname)>0) for (p in ii$covparname) { idx <- which(x$covpar==p) if (!(p%in%ii$parname)) { parval[[p]] <- P[idx[1]] } attributes(parval[[p]])$cov.idx <- idx if (length(idx)>1) { P[idx[-1]] <- parval[[p]] } if (ii$npar.reg>0 && p%in%ii$parname) { idx.reg <- which(x$par==p) P[idx] <- A[idx.reg[1]] atr <- attributes(parval[[p]]) parval[[p]] <- A[idx.reg[1]] ###????? attributes(parval[[p]]) <- atr } } ## duplicate parameters P[upper.tri(P)] <- t(P)[upper.tri(P)] ## Symmetrize... 
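## Mean parameters: initialise the mean vector, fill in the free means
## (positions with ii$v1 == 1) from 'meanpar', insert the fixed values from
## x$mean, and resolve labels shared with regression or covariance parameters.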
v <- NULL { named <- sapply(x$mean, function(y) is.character(y) & !is.na(y)) fixed <- sapply(x$mean, function(y) is.numeric(y) & !is.na(y)) v <- rep(0,length(x$mean)) names(v) <- colnames(P) if (!(is.null(meanpar) | ii$npar.mean==0)) v[ii$v1==1] <- meanpar if (any(fixed)) v[fixed] <- unlist(x$mean[fixed]) for (p in ii$mparname) { idx <- which(x$mean==p) if (!(p%in%c(ii$parname,ii$covparname))) { parval[[p]] <- v[idx[1]] } attributes(parval[[p]])$m.idx <- idx if (length(idx)>1) v[idx[-1]] <- parval[[p]] if (p %in% ii$covparname & !(p %in% ii$parname)) { idx.2 <- which(x$covpar==p) v[idx] <- P[idx.2[1]] } if (p %in% ii$parname) { idx.2 <- which(x$par==p) v[idx] <- A[idx.2[1]] } } } ## Ex-parameters e <- NULL { named <- sapply(x$exfix, function(y) is.character(y) & !is.na(y)) fixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y)) e <- rep(0,length(x$exfix)) names(e) <- names(x$expar) if (!(is.null(epars) | ii$npar.ex==0)) e[which(ii$e1==1)] <- epars if (any(fixed)) e[fixed] <- unlist(x$exfix[fixed]) for (p in ii$eparname) { idx <- which(x$exfix==p) if (!(p%in%c(ii$parname,ii$covparname,ii$mparname))) { parval[[p]] <- e[idx[1]] } attributes(parval[[p]])$e.idx <- idx if (length(idx)>1) e[idx[-1]] <- parval[[p]] if (p %in% setdiff(ii$covparname,c(ii$parname,ii$mparname))) { idx.2 <- which(x$covpar==p) e[idx] <- P[idx.2[1]] } if (p %in% setdiff(ii$parname,ii$mparname)) { idx.2 <- which(x$par==p) e[idx] <- A[idx.2[1]] } if (p %in% ii$mparname) { idx.2 <- which(x$mean==p) e[idx] <- v[idx.2[1]] } } } ## Constrained... constrain.idx <- NULL cname <- constrainpar <- c() if (length(constrain.par)>0 && is.numeric(c(pars,meanpar,e))) { constrain.idx <- list() for (p in constrain.par) { cname <- c(cname,p) reg.tidx <- reg.idx <- cov.idx <- m.idx <- e.idx <- NULL myc <- constrain(x)[[p]] xargs <- manifest(x)[na.omit(match(attributes(myc)$args,manifest(x)))] if (length(xargs)>0) { if (!is.null(data)) { parval[xargs] <- (data)[xargs] } else parval[xargs] <- 0 } val <- rbind(unlist(c(parval,constrainpar,x$mean,e)[attributes(myc)$args])) cpar <- myc(val); constrainpar <- c(constrainpar,list(cpar)); names(constrainpar) <- cname if (p%in%ii$parname.all) { reg.idx <- which(x$par==p) reg.tidx <- which(t(x$par==p)) if (!is.null(val)) A[reg.idx] <- cpar##myc(val) } if (p%in%ii$covparname.all) { cov.idx <- which(x$covpar==p) if (!is.null(val)) P[cov.idx] <- cpar##myc(val) } if (p%in%ii$mparname.all) { m.idx <- which(x$mean==p) if (!is.null(val)) v[m.idx] <- cpar##myc(val) } if (p%in%ii$eparname.all) { e.idx <- which(x$exfix==p) if (!is.null(val)) e[e.idx] <- cpar##myc(val) } constrain.idx[[p]] <- list(reg.idx=reg.idx,reg.tidx=reg.tidx,cov.idx=cov.idx,m.idx=m.idx,e.idx=e.idx) } } if (x$index$sparse & !is.character(class(pars)[1])) { A <- as(A,"sparseMatrix") P <- as(P,"sparseMatrix") v <- as(v,"sparseMatrix") } return(list(A=A, P=P, v=v, e=e, parval=parval, constrain.idx=constrain.idx, constrainpar=constrainpar)) } ###}}} matrices Obsolete lava/R/multigroup.R0000644000176200001440000002407713162174023013741 0ustar liggesusers###{{{ multigroup ##' @export multigroup <- function(models, datasets, fix, exo.fix=TRUE, keep=NULL, missing=FALSE, ...) 
{ nm <- length(models) if (nm!=length(datasets)) stop("Supply dataset for each model") if (nm<2) stop("Two or more groups neeeded") mynames <- names(models) ## Check for random slopes xfix <- list() for (i in seq_len(nm)) { x0 <- models[[i]] data0 <- datasets[[i]] xfix0 <- colnames(data0)[(colnames(data0)%in%parlabels(x0,exo=TRUE))] xfix <- c(xfix, list(xfix0)) } if (missing(fix)) { fix <- !any(unlist(lapply(xfix, function(x) length(x)>0))) } for (i in seq_len(nm)) { x0 <- models[[i]] data0 <- datasets[[i]] if (length(exogenous(x0)>0)) { catx <- categorical2dummy(x0,data0) models[[i]] <- catx$x; datasets[[i]] <- catx$data } if (!lava.options()$exogenous) exogenous(models[[i]]) <- NULL } models.orig <- NULL ###################### ### MLE with MAR mechanism ###################### if (missing) { parcount <- 0 reservedpars <- c() mynpar <- c() for (i in seq_len(nm)) { ## Fix some parameters (predictors,latent variables,...) d0 <- datasets[[i]][1,,drop=FALSE]; d0[,] <- 1 if (fix) models[[i]] <- fixsome(models[[i]], exo.fix=exo.fix, measurement.fix=fix, data=d0) ## Find named/labelled parameters rpar <- unique(parlabels(models[[i]])) reservedpars <- c(reservedpars, rpar) mynpar <- c(mynpar, with(index(models[[1]]), npar+npar.mean+npar.ex)) }; reservedpars <- unique(reservedpars) nonamepar <- sum(mynpar) ## Find unique parameter-names for all parameters newpars <- c() i <- 0 pos <- 1 while(pos<=nonamepar) { i <- i+1 newname <- paste0("par",i) if (!(newname%in%reservedpars)) { newpars <- c(newpars,newname) pos <- pos+1 } } pos <- 0 models0 <- list() datasets0 <- list() complidx <- c() nmodels <- 0 modelclass <- c() nmis <- c() for (i in seq_len(nm)) { myvars <- unlist(intersect(colnames(datasets[[i]]),c(vars(models[[i]]),xfix[[i]],keep))) mydata <- datasets[[i]][,myvars] if (any(is.na(mydata))) { if (i>1) pos <- pos+mynpar[i-1] models[[i]] <- baptize(models[[i]],newpars[pos+seq_len(mynpar[i])] ,overwrite=FALSE) val <- missingModel(models[[i]],mydata,fix=FALSE,keep=keep,...) nmodels <- c(nmodels,length(val$models)) complidx <- c(complidx,val$pattern.allcomp+nmodels[i]+1) nmis0 <- rowSums(val$patterns); allmis <- which(nmis0==ncol(val$patterns)) if (length(allmis)>0) nmis0 <- nmis0[-allmis] nmis <- c(nmis,nmis0) datasets0 <- c(datasets0, val$datasets) models0 <- c(models0, val$models) modelclass <- c(modelclass,rep(i,length(val$models))) } else { datasets0 <- c(datasets0, list(mydata)) models0 <- c(models0, list(models[[i]])) modelclass <- c(modelclass,i) nmis <- c(nmis,0) } } models.orig <- models suppressWarnings( val <- multigroup(models0,datasets0,fix=FALSE,missing=FALSE,exo.fix=TRUE,...) ) val$models.orig <- models.orig; val$missing <- TRUE val$complete <- complidx-1 val$mnames <- mynames attributes(val)$modelclass <- modelclass attributes(val)$nmis <- nmis return(val) } ###################### ### Usual analysis: ###################### warned <- FALSE for (i in seq_len(nm)) { if (inherits(datasets[[i]],c("data.frame","matrix"))) { myvars <- intersect(colnames(datasets[[i]]),c(vars(models[[i]]),xfix[[i]],keep)) if (any(is.na(datasets[[i]][,myvars]))) { if (!warned) warning(paste0("Missing data encountered. 
Going for complete-case analysis")) warned <- TRUE datasets[[i]] <- na.omit(datasets[[i]][,myvars,drop=FALSE]) } } } exo <- exogenous(models) means <- lvms <- As <- Ps <- ps <- exs <- datas <- samplestat <- list() for (i in seq_len(nm)) { if (!is.null(exogenous(models[[i]]))) { if (any(is.na(exogenous(models[[i]])))) { exogenous(models[[i]]) <- exo } } mydata <- datasets[[i]] mymodel <- fixsome(models[[i]], data=mydata, measurement.fix=fix, exo.fix=exo.fix) mymodel <- updatelvm(mymodel,zeroones=TRUE,deriv=TRUE) P <- index(mymodel)$P1; P[P==0] <- NA P[!is.na(P) & !is.na(mymodel$covpar)] <- mymodel$covpar[!is.na(P) & !is.na(mymodel$covpar)] A <- index(mymodel)$M1; A[A==0] <- NA A[!is.na(A) & !is.na(mymodel$par)] <- mymodel$par[!is.na(A) & !is.na(mymodel$par)] mu <- unlist(mymodel$mean)[which(index(mymodel)$v1==1)] #ex <- names(mymodel$expar)[which(index(mymodel)$e1==1)] ex <- mymodel$exfix if (length(ex)>0) { if (any(is.na(ex))) ex[is.na(ex)] <- mymodel$expar[is.na(ex)] ex <- ex[which(index(mymodel)$e1==1)] } p <- pars(mymodel, A, P, e=ex) p[p=="1"] <- NA means <- c(means, list(mu)) lvms <- c(lvms, list(mymodel)) datas <- c(datas, list(mydata)) samplestat <- c(samplestat, list(procdata.lvm(models[[i]],data=mydata))) As <- c(As, list(A)) Ps <- c(Ps, list(P)) ps <- c(ps, list(p)) exs <- c(exs, list(ex)) }; ###### pp <- unlist(ps) parname <- unique(pp[!is.na(pp)]) pidx <- is.na(char2num(parname)) parname <- unique(unlist(pp[!is.na(pp)])); nfree <- sum(is.na(pp)) + length(parname) if (nfree>0) { pp0 <- lapply(ps, is.na) usedname <- cbind(parname, rep(NA,length(parname))) counter <- 1 pres <- pres0 <- pp0 for (i in seq_len(length(pp0))) { if (length(pp0[[i]]>0)) for (j in seq_len(length(pp0[[i]]))) { pidx <- match(ps[[i]][j],parname) if (pp0[[i]][j]) { pres[[i]][j] <- paste0("p",counter) pres0[[i]][j] <- counter counter <- counter+1 } else if (!is.na(pidx)) { if (!is.na(usedname[pidx,2])) { pres[[i]][j] <- usedname[pidx,2] pres0[[i]][j] <- char2num(substr(pres[[i]][j],2,nchar(pres[[i]][j]))) } else { val <- paste0("p",counter) pres[[i]][j] <- val pres0[[i]][j] <- counter usedname[pidx,2] <- val counter <- counter+1 } } else { pres[[i]][j] <- NA } } } mypar <- paste0("p",seq_len(nfree)) myparPos <- pres0 myparpos <- pres myparlist <- lapply(pres, function(x) x[!is.na(x)]) } else { myparPos <- NULL mypar <- NULL myparpos <- NULL myparlist <- NULL } ### Mean parameter mm <- unlist(means) meanparname <- unique(mm[!is.na(mm)]) midx <- is.na(char2num(meanparname)); meanparname <- meanparname[midx] any.mean <- sum(is.na(mm)) + length(meanparname) nfree.mean <- sum(is.na(mm)) + length(setdiff(meanparname,parname)) ## mean.fixed <- na.omit(match(parname,mm)) mean.omit <- lapply(means,function(x) na.omit(match(parname,x))) nmean <- lapply(means,length) if (any.mean>0) { mm0 <- lapply(means, is.na) usedname <- cbind(meanparname, rep(NA,length(meanparname))) counter <- 1 res0 <- res <- mm0 for (i in seq_len(length(mm0))) { if (length(mm0[[i]])>0) for (j in seq_len(length(mm0[[i]]))) { midx <- match(means[[i]][j],meanparname) if (mm0[[i]][j]) { res[[i]][j] <- paste0("m",counter) res0[[i]][j] <- counter counter <- counter+1 } else if (!is.na(midx)) { pidx <- match(meanparname[midx],pp) if (!is.na(pidx)) { res[[i]][j] <- unlist(myparlist)[pidx] res0[[i]][j] <- char2num(substr(res[[i]][j],2,nchar(res[[i]][j]))) + nfree.mean ##nmean[[i]] } else { if (!is.na(usedname[midx,2])) { res[[i]][j] <- usedname[midx,2] res0[[i]][j] <- char2num(substr(res[[i]][j],2,nchar(res[[i]][j]))) } else { val <- paste0("m",counter) 
res[[i]][j] <- val res0[[i]][j] <- counter usedname[midx,2] <- val counter <- counter+1 } } } else { res[[i]][j] <- NA } } } mymeanPos <- res0 mymeanpos <- res mymeanlist <- lapply(res, function(x) x[!is.na(x)]) mymean <- unique(unlist(mymeanlist)) } else { mymeanPos <- NULL mymean <- NULL mymeanpos <- NULL mymeanlist <- NULL } ### Extra parameters N <- nfree+nfree.mean m0 <- p0 <- c() coefs <- coefsm <- mm0 <- mm <- pp0 <- pp <- c() for (i in seq_len(length(myparPos))) { mi <- mymeanPos[[i]] nmi <- length(mi) pi <- myparPos[[i]] p1 <- setdiff(pi,p0) p0 <- c(p0,p1) ## pp0 <- c(pp0,list(match(p1,pi)+nfree.mean)) pp0 <- c(pp0,list(match(p1,pi))) if (length(mean.omit[[i]])>0) mi <- mi[-mean.omit[[i]]] m1 <- setdiff(mi,m0) m0 <- c(m0,m1) mm0 <- c(mm0,list(match(m1,mi))) pp <- c(pp,list(c(m1,p1+nfree.mean))) if (length(p1)>0) coefs <- c(coefs,paste(i,coef(lvms[[i]],fix=FALSE,mean=FALSE)[pp0[[i]]],sep="@")) if (length(m1)>0) { coefsm0 <- paste(i,coef(lvms[[i]],fix=FALSE,mean=TRUE)[mm0[[i]]],sep="@") coefsm <- c(coefsm,coefsm0) } } coefs <- c(coefsm,coefs) res <- list(npar=nfree, npar.mean=nfree.mean, ngroup=length(lvms), names=mynames, lvm=lvms, data=datas, samplestat=samplestat, A=As, P=Ps, expar=exs, meanpar=names(mu), name=coefs, coef=pp, coef.idx=pp0, par=mypar, parlist=myparlist, parpos=myparpos, mean=mymean, meanlist=mymeanlist, meanpos=mymeanpos, parposN=myparPos, meanposN=mymeanPos, models.orig=models.orig, missing=missing ) class(res) <- "multigroup" checkmultigroup(res) return(res) } ###}}} ###{{{ checkmultigroup checkmultigroup <- function(x) { ## Check validity: for (i in seq_len(x$ngroup)) { if (nrow(x$data[[i]])<2) { warning("With only one observation in the group, all parameters should be inherited from another a group!") } } } ###}}} checkmultigroup lava/R/logLik.R0000644000176200001440000002473113162174023012750 0ustar liggesusers###{{{ logLik.lvm ##' @export logLik.lvm <- function(object,p,data,model="gaussian",indiv=FALSE,S,mu,n,debug=FALSE,weights=NULL,data2=NULL,...) { cl <- match.call() xfix <- colnames(data)[(colnames(data)%in%parlabels(object,exo=TRUE))] constr <- lapply(constrain(object), function(z)(attributes(z)$args)) xconstrain <- intersect(unlist(constr), manifest(object)) xconstrainM <- TRUE if (length(xconstrain)>0) { constrainM <- names(constr)%in%unlist(object$mean) for (i in seq_len(length(constr))) { if (!constrainM[i]) { if (any(constr[[i]]%in%xconstrain)) xconstrainM <- FALSE } } } Debug(xfix,debug) if (missing(n)) { n <- nrow(data) if (is.null(n)) n <- data$n } lname <- paste0(model,"_logLik.lvm") logLikFun <- get(lname) if (length(xfix)>0 | (length(xconstrain)>0 & !xconstrainM & !lava.options()$test & model!="gaussian")) { ##### Random slopes! 
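## This branch handles "random slopes", i.e. models where a parameter label
## coincides with a column of 'data' so that the coefficient is given by an
## observed covariate. A minimal sketch (cf. the 'regression' documentation
## elsewhere in this package; names are illustrative only):
##   m <- lvm(c(y1,y2,y3) ~ u); latent(m) <- ~u
##   regression(m, y2 ~ u) <- "t2"   ## loading given by the variable 't2'
## The likelihood must then be evaluated row by row, updating the design
## matrices for each observation as done in 'myfun' below.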
x0 <- object if (length(xfix)>0) { Debug("random slopes...",debug) nrow <- length(vars(object)) xpos <- lapply(xfix,function(y) which(regfix(object)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) for (i in seq_along(myfix$var)) for (j in seq_along(myfix$col[[i]])) { regfix(x0, from=vars(x0)[myfix$row[[i]][j]],to=vars(x0)[myfix$col[[i]][j]]) <- data[1,myfix$var[[i]]] } index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) } k <- length(index(x0)$manifest) myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { index(x0)$A[cbind(myfix$row[[i]],myfix$col[[i]])] <- data[ii,myfix$var[[i]]] } return(logLikFun(x0,data=data[ii,,drop=FALSE], p=p,weights=weights[ii,,drop=FALSE],data2=data2[ii,,drop=FALSE], model=model,debug=debug,indiv=indiv,...)) } loglik <- sapply(seq_len(nrow(data)),myfun) if (!indiv) { loglik <- sum(loglik) n <- nrow(data) attr(loglik, "nall") <- n attr(loglik, "nobs") <- n attr(loglik, "df") <- length(p) class(loglik) <- "logLik" } return(loglik) } if (xconstrainM) { xconstrain <- c() for (i in seq_len(length(constrain(object)))) { z <- constrain(object)[[i]] xx <- intersect(attributes(z)$args,manifest(object)) if (length(xx)>0) { warg <- setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(object))[i] y <- names(which(unlist(lapply(intercept(object),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain <- c(xconstrain,list(el)) } } if (length(xconstrain)>0) { yconstrain <- unlist(lapply(xconstrain,function(x) x$endo)) iconstrain <- unlist(lapply(xconstrain,function(x) x$idx)) Mu <- matrix(0,nrow(data),length(vars(object))); colnames(Mu) <- vars(object) M <- modelVar(object,p=p,data=data) M$parval <- c(M$parval, object$mean[unlist(lapply(object$mean,is.numeric))]) for (i in seq_len(length(xconstrain))) { pp <- unlist(M$parval[xconstrain[[i]]$warg]); myidx <- with(xconstrain[[i]],order(c(wargidx,exoidx))) mu <- with(xconstrain[[i]], apply(data[,exo,drop=FALSE],1, function(x) { func(unlist(c(pp,x))[myidx]) })) Mu[,xconstrain[[i]]$endo] <- mu } offsets <- Mu%*%t(M$IAi)[,endogenous(object),drop=FALSE] object$constrain[iconstrain] <- NULL object$mean[yconstrain] <- 0 loglik <- do.call(lname, c(list(object=object,p=p,data=data,indiv=indiv,weights=weights,data2=data2,offset=offsets),list(...))) } else { cl[[1]] <- logLikFun loglik <- eval.parent(cl) } } else { loglik <- 0 if (length(xconstrain)>0 && NROW(data)>1) { for (ii in seq(nrow(data))) { cl$data <- data[ii,] cl$weights <- weights[ii,] cl$data2 <- data2[ii,] loglik <- loglik+eval.parent(cl) } } else { cl[[1]] <- logLikFun loglik <- eval.parent(cl) } } if (is.null(attr(loglik,"nall"))) attr(loglik, "nall") <- n if (is.null(attr(loglik,"nobs"))) attr(loglik, "nobs") <- n##-length(p) if (is.null(attr(loglik,"df"))) attr(loglik, "df") <- length(p) class(loglik) <- "logLik" return(loglik) } ###}}} ###{{{ gaussian_loglik ##' @export gaussian_logLik.lvm <- function(object,p,data, type=c("cond","sim","exo","sat","cond2"), weights=NULL, indiv=FALSE, S, mu, n, offset=NULL, debug=FALSE, meanstructure=TRUE,...) 
{ exo.idx <- with(index(object), exo.obsidx) endo.idx <- with(index(object), endo.obsidx) if (type[1]=="exo") { if (length(exo.idx)==0 || is.na(exo.idx)) return(0) } cl <- match.call() if (type[1]=="cond") { cl$type <- "sim" L0 <- eval.parent(cl) cl$type <- "exo" L1 <- eval.parent(cl) loglik <- L0-L1 return(loglik) } if (missing(n)) { if (is.vector(data)) n <- 1 else n <- nrow(data) } k <- length(index(object)$manifest) if (!is.null(offset) && type[1]!="exo") { data[,colnames(offset)] <- data[,colnames(offset)]-offset } if (type[1]=="sat") { if (missing(S)) { d0 <- procdata.lvm(object,data=data) S <- d0$S; mu <- d0$mu; n <- d0$n } if (missing(p)) p <- rep(1,length(coef(object))) L1 <- logLik(object,p,data,type="exo",meanstructure=meanstructure) ## Sigma <- (n-1)/n*S ## ML = 1/n * sum((xi-Ex)^2) Sigma <- S loglik <- -(n*k)/2*log(2*base::pi) -n/2*(log(det(Sigma)) + k) - L1 P <- length(endo.idx) k <- length(exo.idx) npar <- P*(1+(P-1)/2) if (meanstructure) npar <- npar+ (P*k + P) attr(loglik, "nall") <- n attr(loglik, "nobs") <- n attr(loglik, "df") <- npar class(loglik) <- "logLik" return(loglik) } myidx <- switch(type[1], sim = seq_along(index(object)$manifest), cond = { endo.idx }, cond2 = { endo.idx }, exo = { exo.idx } ) mom <- moments(object, p, conditional=(type[1]=="cond2"), data=data) if (!lava.options()$allow.negative.variance && any(diag(mom$P)<0)) return(NaN) C <- mom$C xi <- mom$xi if (type[1]=="exo") { C <- C[exo.idx,exo.idx,drop=FALSE] xi <- xi[exo.idx,drop=FALSE] } Debug(list("C=",C),debug) k <- nrow(C) iC <- Inverse(C,det=TRUE, symmetric = TRUE) detC <- attributes(iC)$det if (!is.null(weights)) { weights <- cbind(weights) K <- length(exo.idx)+length(endo.idx) if (ncol(weights)!=1 & ncol(weights)!=K) { w.temp <- weights weights <- matrix(1,nrow=nrow(weights),ncol=K) weights[,endo.idx] <- w.temp } if (type=="exo") weights <- NULL } notdatalist <- (!is.list(data) | is.data.frame(data)) if (missing(n)) if (!missing(data)) n <- NROW(data) if (!missing(n)) if (notdatalist & (n<2 | indiv | !is.null(weights))) { if (n==1) data <- rbind(data) res <- numeric(n) data <- data[,index(object)$manifest,drop=FALSE] loglik <- 0; for (i in seq_len(n)) { ti <- as.numeric(data[i,myidx]) if (meanstructure) { ti <- cbind(ti-as.numeric(xi)) } if (!is.null(weights)) { W <- diag(weights[i,],nrow=length(weights[i,])) val <- -k/2*log(2*base::pi) -1/2*log(detC) - 1/2*(t(ti)%*%W)%*%iC%*%(ti) } else { val <- -k/2*log(2*base::pi) -1/2*log(detC) - 1/2*t(ti)%*%iC%*%(ti) } if (indiv) res[i] <- val loglik <- loglik + val } if (indiv) return(res) } else { if (missing(S)) { d0 <- procdata.lvm(object,data=data) S <- d0$S; mu <- d0$mu; n <- d0$n } S <- S[myidx,myidx,drop=FALSE] mu <- mu[myidx,drop=FALSE] T <- S if (meanstructure) { W <- crossprod(rbind(mu-xi)) T <- S+W } loglik <- -(n*k)/2*log(2*base::pi) -n/2*(log(detC) + tr(T%*%iC)) } return(loglik) } ###}}} ###{{{ logLik.lvmfit ##' @export logLik.lvmfit <- function(object, p=coef(object), data=model.frame(object), model=object$estimator, weights=Weights(object), data2=object$data$data2, ...) { logLikFun <- paste0(model,"_logLik.lvm") if (!exists(logLikFun)) { model <- "gaussian" } l <- logLik.lvm(object$model0,p,data,model=model,weights=weights, data2=data2, ...) return(l) } ###}}} logLik.lvmfit ###{{{ logLik.lvm.missing ##' @export logLik.lvm.missing <- function(object, p=pars(object), model=object$estimator, weights=Weights(object$estimate), ...) { logLik(object$estimate$model0, p=p, model=model, weights=weights, ...) 
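## (Re-evaluates the likelihood of the underlying fitted model,
##  object$estimate$model0, at the supplied parameter vector using the
##  weights from the missing-data (MAR) augmented fit.)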
} ###}}} ###{{{ logLik.multigroup ##' @export logLik.multigroup <- function(object,p,data=object$data,weights=NULL,type=c("cond","sim","exo","sat"),...) { res <- procrandomslope(object) pp <- with(res, modelPar(model,p)$p) if (type[1]=="sat") { n <- 0 df <- 0 loglik <- 0 for (i in seq_len(object$ngroup)) { m <- Model(object)[[i]] L <- logLik(m,p=pp[[i]],data=object$data[[i]],type="sat") df <- df + attributes(L)$df loglik <- loglik + L n <- n + object$samplestat[[i]]$n } attr(loglik, "nall") <- n attr(loglik, "nobs") <- n##-df attr(loglik, "df") <- df class(loglik) <- "logLik" return(loglik) } n <- 0 loglik <- 0; for (i in seq_len(object$ngroup)) { n <- n + object$samplestat[[i]]$n val <- logLik(object$lvm[[i]],pp[[i]],data[[i]],weights=weights[[i]],type=type,...) loglik <- loglik + val } attr(loglik, "nall") <- n attr(loglik, "nobs") <- n##-length(p) attr(loglik, "df") <- length(p) class(loglik) <- "logLik" return(loglik) } ###}}} logLik.multigroup ###{{{ logLik.multigroupfit ##' @export logLik.multigroupfit <- function(object, p=pars(object), weights=Weights(object), model=object$estimator, ...) { logLik(object$model0,p=p,weights=weights,model=model,...) } ###}}} logLik.multigroup lava/R/index.sem.R0000644000176200001440000002421113162174023013412 0ustar liggesusers##' @export updatelvm <- function(x,mean=TRUE,...) { index(x) <- reindex(x,mean=mean,...) x$parpos <- parpos(x,mean=mean,...) return(x) } ##' @export "index" <- function(x,...) UseMethod("index") ##' @export "index<-" <- function(x,...,value) UseMethod("index<-") ##' @export "index.lvm" <- function(x,...) { x$index } ##' @export "index.lvmfit" <- function(x,...) { index(Model(x)) } ##' @export "index<-.lvm" <- function(x,...,value) { x$index <- value; return(x) } ##' @export "index<-.lvmfit" <- function(x,...,value) { Model(x)$index <- value; return(x) } ### A ## Matrix with fixed parameters and ones where parameters are free ### J ## Manifest variable selection matrix ### M0 ## Index of free regression parameters ### M1 ## Index of free and _unique_ regression parameters ### P ## Matrix with fixed variance parameters and ones where parameters are free ### P0 ## Index of free variance parameters ### P1 ## Index of free and _unique_ regression parameters ### npar.var ## Number of covariance parameters ##' @export `reindex` <- function(x, sparse=FALSE,standard=TRUE,zeroones=FALSE,deriv=FALSE,mean=TRUE) { ## Extract indices of parameters from model x$parpos <- NULL M <- x$M eta <- latent(x) ## Latent variables/Factors m <- length(eta) obs <- manifest(x) ## Manifest/Observed variables endo <- endogenous(x) exo <- exogenous(x) ##,index=FALSE) allvars <- vars(x) eta.idx <- na.omit(match(eta,allvars)) obs.idx <- na.omit(match(obs,allvars)) exo.idx <- na.omit(match(exo,allvars)) exo.obsidx <- na.omit(match(exo,obs)) endo.obsidx <- na.omit(match(endo,obs)) fix.idx <- !is.na(x$fix) ## Index of fixed parameters covfix.idx <- !is.na(x$covfix) ## Index of fixed covariance parameters constrain.par <- NULL if (length(constrain(x))>0) constrain.par <- names(constrain(x)) M0 <- M; M0[fix.idx] <- 0 ## Matrix of indicators of free regression-parameters (removing fixed parameters) M1 <- M0; ## Matrix of indiciator of free _unique_ regression parameters (removing fixed _and_ duplicate parameters) parname <- unique(x$par[!is.na(x$par)]) ## parname.all <- unique(x$par[!is.na(x$par)]) ## parname <- setdiff(parname.all,constrain.par) for (p in parname) { ii <- which(x$par==p) if (length(ii)>1) M1[ii[-1]] <- 0 if (p %in% constrain.par) M0[ii] <- M1[ii] 
<- 0 } npar.reg <- sum(M1) ## Number of free regression parameters P <- x$cov; P0 <- P; P0[covfix.idx] <- 0 ## Matrix of indicators of free covariance-parameters (removing fixed parameters) if (length(exo.idx)>0) P0[exo.idx,exo.idx] <- 0 ## 6/1-2011 P1 <- P0 ## Matrix of indiciator of free _unique_ variance parameters (removing fixed _and_ duplicate parameters) covparname <- unique(x$covpar[!is.na(x$covpar)]) for (p in covparname) { ii <- which(x$covpar==p) if (length(ii)>1) P1[ii[-1]] <- 0 if (p%in%c(parname,constrain.par)) P0[ii] <- P1[ii] <- 0 } ## P1. <- P1[-exo.idx,-exo.idx] npar.var <- sum(c(diag(P1),P1[lower.tri(P1)])) parnames <- paste0("p", seq_len(npar.reg+npar.var)) A <- M A[fix.idx] <- x$fix[fix.idx] ## ... with fixed parameters in plac P[covfix.idx] <- x$covfix[covfix.idx] ## ... with fixed parameters in plac px <- Jy <- J <- I <- diag(nrow=length(vars(x))) if (m>0) { J[eta.idx,eta.idx] <- 0; J <- J[-eta.idx,,drop=FALSE] } ## Selection matrix (selecting observed variables) { ## Selection matrix (selection endogenous variables) if (length(c(eta.idx,exo.idx))>0) { Jy[c(eta.idx,exo.idx),c(eta.idx,exo.idx)] <- 0; Jy <- Jy[-c(eta.idx,exo.idx),,drop=FALSE] } ## Cancelation matrix (cancels rows with exogenous variables) px[exo.idx,exo.idx] <- 0 } ## Creating indicitor of free mean-parameters fixed <- sapply(x$mean, function(y) is.numeric(y) & !is.na(y)) named <- sapply(x$mean, function(y) is.character(y) & !is.na(y)) mparname <- NULL if (length(named)>0) mparname <- unlist(unique(x$mean[named])) v0 <- rep(1,length(x$mean)) ## Vector of indicators of free mean-parameters v0[exo.idx] <- 0 if (length(fixed)>0) v0[fixed] <- 0; v1 <- v0 for (p in mparname) { idx <- which(x$mean==p) if (length(idx)>1) { ## print(idx[-1]) v1[idx[-1]] <- 0 } if (p%in%c(parname,covparname,constrain.par)) v0[idx] <- v1[idx] <- 0 } ## duplicate parameters ### ### Extra parameters ### efixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y)) enamed <- sapply(x$exfix, function(y) is.character(y) & !is.na(y)) if(length(enamed)>0){ eparname <- unlist(unique(x$exfix[enamed])) } else{ eparname <- NULL } ## Extra parameters e0 <- rep(1,length(x$expar)) ## Indicators of free extra par. 
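## (As for the regression (M0/M1) and variance (P0/P1) indicator matrices
##  above, e0 flags the free extra parameters once fixed values and labels
##  already used by other parameter types are removed, while e1 below
##  additionally drops duplicates so that each label is only counted once.)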
if (length(efixed)>0) e0[efixed] <- 0 e1 <- e0 for (p in eparname) { idx <- which(x$exfix==p) if (length(idx)>1) { e1[idx[-1]] <- 0 } if (p%in%c(parname,covparname,constrain.par,mparname)) e0[idx] <- e1[idx] <- 0 } ## duplicate parameters ## Return: ## Adjacency-matrix (M) ## Matrix of regression-parameters (0,1) _with_ fixed parameters (A) ## Matrix of variance-parameters (indicators 0,1) (P) ## Manifest selection matrix (J), ## Position of variables matrix (Apos), ## Position of covariance variables matrix (Ppos), ## Position/Indicator matrix of free regression parameters (M0) res <- list(vars=allvars, manifest=obs, exogenous=exo, latent=eta, endogenous=endo, exo.idx=exo.idx, eta.idx=eta.idx, exo.obsidx=exo.obsidx, endo.obsidx=endo.obsidx, obs.idx=obs.idx, endo.idx=setdiff(obs.idx,exo.idx)) if (standard) { res <- c(res, list(M=M, A=A, P=P, P0=P0, P1=P1, M0=M0, M1=M1, v0=v0, v1=v1, e0=e0, e1=e1, npar=(npar.reg+npar.var), npar.reg=npar.reg, npar.var=npar.var, npar.mean=sum(v1), npar.ex=sum(e1), constrain.par=constrain.par)) npar.total <- res$npar+res$npar.mean+res$npar.ex which.diag <- NULL if (length(P1)>0) which.diag <- which(diag(P1==1)) res <- c(res, list(parname.all=parname, parname=setdiff(parname,constrain.par), which.diag=which.diag, covparname.all=covparname, covparname=setdiff(covparname,constrain.par), meanfixed=fixed, meannamed=named, mparname.all=mparname, mparname=setdiff(mparname,constrain.par), eparname.all=eparname, eparname=setdiff(eparname,constrain.par), J=J, Jy=Jy, px=px, sparse=sparse)) parname.all.reg.idx <- parname.all.reg.tidx <- parname.reg.tidx <- parname.reg.idx <- c() for (p in res$parname.all) { ipos <- which((x$par==p)) tipos <- which(t(x$par==p)) if (p%in%res$parname) { parname.reg.idx <- c(parname.reg.idx, list(ipos)) parname.reg.tidx <- c(parname.reg.tidx, list(tipos)) } parname.all.reg.idx <- c(parname.all.reg.idx, list(ipos)) parname.all.reg.tidx <- c(parname.all.reg.tidx, list(tipos)) }; if (length(parname.reg.idx)>0) { names(parname.reg.idx) <- names(parname.reg.tidx) <- res$parname } if (length(parname.all.reg.idx)>0) { names(parname.all.reg.idx) <- names(parname.all.reg.tidx) <- res$parname.all } covparname.all.idx <- covparname.idx <- c() for (p in res$covparname.all) { ipos <- which(x$covpar==p) if (p%in%res$covparname) covparname.idx <- c(covparname.idx, list(ipos)) covparname.all.idx <- c(covparname.all.idx, list(ipos)) }; if (length(covparname.idx)>0) names(covparname.idx) <- res$covparname if (length(covparname.all.idx)>0) names(covparname.all.idx) <- res$covparname.all mparname.all.idx <- mparname.idx <- c() for (p in res$mparname.all) { ipos <- which(x$mean==p) if (p%in%mparname) mparname.idx <- c(mparname.idx, list(ipos)) mparname.all.idx <- c(mparname.all.idx, list(ipos)) }; if (length(mparname.idx)>0) names(mparname.idx) <- res$mparname if (length(mparname.all.idx)>0) names(mparname.all.idx) <- res$mparname.all eparname.all.idx <- eparname.idx <- c() for (p in res$eparname.all) { ipos <- which(x$exfix==p) if (p%in%eparname) eparname.idx <- c(eparname.idx, list(ipos)) eparname.all.idx <- c(eparname.all.idx, list(ipos)) }; if (length(eparname.idx)>0) names(eparname.idx) <- res$eparname if (length(eparname.all.idx)>0) names(eparname.all.idx) <- res$eparname.all res <- c(res, list(mparname.idx=mparname.idx, covparname.idx=covparname.idx, parname.reg.idx=parname.reg.idx, parname.reg.tidx=parname.reg.tidx, mparname.all.idx=mparname.all.idx, eparname.all.idx=eparname.all.idx, covparname.all.idx=covparname.all.idx, 
parname.all.reg.idx=parname.all.reg.idx, parname.all.reg.tidx=parname.all.reg.tidx )) } else { res <- index(x) } if (zeroones) { if (sparse) { if (!requireNamespace("Matrix",quietly=TRUE)) stop("package Matrix not available") Ik <- Matrix::Diagonal(length(obs)) Im <- Matrix::Diagonal(ncol(A)) Kkk <- NULL J <- as(J, "sparseMatrix") Jy <- as(Jy, "sparseMatrix") px <- as(px, "sparseMatrix") } else { Ik <- diag(nrow=length(obs)) Im <- diag(nrow=ncol(A)) } Kkk <- NULL res[c("Ik","Im","Kkk")] <- NULL res <- c(res, list(Ik=Ik, Im=Im, Kkk=Kkk)) } if (deriv && length(P)>0) { if (res$npar.mean>0 & mean) D <- deriv.lvm(x,meanpar=rep(1,res$npar.mean),zeroones=TRUE) else D <- deriv.lvm(x,meanpar=NULL,zeroones=TRUE) res[c("dA","dP","dv")] <- NULL res <- c(res, list(dA=D$dA, dP=D$dP, dv=D$dv)) } if (length(P)>0) res <- c(res,mat.lvm(x,res)) return(res) } lava/R/regression.R0000644000176200001440000002261613162174023013707 0ustar liggesusers ##' Add regression association to latent variable model ##' ##' Define regression association between variables in a \code{lvm}-object and ##' define linear constraints between model equations. ##' ##' ##' The \code{regression} function is used to specify linear associations ##' between variables of a latent variable model, and offers formula syntax ##' resembling the model specification of e.g. \code{lm}. ##' ##' For instance, to add the following linear regression model, to the ##' \code{lvm}-object, \code{m}: ##' \deqn{ E(Y|X_1,X_2) = \beta_1 X_1 + \beta_2 X_2} ##' We can write ##' ##' \code{regression(m) <- y ~ x1 + x2} ##' ##' Multivariate models can be specified by successive calls with ##' \code{regression}, but multivariate formulas are also supported, e.g. ##' ##' \code{regression(m) <- c(y1,y2) ~ x1 + x2} ##' ##' defines ##' \deqn{ E(Y_i|X_1,X_2) = \beta_{1i} X_1 + \beta_{2i} X_2 } ##' ##' The special function, \code{f}, can be used in the model specification to ##' specify linear constraints. E.g. to fix \eqn{\beta_1=\beta_2} ##' , we could write ##' ##' \code{regression(m) <- y ~ f(x1,beta) + f(x2,beta)} ##' ##' The second argument of \code{f} can also be a number (e.g. defining an ##' offset) or be set to \code{NA} in order to clear any previously defined ##' linear constraints. ##' ##' Alternatively, a more straight forward notation can be used: ##' ##' \code{regression(m) <- y ~ beta*x1 + beta*x2} ##' ##' All the parameter values of the linear constraints can be given as the right ##' handside expression of the assigment function \code{regression<-} (or ##' \code{regfix<-}) if the first (and possibly second) argument is defined as ##' well. E.g: ##' ##' \code{regression(m,y1~x1+x2) <- list("a1","b1")} ##' ##' defines \eqn{E(Y_1|X_1,X_2) = a1 X_1 + b1 X_2}. The rhs argument can be a ##' mixture of character and numeric values (and NA's to remove constraints). ##' ##' The function \code{regression} (called without additional arguments) can be ##' used to inspect the linear constraints of a \code{lvm}-object. ##' ##' For backward compatibility the "$"-symbol can be used to fix parameters at ##' a given value. E.g. to add a linear relationship between \code{y} and ##' \code{x} with slope 2 to the model \code{m}, we can write ##' \code{regression(m,"y") <- "x$2"}. Similarily we can use the "@@"-symbol to ##' name parameters. E.g. in a multiple regression we can force the parameters ##' to be equal: \code{regression(m,"y") <- c("x1@@b","x2@@b")}. Fixed parameters ##' can be reset by fixing (with \$) them to \code{NA}. 
##' ##' @aliases regression regression<- regression<-.lvm regression.lvm regfix ##' regfix regfix<- regfix.lvm regfix<-.lvm ##' @param object \code{lvm}-object. ##' @param value A formula specifying the linear constraints or if ##' \code{to=NULL} a \code{list} of parameter values. ##' @param to Character vector of outcome(s) or formula object. ##' @param from Character vector of predictor(s). ##' @param fn Real function defining the functional form of predictors (for ##' simulation only). ##' @param silent Logical variable which indicates whether messages are turned ##' on/off. ##' @param additive If FALSE and predictor is categorical a non-additive effect is assumed ##' @param y Alias for 'to' ##' @param x Alias for 'from' ##' @param quick Faster implementation without parameter constraints ##' @param \dots Additional arguments to be passed to the low level functions ##' @usage ##' \method{regression}{lvm}(object = lvm(), to, from, fn = NA, ##' silent = lava.options()$silent, additive=TRUE, y, x, value, ...) ##' \method{regression}{lvm}(object, to=NULL, quick=FALSE, ...) <- value ##' @return A \code{lvm}-object ##' @note Variables will be added to the model if not already present. ##' @author Klaus K. Holst ##' @seealso \code{\link{intercept<-}}, \code{\link{covariance<-}}, ##' \code{\link{constrain<-}}, \code{\link{parameter<-}}, ##' \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} ##' @keywords models regression ##' @examples ##' ##' m <- lvm() ## Initialize empty lvm-object ##' ### E(y1|z,v) = beta1*z + beta2*v ##' regression(m) <- y1 ~ z + v ##' ### E(y2|x,z,v) = beta*x + beta*z + 2*v + beta3*u ##' regression(m) <- y2 ~ f(x,beta) + f(z,beta) + f(v,2) + u ##' ### Clear restriction on association between y and ##' ### fix slope coefficient of u to beta ##' regression(m, y2 ~ v+u) <- list(NA,"beta") ##' ##' regression(m) ## Examine current linear parameter constraints ##' ##' ## ## A multivariate model, E(yi|x1,x2) = beta[1i]*x1 + beta[2i]*x2: ##' m2 <- lvm(c(y1,y2) ~ x1+x2) ##' ##' @export "regression<-" <- function(object,...,value) UseMethod("regression<-") ##' @export regression.formula <- function(object,...) regression(lvm(),object,...) ##' @export "regression<-.lvm" <- function(object, to=NULL, quick=FALSE, ..., value) { dots <- list(...) if (length(dots$additive)>0 && !dots$additive && !inherits(value,"formula")) { regression(object,beta=value,...) <- to return(object) } if (!is.null(to) || !is.null(dots$y)) { regfix(object, to=to, ...) <- value return(object) } else { if (is.list(value)) { for (v in value) { regression(object,...) <- v } return(object) } if (inherits(value,"formula")) { fff <- procformula(object,value,...) object <- fff$object lhs <- fff$lhs xs <- fff$xs ys <- fff$ys res <- fff$res X <- fff$X if (fff$iscovar) { ## return(covariance(object,var1=decomp.specials(lhs[[1]]),var2=X)) covariance(object) <- toformula(decomp.specials(lhs[[1]]),X) return(object) } if (!is.null(lhs) && nchar(lhs[[1]])>2 && substr(lhs[[1]],1,2)=="v(") { v <- update(value,paste(decomp.specials(lhs),"~.")) covariance(object,...) <- v return(object) } if (length(lhs)==0) { index(object) <- reindex(object) return(object) } for (i in seq_len(length(ys))) { y <- ys[i] for (j in seq_len(length(xs))) { if (length(res[[j]])>1) { regfix(object, to=y[1], from=xs[j],...) <- res[[j]][2] } else { object <- regression(object,to=y[1],from=xs[j],...) 
} } } object$parpos <- NULL return(object) } if (!is.list(value) | length(value)>2) stop("Value should contain names of outcome (to) and predictors (from)") if (all(c("to","from")%in%names(value))) { xval <- value$x; yval <- value$y } else { yval <- value[[1]]; xval <- value[[2]] } regression(object, to=yval, from=xval,...) } } ##' @export `regression` <- function(object,to,from,...) UseMethod("regression") ##' @export `regression.lvm` <- function(object=lvm(),to,from,fn=NA,silent=lava.options()$silent, additive=TRUE, y,x,value,...) { if (!missing(y)) { if (inherits(y,"formula")) y <- all.vars(y) to <- y } if (!missing(x)) { if (inherits(x,"formula")) x <- all.vars(x) from <- x } if (!additive) { if (!inherits(to,"formula")) to <- toformula(to,from) x <- attributes(getoutcome(to))$x K <- object$attributes$nordinal[x] if (is.null(K) || is.na(K)) { K <- list(...)$K if (is.null(K)) stop("Supply number of categories, K (or use method 'categorical' before calling 'regression').") object <- categorical(object,x,...) } dots <- list(...); dots$K <- K dots$x <- object dots$formula <- to dots$regr.only <- TRUE object <- do.call("categorical",dots) return(object) } if (missing(to)) { return(regfix(object)) } if (inherits(to,"formula")) { if (!missing(value)) { regression(object,to,silent=silent,...) <- value } else { regression(object,silent=silent,...) <- to } object$parpos <- NULL return(object) } if (is.list(to)) { for (t in to) regression(object,silent=silent,...) <- t object$parpos <- NULL return(object) } sx <- strsplit(from,"@") xx <- sapply(sx, FUN=function(i) i[1]) ps <- sapply(sx, FUN=function(i) i[2]) sx <- strsplit(xx,"$",fixed=TRUE) xs <- sapply(sx, FUN=function(i) i[1]) fix <- char2num(sapply(sx, FUN=function(i) i[2])) allv <- index(object)$vars object <- addvar(object, c(to,xs), silent=silent,reindex=FALSE) for (i in to) for (j in xs) { object$M[j,i] <- 1 if (!is.na(fn)) functional(object,j,i) <- fn } if (lava.options()$exogenous) { newexo <- setdiff(xs,c(to,allv)) exo <- exogenous(object) if (length(newexo)>0) exo <- unique(c(exo,newexo)) exogenous(object) <- setdiff(exo,to) } if (lava.options()$debug) { print(object$fix) } object$fix[xs,to] <- fix object$par[xs,to] <- ps object$parpos <- NULL index(object) <- reindex(object) return(object) } lava/R/pars.R0000644000176200001440000000240713162174023012470 0ustar liggesusers##' @export `pars` <- function(x,...) UseMethod("pars") ##' @export pars.default <- function(x,...) { if (!is.null(x$opt$estimate)) return(x$opt$estimate) if (!is.null(x$opt$par)) return(x$opt$par) if (!is.null(x$coef)) return(x$coef) return(coef(x)) } ##' @export pars.lvm.missing <- function(x,reorder=FALSE,...) { res <- pars.default(x) if (reorder) { idx <- match(coef(Model(x)),names(coef(x))) return(res[idx]) } return(res) } ###{{{ pars.multigroupfit ## pars.multigroupfit <- function(x,...) { ## res <- pars.default(x) ## lapply(ee$model$lvm,coef)) ## coef() ##} ###}}} ###{{{ pars.lvm ##' @export pars.lvm <- function(x, A, P, v, e, ...) 
{ parres <- A[index(x)$M1==1] diagcorfree <- diag(P)[diag(index(x)$P1)==1] parres <- c(parres, diagcorfree) if (ncol(A)>1) for (i in seq_len(ncol(index(x)$P1)-1)) for (j in seq(i+1,nrow(index(x)$P1))) { if (index(x)$P1[j,i]!=0) { parres <- c(parres, P[j,i]) } } if (length(parres)>0) names(parres) <- paste0("p",seq_len(length(parres))) if (!missing(v)) { parres <- c( v[which(index(x)$v1==1)], parres) } if (!missing(e)) { parres <- c( parres, e[which(index(x)$e1==1)] ) } return(parres) } ###}}} pars.lvm lava/R/model.R0000644000176200001440000000245213162174023012623 0ustar liggesusers##' Extract model ##' ##' Extract or replace model object ##' ##' ##' @aliases Model Model<- ##' @usage ##' ##' Model(x, ...) ##' ##' Model(x, ...) <- value ##' ##' @param x Fitted model ##' @param value New model object (e.g. \code{lvm} or \code{multigroup}) ##' @param \dots Additional arguments to be passed to the low level functions ##' @return Returns a model object (e.g. \code{lvm} or \code{multigroup}) ##' @author Klaus K. Holst ##' @seealso \code{\link{Graph}} ##' @keywords models ##' @examples ##' ##' m <- lvm(y~x) ##' e <- estimate(m, sim(m,100)) ##' Model(e) ##' ##' @export `Model` <- function(x,...) UseMethod("Model") ##' @export `Model.default` <- function(x,...) x ##' @export `Model.lvm` <- function(x,...) x ##' @export `Model.lvmfit` <- function(x,...) x$model ##' @export `Model.multigroup` <- function(x,...) x$lvm ##' @export `Model.multigroupfit` <- function(x,...) x$model ##' @export "Model<-" <- function(x,...,value) UseMethod("Model<-") ##' @export "Model<-.lvm" <- function(x,...,value) { x <- value; return(x) } ##' @export "Model<-.lvmfit" <- function(x,...,value) { x$model <- value; return(x) } ##' @export "Model<-.multigroup" <- function(x,...,value) { x$lvm <- value; return(x) } ##' @export "Model<-.multigroupfit" <- function(x,...,value) { x$model <- value; return(x) } lava/R/endogenous.R0000644000176200001440000000170613162174023013672 0ustar liggesusers##' @export `endogenous` <- function(x,...) UseMethod("endogenous") ##' @export `endogenous.lvmfit` <- function(x,...) { endogenous(Model(x),...) } ##' @export `endogenous.lvm` <- function(x,top=FALSE,latent=FALSE,...) { observed <- manifest(x) if (latent) observed <- vars(x) if (top) { M <- x$M res <- c() for (i in observed) if (!any(M[i,]==1)) res <- c(res, i) return(res) } exo <- exogenous(x) return(setdiff(observed,exo)) } ##' @export endogenous.list <- function(x,...) { endolist <- c() for (i in seq_along(x)) { ## exolist <- c(exolist, exogenous(x[[i]])) endolist <- c(endolist, endogenous(x[[i]])) } endolist <- unique(endolist) return(endolist) ## exolist <- unique(exolist) ## return(exolist[!(exolist%in%endolist)]) } ##' @export `endogenous.multigroup` <- function(x,...) { endogenous(Model(x)) } ##' @export `endogenous.lm` <- function(x,...) 
{ getoutcome(formula(x))[1] } lava/R/scheffe.R0000644000176200001440000000223113162174023013121 0ustar liggesusers##' Function to compute the Scheffe corrected confidence ##' interval for the regression line ##' ##' @title Calculate simultaneous confidence limits by Scheffe's method ##' @param model Linear model ##' @param newdata new data frame ##' @param conf.level confidence level (0.95) ##' @export ##' @examples ##' x <- rnorm(100) ##' d <- data.frame(y=rnorm(length(x),x),x=x) ##' l <- lm(y~x,d) ##' plot(y~x,d) ##' abline(l) ##' d0 <- data.frame(x=seq(-5,5,length.out=100)) ##' d1 <- cbind(d0,predict(l,newdata=d0,interval="confidence")) ##' d2 <- cbind(d0,scheffe(l,d0)) ##' lines(lwr~x,d1,lty=2,col="red") ##' lines(upr~x,d1,lty=2,col="red") ##' lines(lwr~x,d2,lty=2,col="blue") ##' lines(upr~x,d2,lty=2,col="blue") scheffe <- function(model,newdata=model.frame(model),conf.level=0.95) { df <- model$df.residual p <- model$rank alpha <- 1-conf.level ## Scheffe value uses 1-tailed F critical value scheffe.crit <- sqrt(p*qf(1-alpha,p,df)) ci <- predict(model,newdata,interval="confidence",level=conf.level) delta <- scheffe.crit/qt(1-alpha/2,df) ci[,2] <- ci[,1] -(ci[,1]-ci[,2])*delta ci[,3] <- ci[,1] +(ci[,3]-ci[,1])*delta return(ci) } lava/R/confband.R0000644000176200001440000002275413162174023013304 0ustar liggesusers##' Add Confidence limits bar to plot ##' ##' @title Add Confidence limits bar to plot ##' @param x Position (x-coordinate if vert=TRUE, y-coordinate otherwise) ##' @param lower Lower limit (if NULL no limits is added, and only the ##' center is drawn (if not NULL)) ##' @param upper Upper limit ##' @param center Center point ##' @param line If FALSE do not add line between upper and lower bound ##' @param delta Length of limit bars ##' @param centermark Length of center bar ##' @param pch Center symbol (if missing a line is drawn) ##' @param blank If TRUE a white ball is plotted before the center is ##' added to the plot ##' @param vert If TRUE a vertical bar is plotted. Otherwise a horizontal ##' bar is used ##' @param polygon If TRUE polygons are added between 'lower' and 'upper'. ##' @param step Type of polygon (step-function or piecewise linear) ##' @param ... Additional low level arguments (e.g. col, lwd, lty,...) ##' @seealso \code{confband} ##' @export ##' @keywords iplot ##' @aliases confband forestplot ##' @author Klaus K. 
Holst ##' @examples ##' plot(0,0,type="n",xlab="",ylab="") ##' confband(0.5,-0.5,0.5,0,col="darkblue") ##' confband(0.8,-0.5,0.5,0,col="darkred",vert=FALSE,pch=1,cex=1.5) ##' ##' set.seed(1) ##' K <- 20 ##' est <- rnorm(K) ##' se <- runif(K,0.2,0.4) ##' x <- cbind(est,est-2*se,est+2*se,runif(K,0.5,2)) ##' x[c(3:4,10:12),] <- NA ##' rownames(x) <- unlist(lapply(letters[seq(K)],function(x) paste(rep(x,4),collapse=""))) ##' rownames(x)[which(is.na(est))] <- "" ##' signif <- sign(x[,2])==sign(x[,3]) ##' forestplot(x,text.right=FALSE) ##' forestplot(x[,-4],sep=c(2,15),col=signif+1,box1=TRUE,delta=0.2,pch=16,cex=1.5) ##' forestplot(x,vert=TRUE,text=FALSE) ##' forestplot(x,vert=TRUE,text=FALSE,pch=NA) ##' ##forestplot(x,vert=TRUE,text.vert=FALSE) ##' ##forestplot(val,vert=TRUE,add=TRUE) ##' ##' z <- seq(10) ##' zu <- c(z[-1],10) ##' plot(z,type="n") ##' confband(z,zu,rep(0,length(z)),col=Col("darkblue"),polygon=TRUE,step=TRUE) ##' confband(z,zu,zu-2,col=Col("darkred"),polygon=TRUE,step=TRUE) ##' ##' z <- seq(0,1,length.out=100) ##' plot(z,z,type="n") ##' confband(z,z,z^2,polygon="TRUE",col=Col("darkblue")) ##' ##' set.seed(1) ##' k <- 10 ##' x <- seq(k) ##' est <- rnorm(k) ##' sd <- runif(k) ##' val <- cbind(x,est,est-sd,est+sd) ##' par(mfrow=c(1,2)) ##' plot(0,type="n",xlim=c(0,k+1),ylim=range(val[,-1]),axes=FALSE,xlab="",ylab="") ##' axis(2) ##' confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2) ##' plot(0,type="n",ylim=c(0,k+1),xlim=range(val[,-1]),axes=FALSE,xlab="",ylab="") ##' axis(1) ##' confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2,vert=FALSE) confband <- function(x,lower,upper,center=NULL,line=TRUE,delta=0.07,centermark=0.03, pch,blank=TRUE,vert=TRUE,polygon=FALSE,step=FALSE,...) { if (polygon) { if (step) { x1 <- rep(x,each=2)[-1] y1 <- rep(lower, each=2); y1 <- y1[-length(y1)] x2 <- rep(rev(x),each=2); x2 <- x2[-length(x2)] y2 <- rep(rev(upper),each=2)[-1] xx <- c(x1,x2) if (!is.null(center)) center <- rep(center,each=2)[-1] yy <- c(y1,y2) } else { xx <- c(x,rev(x)) yy <- c(lower,rev(upper)) } polygon(xx,yy,...) if (line && !is.null(center)) { mlines <- function(x,y,...,density,angle,border,fillOddEven) lines(x,y,...) mlines(xx[seq(length(xx)/2)],center,...) } return(invisible(NULL)) } if (vert) { ## lower <- lower[length(x)] ## upper <- upper[length(x)] ## center <- center[length(x)] if (line && !missing(lower) && !missing(upper)) segments(x,lower,x,upper,...) if (!missing(lower)) segments(x-delta,lower,x+delta,lower,...) if (!missing(upper)) segments(x-delta,upper,x+delta,upper,...) if (!is.null(center)) { if (!missing(pch)) { if (blank) points(x,center,pch=16,col="white") points(x,center,pch=pch,...) } else { segments(x-centermark,center,x+centermark,center,...) } } } else { if (line && !missing(lower) && !missing(upper)) segments(lower,x,upper,x,...) if (!missing(lower)) segments(lower,x-delta,lower,x+delta,...) if (!missing(upper)) segments(upper,x-delta,upper,x+delta,...) if (!is.null(center)) { if (!missing(pch)) { if (blank) points(center,x,pch=16,col="white") points(center,x,pch=pch,...) } else { segments(center,x-centermark,center,x+centermark,...) } } } if (missing(lower)) lower <- NULL if (missing(upper)) upper <- NULL invisible(c(x,lower,upper,center)) } ##' @export forestplot <- function(x,lower,upper,line=0,labels, text=TRUE,text.right=text,text.fixed=NULL,text.vert=TRUE, adj=NULL, delta=0,axes=TRUE,cex=1,pch=15, xlab="",ylab="",sep,air, xlim,ylim,mar,box1=FALSE,box2=FALSE, vert=FALSE,cex.axis=1,cex.estimate=0.6, add=FALSE, reset.par=FALSE,...) 
{ if (is.matrix(x)) { lower <- x[,2]; upper <- x[,3] if (ncol(x)>3) cex <- x[,4] x <- x[,1] } if (missing(mar) && !add) { if (vert) { mar <- c(8,4,1,1) } else { mar <- c(4,8,1,1) } } if (missing(labels)) labels <- names(x) K <- length(x) onelayout <- FALSE if (!add) { def.par <- par(no.readonly=TRUE) if (reset.par) on.exit(par(def.par)) if (text.right) { if (vert) { layout(rbind(1,2),heights=c(0.2,0.8)) } else { layout(cbind(2,1),widths=c(0.8,0.2)) } } else { onelayout <- TRUE layout(1) } } if (vert) { if (missing(ylim)) { if (missing(air)) air <- max(upper-lower,na.rm=TRUE)*0.4 ylim <- range(c(x,lower-air,upper+air),na.rm=TRUE) } if (missing(xlim)) xlim <- c(1,K) } else { if (missing(ylim)) ylim <- c(1,K) if (missing(xlim)) { if (missing(air)) air <- max(upper-lower,na.rm=TRUE)*0.4 xlim <- range(c(x,lower-air,upper+air),na.rm=TRUE) } } args0 <- list(...) formatCargsn <- names(formals(args(formatC)))[-1] nn <- setdiff(names(args0),formatCargsn) plotargs <- args0[nn] mainplot <- function(...) { par(mar=mar) ## bottom,left,top,right do.call("plot",c(list(x=0,type="n",axes=FALSE,xlab=xlab,ylab=ylab,xlim=xlim,ylim=ylim),plotargs)) if (box1) box() if (axes) { if (vert) { axis(2,cex.axis=cex.axis) } else { axis(1,cex.axis=cex.axis) } } } if (onelayout && !add) mainplot() if (text) { xpos <- upper if (text.right && !add) { if (vert) { par(mar=c(0,mar[2],0,mar[4])) } else { par(mar=c(mar[1],0,mar[3],0)) } plot.new() if (vert) { plot.window(xlim=xlim,ylim=c(0,0.5)) } else { plot.window(ylim=ylim,xlim=c(0,0.5)) } if (box2) box() xpos[] <- 0 } if (!is.null(text.fixed)) { if (is.logical(text.fixed) && text.fixed) text.fixed <- max(xpos) xpos <- rep(text.fixed,length.out=K) } nn <- intersect(names(args0),formatCargsn) args <- args0[nn] for (i in seq_len(K)) { st <- c(do.call(formatC,c(list(x=x[i]),args)), paste0("(", do.call(formatC,c(list(x=lower[i]),args)),"; ", do.call(formatC,c(list(x=upper[i]),args)),")")) if (text.vert) { st <- paste0(" ",st[1]," ",st[2],collapse="") st <- paste(" ", st) } if (vert) { if (!is.na(x[i])) { if (!text.vert) { if (text.right) xpos[i] <- xpos[i]+0.025 graphics::text(i,xpos[i],paste(st,collapse="\n"),xpd=TRUE, offset=3, cex=cex.estimate, adj=adj) } else { if (!is.na(x[i])) graphics::text(i,xpos[i],st,xpd=TRUE, srt=90, offset=0, pos=4, cex=cex.estimate, adj=adj) } } } else { if (!is.na(x[i])) graphics::text(xpos[i],i,st,xpd=TRUE,pos=4,cex=cex.estimate, adj=adj) } } } if (!onelayout && !add) mainplot() if (!is.null(line)) { if (vert) { abline(h=line,lty=2,col="lightgray") } else { abline(v=line,lty=2,col="lightgray") } } if (!missing(sep)) { if (vert) { abline(v=sep+.5,col="gray") } else { abline(h=sep+.5,col="gray") } } do.call("confband", c(list(x=seq(K),lower=lower,upper=upper,x, pch=pch,cex=cex,vert=vert,blank=FALSE), plotargs)) if (!add) { if (is.null(adj)) adj <- NA if (vert) { mtext(labels,1,at=seq(K),las=2,line=1,cex=cex.axis, adj=adj) } else { mtext(labels,2,at=seq(K),las=2,line=1,cex=cex.axis, adj=adj) } } } lava/R/lmers.R0000644000176200001440000000502613162174023012645 0ustar liggesusers##v <- lmerplot(l1,varcomp=TRUE,colorkey=TRUE,lwd=0,col=rainbow(20)) lmerplot <- function(model,x,id,y,transform,re.form=NULL,varcomp=FALSE,colorbar=TRUE,mar=c(4,4,4,6),col,index=seq(50),...) 
{ if (varcomp) { Z <- lme4::getME(model,"Z") nn <- unlist(lapply(lme4::getME(model,"Ztlist"),nrow)) ve <- lme4::getME(model,"sigma")^2 vu <- varcomp(model,profile=FALSE)$varcomp L <- Matrix::Diagonal(sum(nn),rep(vu,nn)) V <- Z%*%L%*%(Matrix::t(Z)) Matrix::diag(V) <- Matrix::diag(V)+ve cV <- Matrix::cov2cor(V) if (!is.null(index)) { index <- intersect(seq(nrow(cV)),index) cV <- cV[index,index,drop=FALSE] } if (colorbar) { opt <- par(mar=mar) } ##if (missing(col)) col <- c("white",rev(heat.colors(16))) if (missing(col)) col <- rev(gray.colors(16,0,1)) image(seq(nrow(cV)),seq(ncol(cV)),as.matrix(cV),xlab="",ylab="",col=col,zlim=c(0,1),...) if (colorbar) { uu <- devcoords() xrange <- c(uu$fig.x2,uu$dev.x2) xrange <- diff(xrange)/3*c(1,-1)+xrange yrange <- c(uu$fig.y1,uu$fig.y2) colorbar(direction="vertical",x.range=xrange,y.range=yrange,clut=col,values=seq(0,1,length.out=length(col)),srt=0,position=2) par(opt) } return(invisible(V)) } if (missing(y)) y <- model.frame(model)[,1] yhat <- predict(model) if (!is.null(re.form)) ymean <- predict(model,re.form=re.form) if (!missing(transform)) { yhat <- transform(yhat) if (!is.null(re.form)) ymean <- transform(ymean) y <- transform(y) } plot(y ~ x, col=Col(id,0.3), pch=16,...) if (!is.null(re.form)) points(ymean ~ x, pch="-",cex=4); for (i in unique(id)) { idx <- which(id==i) lines(yhat[idx]~x[idx],col=i) } } varcomp <- function(x,profile=TRUE,...) { cc <- cbind(lme4::fixef(x),diag(as.matrix(vcov(x)))^.5) cc <- cbind(cc,cc[,1]-qnorm(0.975)*cc[,2],cc[,1]+qnorm(0.975)*cc[,2], 2*(1-pnorm(abs(cc[,1])/cc[,2]))) pr <- NULL if (profile) pr <- confint(x) colnames(cc) <- c("Estimate","Std.Err","2.5%","97.5%","p-value") vc <- lme4::VarCorr(x) res <- structure(list(coef=lme4::fixef(x), vcov=as.matrix(vcov(x)), coefmat=cc, confint=pr, varcomp=vc[[1]][,], residual=attributes(vc)$sc^2 ), class="estimate.lmer") res } lava/R/confpred.R0000644000176200001440000000521113162174023013317 0ustar liggesusers##' Conformal predicions ##' ##' @title Conformal prediction ##' @param object Model object (lm, glm or similar with predict method) or formula (lm) ##' @param data data.frame ##' @param newdata New data.frame to make predictions for ##' @param alpha Level of prediction interval ##' @param mad Conditional model (formula) for the MAD (locally-weighted CP) ##' @param ... Additional arguments to lower level functions ##' @return data.frame with fitted (fit), lower (lwr) and upper (upr) predictions bands. ##' @examples ##' set.seed(123) ##' n <- 200 ##' x <- seq(0,6,length.out=n) ##' delta <- 3 ##' ss <- exp(-1+1.5*cos((x-delta))) ##' ee <- rnorm(n,sd=ss) ##' y <- (x-delta)+3*cos(x+4.5-delta)+ee ##' d <- data.frame(y=y,x=x) ##' ##' newd <- data.frame(x=seq(0,6,length.out=50)) ##' ## cc <- confpred(lm(y~ns(x,knots=c(1,3,5)),d),newdata=newd) ##' cc <- confpred(lm(y~poly(x,3),d),data=d,newdata=newd) ##' if (interactive()) { ##' ##' plot(y~x,pch=16,col=lava::Col("black"),ylim=c(-10,15),xlab="X",ylab="Y") ##' with(cc, ##' lava::confband(newd$x,lwr,upr,fit, ##' lwd=3,polygon=TRUE,col=Col("blue"),border=FALSE)) ##' } ##' @export confpred <- function(object,data,newdata=data,alpha=0.05,mad,...) 
{ ## Split algorithm if (inherits(object,"formula")) { object <- do.call("lm",list(object,data=data,...)) } dd <- csplit(data,0.5) muhat.new <- predict(object,newdata=newdata) ## New predictions muhat.1 <- predict(object,newdata=dd[[1]]) ## Training R1 <- abs(dd[[1]][,1]-muhat.1) muhat.2 <- predict(object,newdata=dd[[2]]) ## Ranking R2 <- abs(dd[[2]][,1]-muhat.2) if (missing(mad)) mad <- formula(object) if (is.null(mad)) { mad.new <- rep(1,nrow(newdata)) } else { ## Locally-weighted conformal ffinference if (names(dd[[2]])[1] %ni% names(newdata)) { newdata <- cbind(0,newdata); names(newdata)[1] <- names(dd[[2]])[1] } X0 <- model.matrix(mad,data=newdata) if (inherits(mad,"formula")) { X2 <- model.matrix(mad,dd[[2]]) mad.obj <- stats::lm.fit(x=X2,y=R2) mad2 <- X2%*%mad.obj$coefficients mad.new <- X0%*%mad.obj$coefficients } else { mad.obj <- do.call(mad,list(y=R2,x=dd[[2]])) mad2 <- predict(mad.obj,newdata=dd[[2]]) mad.new <- predict(mad.obj,newdata=newdata) } R2 <- R2/mad2 } k <- ceiling((nrow(data)/2+1)*(1-alpha)) if (k==0) k <- 1 if (k>length(R2)) k <- length(R2) q <- sort(R2)[k] ## 1-alpha quantile lo <- muhat.new - q*mad.new up <- muhat.new + q*mad.new data.frame(fit=muhat.new,lwr=lo,upr=up) } lava/R/cv.R0000644000176200001440000000725013162174023012134 0ustar liggesusersrmse1 <- function(fit,data,response=NULL,...) { yhat <- predict(fit,newdata=data) if (is.null(response)) response <- endogenous(fit) y <- data[,response] c(RMSE=mean(as.matrix(y-yhat)^2)) } ##' Cross-validation ##' ##' Generic cross-validation function ##' @title Cross-validation ##' @param modelList List of fitting functions or models ##' @param data data.frame ##' @param K Number of folds (default 5) ##' @param rep Number of repetitions (default 1) ##' @param perf Performance measure (default RMSE) ##' @param seed Optional random seed ##' @param mc.cores Number of cores used for parallel computations ##' @param ... Additional arguments parsed to models in modelList and perf ##' @author Klaus K. Holst ##' @examples ##' f0 <- function(data,...) lm(...,data) ##' f1 <- function(data,...) lm(Sepal.Length~Species,data) ##' f2 <- function(data,...) lm(Sepal.Length~Species+Petal.Length,data) ##' x <- cv(list(m0=f0,m1=f1,m2=f2),rep=10, data=iris, formula=Sepal.Length~.) ##' x2 <- cv(list(f0(iris),f1(iris),f2(iris)),rep=10, data=iris) ##' @export cv <- function(modelList, data, K=5, rep=1, perf, seed=NULL, mc.cores=1, ...) { if (missing(perf)) perf <- rmse1 if (!is.list(modelList)) modelList <- list(modelList) nam <- names(modelList) if (is.null(nam)) nam <- paste0("model",seq_along(modelList)) args <- list(...) 
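## A user-supplied 'perf' function is called as perf(fit, data=dtest, ...)
## and should return a named numeric vector, mirroring the default rmse1()
## defined above. A hypothetical mean-absolute-error measure could look like
## (sketch only; 'mae1' is not part of the package):
##   mae1 <- function(fit, data, response=NULL, ...) {
##     if (is.null(response)) response <- endogenous(fit)
##     c(MAE=mean(abs(as.matrix(data[,response]-predict(fit,newdata=data)))))
##   }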
## Models run on full data: if (is.function(modelList[[1]])) { fit0 <- lapply(modelList, function(f) do.call(f,c(list(data),args))) } else { fit0 <- modelList } ## In-sample predictive performance: perf0 <- lapply(fit0, function(fit) do.call(perf,c(list(fit,data=data),args))) namPerf <- names(perf0[[1]]) names(fit0) <- names(perf0) <- nam n <- nrow(data) M <- length(perf0) # Number of models P <- length(perf0[[1]]) # Number of performance measures if (!is.null(seed)) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } dim <- c(rep,K,M,P) PerfArr <- array(0,dim) dimnames(PerfArr) <- list(NULL,NULL,nam,namPerf) folds <- foldr(n,K,rep) arg <- expand.grid(R=seq(rep),K=seq(K)) #,M=seq_along(modelList)) ff <- function(i) { R <- arg[i,1] k <- arg[i,2] fold <- folds[[R]] dtest <- data[fold[[k]],] dtrain <- data[unlist(fold[-k]),] if (is.function(modelList[[1]])) { fits <- lapply(modelList, function(f) do.call(f,c(list(dtrain),args))) } else { fits <- lapply(modelList, function(m) do.call(update,c(list(m,data=dtrain),args))) } perfs <- lapply(fits, function(fit) do.call(perf,c(list(fit,data=dtest),args))) do.call(rbind,perfs) } if (mc.cores>1) { val <- parallel::mcmapply(ff,seq(nrow(arg)),SIMPLIFY=FALSE,mc.cores=mc.cores) } else { val <- mapply(ff,seq(nrow(arg)),SIMPLIFY=FALSE) } for (i in seq(nrow(arg))) { R <- arg[i,1] k <- arg[i,2] PerfArr[R,k,,] <- val[[i]] } structure(list(cv=PerfArr, call=match.call(), names=nam, rep=rep, folds=K, fit=fit0), class="CrossValidated") } ##' @export print.CrossValidated <- function(x,...) { ##print(drop(x$cv)) res <- apply(x$cv,3:4,function(x) mean(x)) if (length(x$names)==nrow(res)) rownames(res) <- x$names print(res,quote=FALSE) } lava/R/ordinal.R0000644000176200001440000002524013162174023013153 0ustar liggesusersordinal.remove.hook <- function(x,var,...) { ordinal(x,K=0) <- var return(x) } color.ordinal <- function(x,subset=vars(x),...) { return(list(vars=intersect(subset,ordinal(x)),col="indianred1")) } ordinal.sim.hook <- function(x,data,p,modelpar,...) { ovar <- ordinal(x) for (i in seq_len(length(ovar))) { if (attributes(ovar)$liability[i]) { idx <- attributes(ovar)$idx[[ovar[i]]] if (length(idx)==0) { breaks <- c(-Inf,0,Inf) } else { breaks <- c(-Inf,ordreg_threshold(modelpar$e[idx]),Inf) } z <- cut(data[,ovar[i]],breaks=breaks) data[,ovar[i]] <- as.numeric(z)-1 } K <- attributes(ovar)$K[i] lab <- attributes(ovar)$labels[ovar[i]][[1]] if (!is.null(lab)) data[,ovar[i]] <- factor(data[,ovar[i]], levels=seq(K)-1, labels=lab) } return(data) } ordinal.estimate.hook <- function(x,data,weights,data2,estimator,...) { dots <- list(...) 
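## (Estimation hook: before fitting, character/factor outcomes are recoded as
##  0-based integers and declared ordinal, 'Surv' outcomes are translated into
##  interval/censoring bounds, and the estimator is switched to "normal"
##  (or to the lava.tobit censored-regression estimators when that package
##  is available).)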
nestimator <- c("normal") nestimator2 <- c("tobit","tobitw","gaussian") ord <- ordinal(x) bin <- NULL hasTobit <- lava.options()$tobit && isNamespaceLoaded("lava.tobit") if (hasTobit) { bin <- lava.tobit::binary(x) } if (is.null(estimator) && length(ord)>0) estimator <- nestimator[1] ## Binary outcomes -> censored regression if (is.null(dim(data))) return(NULL) if (is.null(estimator) || estimator%in%c(nestimator2,nestimator)) { for (i in setdiff(lava::endogenous(x),bin)) { if (is.character(data[,i]) | is.factor(data[,i])) { # Transform binary 'factor' y <- as.factor(data[,i]) data[,i] <- as.numeric(y)-1 if (hasTobit && nlevels(y)==2 && !is.null(estimator) && estimator%in%c("gaussian","tobit")) { lava.tobit::binary(x) <- i } else { estimator <- nestimator[1] ordinal(x,K=nlevels(y)) <- i } } } ord <- ordinal(x) if (length(ord)>0 && !is.null(estimator) && estimator%in%nestimator2) { if (hasTobit) { lava.tobit::binary(x) <- ord } else { estimator <- nestimator[1] } } if (hasTobit) bin <- intersect(lava.tobit::binary(x),vars(x)) if (length(bin)>0 && (is.null(estimator) || estimator%in%"normal")) { estimator <- nestimator[1] ordinal(x,K=2) <- bin } if (length(bin)>0 && estimator%in%nestimator2) { estimator <- nestimator2[1] if (is.null(weights)) { W <- data[,bin,drop=FALSE]; W[W==0] <- -1; colnames(W) <- bin weights <- lava::lava.options()$threshold*W } else { ## if (!all(binary(x)%in%colnames(data))) ## W <- data[,binary(x),drop=FALSE]; W[W==0] <- -1; colnames(W) <- binary(x) ## attributes(W)$data2 <- weights ## weights <- W ## weights[,binary(x)] <- W } for (b in bin) { data[!is.na(data[,b]),b] <- 0 } ## data[,binary(x)] <- 0 if (!is.null(data2)) { estimator <- "tobitw" } } } ## Transform 'Surv' objects data2 <- mynames <- NULL if (is.null(estimator) || estimator%in%nestimator[1] || (!hasTobit && estimator%in%nestimator2)) { for (i in setdiff(lava::endogenous(x),c(bin,ord))) { if (survival::is.Surv(data[,i])) { S <- data[,i] y1 <- S[,1] if (attributes(S)$type=="left") { y2 <- y1 y1[S[,2]==0] <- -Inf } if (attributes(S)$type=="right") { y2 <- y1 y2[S[,2]==0] <- Inf } if (attributes(S)$type=="interval2") { y2 <- S[,2] } if (attributes(S)$type=="interval") { y2 <- S[,2] y2[S[,3]==1L] <- y1[S[,3]==1L] } if (!(attributes(S)$type%in%c("left","right","interval2","interval"))) stop("Surv type not supported.") mynames <- c(mynames,i) y2 <- cbind(y2) colnames(y2) <- i data2 <- cbind(data2,y2) data[,i] <- y1 estimator <- "normal" } } } W <- NULL if (length(estimator)>0 && estimator%in%nestimator2 && hasTobit) { for (i in setdiff(lava::endogenous(x),bin)) { if (survival::is.Surv(data[,i])) { estimator <- nestimator2[1] S <- data[,i] y <- S[,1] if (attributes(S)$type=="left") w <- S[,2]-1 if (attributes(S)$type=="right") w <- 1-S[,2] if (attributes(S)$type=="interval2") { w <- S[,3]; w[w==2] <- (-1) } mynames <- c(mynames,i) W <- cbind(W,w) data[,i] <- y } } if (length(W)>0) { colnames(W) <- mynames if (!is.null(weights)) { wW <- intersect(colnames(weights),colnames(W)) if (length(wW)>0) weights[,wW] <- W[,wW] Wo <- setdiff(colnames(W),wW) if (length(Wo)>0) weights <- cbind(weights,W[,Wo,drop=FALSE]) } else { weights <- W; } } } return(c(list(x=x,data=data,weights=weights,data2=data2,estimator=estimator),dots)) } ##' @export "ordinal<-" <- function(x,...,value) UseMethod("ordinal<-") ##' @export "ordinal<-.lvm" <- function(x,...,value) { ordinal(x, value, ...) } ##' @export "ordinal" <- function(x,...) UseMethod("ordinal") ##' @export print.ordinal.lvm <- function(x,...) 
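## Print method for the set of ordinal variables in a model: each variable is
## listed with its fixed threshold parameters, or marked as binary when no
## thresholds are attached.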
{ cat(rep("_",28),"\n",sep="") for (i in x) { val <- attr(x,"fix")[[i]] if (length(val)==0) cat(paste(i,"binary",sep=":"),"\n") else print(unlist(attr(x,"fix")[[i]]),quote=FALSE) cat(rep("_",28),"\n",sep="") } } ##' @export `ordinal.lvm` <- function(x,var=NULL,K=2, constrain, breaks=NULL, p, liability=TRUE, labels, exo=FALSE, ...) { if (inherits(var,"formula")) { var <- all.vars(var) } if (is.null(var)) { ordidx <- unlist(x$attributes$ordinal) KK <- unlist(x$attributes$nordinal) idx <- x$attributes$ordinalparname fix <- lapply(idx,function(z) x$exfix[z]) liability <- x$attributes$liability labels <- x$attributes$labels if (length(ordidx)>0) { val <- names(ordidx) return(structure(val,K=KK,idx=idx,fix=fix,liability=liability,labels=labels,class="ordinal.lvm")) } else return(NULL) } if (K[1]==0L || is.null(K[1]) || (is.logical(K) & !K[1])) { x$attributes$type[var] <- setdiff(x$attributes$type,var) pp <- unlist(x$attributes$ordinalparname[var]) parameter(x,remove=TRUE) <- pp x$attributes$ordinalparname[var] <- NULL x$attributes$ordinal[var] <- NULL ##x$attributes$labels[var] <- NULL x$attributes$type <- x$attributes$type[setdiff(names(x$attributes$type),var)] x$attributes$liability <- x$attributes$liability[setdiff(names(x$attributes$liability),var)] x$attributes$nordinal <- x$attributes$nordinal[setdiff(names(x$attributes$nordinal),var)] x$attributes$normal <- x$attributes$normal[setdiff(names(x$attributes$normal),var)] x$constrainY[var] <- NULL exo <- intersect(var,exogenous(x,TRUE)) if (length(exo)>0) { intercept(x,var) <- NA covariance(x,var) <- NA exogenous(x) <- union(exogenous(x),exo) } return(x) } if (!missing(p)) breaks <- qnorm(cumsum(p)) if (!is.null(breaks)) { breaks <- ordreg_ithreshold(breaks) K <- length(breaks)+1 } if (!missing(labels)) K <- length(labels) if (length(var)>length(K)) K <- rep(K[1],length(var)) if (length(var)==1 && !missing(constrain)) constrain <- list(constrain) if (length(var)>1) { if (!missing(labels) && !is.list(labels)) labels <- rep(list(labels),length(var)) if (!missing(breaks) && !is.list(breaks)) breaks <- rep(list(breaks),length(var)) if (!missing(constrain) && !is.list(constrain)) constrain <- rep(list(constrain),length(var)) } addvar(x) <- var for (i in seq_len(length(var))) { if (K[i]>2 || (K[i]==2 && !liability)) { parname <- paste0(var[i],":",paste(seq(K[i]-1)-1,seq(K[i]-1),sep="|")) newpar <- if (is.null(breaks)) { rep(-1,K[i]-1) } else if (is.list(breaks)) breaks[[i]] else breaks if (length(newpar)2,"categorical","binary") if (K[i]>2) intfix(x,var[i],NULL) <- 0 if (!liability) { mytr <- function(y,p,idx,...) { breaks <- c(-Inf,ordreg_threshold(p[idx]),Inf) as.numeric(cut(y,breaks=breaks))-1 } myalist <- substitute(alist(y=,p=,idx=pp), list(pp=x$attributes$ordinalparname[[var[i]]])) formals(mytr) <- eval(myalist) transform(x,var[i],post=FALSE) <- mytr } } x$attributes$liability[var] <- liability x$attributes$ordinal[var] <- TRUE if (!missing(labels)) { if (length(var)==1) labels <- list(labels) x$attributes$labels[var] <- labels } x$attributes$nordinal[var] <- K x$attributes$normal[var] <- FALSE covfix(x,var,NULL,exo=exo) <- 1 if (is.null(index(x))) index(x) <- reindex(x) return(x) } lava/R/plot.estimate.R0000644000176200001440000000207413162174023014313 0ustar liggesusers ##' @export plot.estimate <- function(x,f,idx,intercept=FALSE,data,confint=TRUE,type="l",xlab="x",ylab="f(x)",col=1,add=FALSE,...) 
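## Plots either a user-supplied transformation 'f' of the parameters evaluated
## over 'data' (a curve with a shaded confidence band), or, when 'f' is
## omitted, a forest plot of the coefficients and their confidence limits.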
{ if (!missing(f) && !is.null(f)) { data <- as.list(data) env <- new.env() for (y in names(data)) { assign(y,data[[y]],env) } environment(f) <- env pp <- estimate(x,f,..., vcov=vcov(x),iid=FALSE)$coefmat if (!add) suppressWarnings(plot(data[[1]],pp[,1],xlab=xlab,ylab=ylab,type=type,...)) else lines(data[[1]],pp[,1],xlab=xlab,ylab=ylab,type=type,col=col,...) if (confint) confband(data[[1]],pp[,3],pp[,4],polygon=TRUE,col=Col(col),lty=0) return(invisible(pp)) } if (!is.null(x$coefmat)) { pp <- x$coefmat[,c(1,3,4),drop=FALSE] } else { pp <- cbind(coef(x),confint(x)) } if (!missing(idx)) pp <- pp[idx,,drop=FALSE] if (!intercept) { idx <- match("(Intercept)",rownames(pp)) if (length(idx)>0 && !is.na(idx)) pp <- pp[-idx,,drop=FALSE] } forestplot(pp[rev(seq(nrow(pp))),,drop=FALSE],...) } lava/R/vcov.R0000644000176200001440000000071613162174023012501 0ustar liggesusers##' @export vcov.lvmfit <- function(object,...) { res <- object$vcov if (inherits(object,"lvm.missing")) { resnames <- names(coef(object)) } else { resnames <- coef(Model(object),fix=FALSE, mean=object$control$meanstructure) } colnames(res) <- rownames(res) <- resnames return(res) } ##' @export vcov.multigroupfit <- function(object,...) { res <- object$vcov colnames(res) <- rownames(res) <- object$model$name return(res) } lava/R/moments.R0000644000176200001440000000375013162174023013207 0ustar liggesusersMoments <- function(x,p,data,conditional=TRUE,...) { } ##' @export `moments` <- function(x,...) UseMethod("moments") ##' @export moments.lvmfit <- function(x, p=pars(x),...) moments(Model(x),p=p,...) ##' @export moments.lvm.missing <- function(x, p=pars(x), ...) { idx <- match(coef(Model(x)),names(coef(x))) moments.lvmfit(x,p=p[idx],...) } ##' @export moments.lvm <- function(x, p, debug=FALSE, conditional=FALSE, data=NULL, latent=FALSE, ...) { ### p: model-parameters as obtained from e.g. 'startvalues'. ### (vector of regression parameters and variance parameters) ### meanpar: mean-parameters (optional) ii <- index(x) pp <- modelPar(x,p) AP <- with(pp, matrices(x,p,meanpar=meanpar,epars=p2,data=data,...)) P <- AP$P v <- AP$v if (!is.null(v)) { names(v) <- ii$vars } J <- ii$J if (conditional) { J <- ii$Jy if (latent) { J <- diag(nrow=length(ii$vars))[sort(c(ii$endo.idx,ii$eta.idx)),,drop=FALSE] } px <- ii$px exo <- exogenous(x) ## if (missing(row)) { v <- rbind(v) %x% cbind(rep(1,nrow(data))) if (length(ii$exo.idx)>0) { v[,ii$exo.idx] <- as.matrix(data[,exo]) } ## } else { ## if (!is.null(v)) ## v[exo] <- as.numeric(data[row,exo]) ## } P <- px%*% tcrossprod(P, px) } Im <- diag(nrow=nrow(AP$A)) if (ii$sparse) { IAi <- with(AP, as(Inverse(Im-t(A)),"sparseMatrix")) ##IAi <- as(solve(Matrix::Diagonal(nrow(A))-t(A)),"sparseMatrix") G <- as(J%*%IAi,"sparseMatrix") } else { IAi <- Inverse(Im-t(AP$A)) G <- J%*%IAi } xi <- NULL if (!is.null(v)) { xi <- v%*%t(G) ## Model-specific mean vector } Cfull <- as.matrix(IAi %*% tcrossprod(P,IAi)) C <- as.matrix(J %*% tcrossprod(Cfull,J)) return(list(Cfull=Cfull, C=C, v=v, e=AP$e, xi=xi, A=AP$A, P=P, IAi=IAi, J=J, G=G, npar=ii$npar, npar.reg=ii$npar.reg, npar.mean=ii$npar.mean, npar.ex=ii$npar.ex, parval=AP$parval, constrain.idx=AP$constrain.idx, constrainpar=AP$constrainpar)) } lava/R/plot.R0000644000176200001440000003446113162174023012506 0ustar liggesusers###{{{ plot.lvm ##' Plot path diagram ##' ##' Plot the path diagram of a SEM ##' ##' ##' @aliases plot.lvmfit ##' @param x Model object ##' @param diag Logical argument indicating whether to visualize ##' variance parameters (i.e. 
diagonal of variance matrix) ##' @param cor Logical argument indicating whether to visualize ##' correlation parameters ##' @param labels Logical argument indicating whether to add labels ##' to plot (Unnamed parameters will be labeled p1,p2,...) ##' @param intercept Logical argument indicating whether to add ##' intercept labels ##' @param addcolor Logical argument indicating whether to add colors ##' to plot (overrides \code{nodecolor} calls) ##' @param plain if TRUE strip plot of colors and boxes ##' @param cex Fontsize of node labels ##' @param fontsize1 Fontsize of edge labels ##' @param noplot if TRUE then return \code{graphNEL} object only ##' @param graph Graph attributes (Rgraphviz) ##' @param attrs Attributes (Rgraphviz) ##' @param unexpr if TRUE remove expressions from labels ##' @param addstyle Logical argument indicating whether additional ##' style should automatically be added to the plot (e.g. dashed ##' lines to double-headed arrows) ##' @param plot.engine default 'Rgraphviz' if available, otherwise ##' visNetwork, igraph ##' @param init Reinitialize graph (for internal use) ##' @param layout Graph layout (see Rgraphviz or igraph manual) ##' @param edgecolor if TRUE plot style with colored edges ##' @param graph.proc Function that post-processes the graph object ##' (default: subscripts are automatically added to labels of the ##' nodes) ##' @param ... Additional arguments to be passed to the low level ##' functions ##' @author Klaus K. Holst ##' @keywords hplot regression ##' @examples ##' ##' if (interactive()) { ##' m <- lvm(c(y1,y2) ~ eta) ##' regression(m) <- eta ~ z+x2 ##' regression(m) <- c(eta,z) ~ x1 ##' latent(m) <- ~eta ##' labels(m) <- c(y1=expression(y[scriptscriptstyle(1)]), ##' y2=expression(y[scriptscriptstyle(2)]), ##' x1=expression(x[scriptscriptstyle(1)]), ##' x2=expression(x[scriptscriptstyle(2)]), ##' eta=expression(eta)) ##' edgelabels(m, eta ~ z+x1+x2, cex=2, lwd=3, ##' col=c("orange","lightblue","lightblue")) <- expression(rho,phi,psi) ##' nodecolor(m, vars(m), border="white", labcol="darkblue") <- NA ##' nodecolor(m, ~y1+y2+z, labcol=c("white","white","black")) <- NA ##' plot(m,cex=1.5) ##' ##' d <- sim(m,100) ##' e <- estimate(m,d) ##' plot(e) ##' ##' m <- lvm(c(y1,y2) ~ eta) ##' regression(m) <- eta ~ z+x2 ##' regression(m) <- c(eta,z) ~ x1 ##' latent(m) <- ~eta ##' plot(lava:::beautify(m,edgecol=FALSE)) ##' } ##' @export ##' @method plot lvm `plot.lvm` <- function(x,diag=FALSE,cor=TRUE,labels=FALSE,intercept=FALSE,addcolor=TRUE,plain=FALSE,cex,fontsize1=10,noplot=FALSE,graph=list(rankdir="BT"), attrs=list(graph=graph), unexpr=FALSE, addstyle=TRUE,plot.engine=lava.options()$plot.engine,init=TRUE, layout=lava.options()$layout, edgecolor=lava.options()$edgecolor, graph.proc=lava.options()$graph.proc, ...)
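## Falls back through the available plotting engines (Rgraphviz -> visNetwork
## -> igraph) depending on which packages are installed. Minimal illustration
## (assumes at least one of these packages is available):
##   m <- lvm(y ~ x + z); plot(m, labels=TRUE)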
{ if (is.null(vars(x))) { message("Nothing to plot: model has no variables.") return(NULL) } index(x) <- reindex(x) ## if (length(index(x)$vars)<2) { ## message("Not available for models with fewer than two variables") ## return(NULL) ## } myhooks <- gethook("plot.post.hooks") for (f in myhooks) { x <- do.call(f, list(x=x,...)) } plot.engine <- tolower(plot.engine) if (plot.engine=="rgraphviz" && (!(requireNamespace("graph",quietly=TRUE)) || !(requireNamespace("Rgraphviz",quietly=TRUE)))) { plot.engine <- "visnetwork" } if (plot.engine=="visnetwork" && (!(requireNamespace("visNetwork",quietly=TRUE)))) { plot.engine <- "igraph" } if (plot.engine=="igraph") { if (!requireNamespace("igraph",quietly=TRUE)) { message("package 'Rgraphviz','igraph' or 'visNetwork' not available") return(NULL) } L <- igraph::layout.sugiyama(g <- igraph.lvm(x,...))$layout if (noplot) return(graph::updateGraph(g)) dots <- list(...) if (is.character(layout)) plot(g,layout=L,...) else plot(g,layout=layout,...) return(invisible(g)) } if (plot.engine=="visnetwork") { g <- vis.lvm(x,labels=labels,...) if (!noplot) print(g) return(g) } if (init) { if (!is.null(graph.proc)) { x <- do.call(graph.proc, list(x,edgecol=edgecolor,...)) } g <- finalize(x,diag=diag,cor=cor,addcolor=addcolor,intercept=intercept,plain=plain,cex=cex,fontsize1=fontsize1,unexpr=unexpr,addstyle=addstyle) } else { g <- Graph(x) } if (labels) { AP <- matrices(x,paste0("p",seq_len(index(x)$npar))) mylab <- AP$P; mylab[AP$A!="0"] <- AP$A[AP$A!="0"] mylab[!is.na(x$par)] <- x$par[!is.na(x$par)] mylab[!is.na(x$covpar)] <- x$covpar[!is.na(x$covpar)] g <- edgelabels(g, lab=mylab) } if (lava.options()$debug) { plot(g) } else { ## graphRenderInfo(g)$recipEdges <- "distinct" .savedOpt <- options(warn=-1) ## Temporarily disable warnings as renderGraph comes with a stupid warning when labels are given as "expression" dots <- list(...) dots$attrs <- attrs dots$x <- g dots$recipEdges <- "distinct" if (attributes(g)$feedback) dots$recipEdges <- c("combine") if (is.null(dots$layoutType)) dots$layoutType <- layout[1] if (all(index(x)$A==0)) dots$layoutType <- "circo" g <- do.call(getFromNamespace("layoutGraph","Rgraphviz"), dots) ## Temporary work around: graph::nodeRenderInfo(g)$fill <- graph::nodeRenderInfo(dots$x)$fill graph::nodeRenderInfo(g)$col <- graph::nodeRenderInfo(dots$x)$col graph::edgeRenderInfo(g)$col <- graph::edgeRenderInfo(dots$x)$col if (noplot) return(g) res <- tryCatch(Rgraphviz::renderGraph(g),error=function(e) NULL) { # Redo nodes to avoid edges overlapping node borders par(new=TRUE) res <- tryCatch(Rgraphviz::renderGraph(g,drawEdges=NULL,new=FALSE),error=function(e) NULL) } options(.savedOpt) } ## if (!is.null(legend)) { ## op <- par(xpd=TRUE) ## legend(legend, c("Exogenous","Endogenous","Latent","Time to event"), ## pt.cex=1.5, pch=15, lty=0, col=cols[1:4], cex=0.8) ## par(op) ## } myhooks <- gethook("plot.hooks") for (f in myhooks) { do.call(f, list(x=x,...)) } invisible(g) } ###}}} plot.lvm ###{{{ vis.lvm vis.lvm <- function(m,randomSeed=1,width="100%",height="700px",labels=FALSE,cor=TRUE,...) 
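## Builds an interactive 'visNetwork' widget: nodes are colored by variable
## type (exogenous/endogenous/latent, latent variables drawn as circles),
## regression arrows are taken from the model edge list, and covariance
## parameters are added as dashed edges.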
{ if (!requireNamespace("visNetwork",quietly=TRUE)) stop("'visNetwork' required") types <- rep("endogenous",length(vars(m))) types[index(m)$eta.idx] <- "latent" types[index(m)$exo.idx] <- "exogenous" col <- lava.options()$node.color colors <- rep(col[2],length(types)) colors[index(m)$eta.idx] <- col[3] colors[index(m)$exo.idx] <- col[1] trf <- transform(m) if (length(trf)>0) { colors[which(index(m)$vars%in%names(trf))] <- col[4] } shapes <- rep("box",length(types)) shapes[index(m)$eta.idx] <- "circle" nodes <- data.frame(id=seq_along(types), label=vars(m), color=colors, shape=shapes, shadow=TRUE, size=rep(1.0,length(types)), group=types) edges <- cbind(edgeList(m))#,shadow=TRUE) AP <- matrices(m,paste0("p",seq_len(index(m)$npar))) if (labels) { mylab <- AP$A; mylab[!is.na(m$par)] <- m$par[!is.na(m$par)] lab <- c() for (i in seq(nrow(edges))) { lab <- c(lab,t(mylab)[edges[i,1],edges[i,2]]) } edges <- cbind(edges,label=lab) } if (length(edges)>0) edges <- cbind(edges,dashes=FALSE,arrows="from") if (cor) { mylab <- AP$P mylab[!is.na(m$covpar)] <- m$covpar[!is.na(m$covpar)] coredges <- data.frame(from=numeric(),to=numeric(),label=character()) for (i in seq_len(nrow(mylab)-1)) { for (j in seq(i+1,nrow(mylab))) { if (mylab[i,j]!="0") { coredges <- rbind(coredges, data.frame(from=i,to=j,label=mylab[i,j])) } } } if (nrow(coredges)>0) { if (!labels) coredges <- coredges[,1:2,drop=FALSE] coredges <- cbind(coredges,dashes=TRUE,arrows="false") edges <- rbind(edges,coredges) } } if (length(edges)>0) edges$physics <- TRUE v <- visNetwork::visNetwork(nodes,edges,width=width,height=height,...) v <- visNetwork::visEdges(v, arrows=list(from=list(enabled=TRUE, scaleFactor = 0.5)), scaling = list(min = 2, max = 2)) v <- visNetwork::visLayout(v,randomSeed=randomSeed) v } ###}}} vis.lvm ###{{{ plot.lvmfit ##' @export `plot.lvmfit` <- function(x,diag=TRUE,cor=TRUE,type,noplot=FALSE,fontsize1=5,f,graph.proc=lava.options()$graph.proc,...) { if (!missing(f)) { return(plot.estimate(x,f=f,...)) } .savedOpt <- options(warn=-1) ## Temporarily disable warnings as renderGraph comes with a stupid warning when labels are given as "expression" if (!requireNamespace("graph",quietly=TRUE)) { plot(Model(x),...) return(invisible(x)) } g <- Graph(x) newgraph <- FALSE if (is.null(g)) { newgraph <- TRUE if (!is.null(graph.proc)) { Model(x) <- beautify(Model(x),edgecol=FALSE,...) } Graph(x) <- finalize(Model(x), diag=TRUE, cor=FALSE, fontsize1=fontsize1, ...) } if(noplot) return(Graph(x)) if (newgraph) { if (missing(type)) type <- "est" x <- edgelabels(x, type=type, diag=diag, cor=cor, fontsize1=fontsize1, ...) } else { if (!missing(type)) { x <- edgelabels(x, type=type, diag=diag, cor=cor, fontsize1=fontsize1, ...) } } g <- Graph(x) var <- rownames(covariance(Model(x))$rel) if (!cor) { delta <- 1 for (r in seq_len(nrow(covariance(Model(x))$rel)-delta) ) { for (s in seq(r+delta,ncol(covariance(Model(x))$rel)) ) { if (covariance(Model(x))$rel[r,s]==1) { g <- graph::removeEdge(var[r],var[s], g) g <- graph::removeEdge(var[s],var[r], g) } } } } if (!diag) { for (r in seq_len(nrow(covariance(Model(x))$rel)) ) { if (graph::isAdjacent(g,var[r],var[r])) g <- graph::removeEdge(var[r],var[r],g) } } m <- Model(x); Graph(m) <- g g <- plot(m, diag=diag, cor=cor, fontsize1=fontsize1, init=FALSE, ...) options(.savedOpt) invisible(g) } ###}}} plot.lvmfit ###{{{ plot.multigroup ##' @export plot.multigroup <- function(x,diag=TRUE,labels=TRUE,...) { k <- x$ngroup for (i in seq_len(k)) plot(x$lvm[[i]],diag=diag,labels=labels, ...) 
} ##' @export plot.multigroupfit <- function(x,...) { plot(Model(x),...) } ###}}} ###{{{ igraph.lvm ##' @export igraph.lvm <- function(x,layout=igraph::layout.kamada.kawai,...) { requireNamespace("igraph",quietly=TRUE) oC <- covariance(x)$rel for (i in seq_len(nrow(oC)-1)) for (j in seq(i+1,nrow(oC))) { if (oC[i,j]!=0) { x <- regression(x,vars(x)[i],vars(x)[j]) x <- regression(x,vars(x)[j],vars(x)[i]) } } g <- igraph::graph.adjacency(x$M,mode="directed") igraph::V(g)$color <- "lightblue" igraph::V(g)$label <- vars(x) igraph::V(g)$shape <- "rectangle" for (i in match(latent(x),igraph::V(g)$name)) { igraph::V(g)$shape[i] <- "circle" igraph::V(g)$color[i] <- "green" } endo <- index(x)$endogenous for (i in match(endo,igraph::V(g)$name)) { igraph::V(g)$color[i] <- "orange" } igraph::E(g)$label <- as.list(rep("",length(igraph::E(g)))) oE <- edgelabels(x) for (i in seq_along(igraph::E(g))) { st <- as.character(oE[i]) if (length(st)>0) igraph::E(g)$label[[i]] <- st } g$layout <- layout(g) return(g) } ###}}} igraph.lvm beautify <- function(x,col=lava.options()$node.color,border=rep("black",3),labcol=rep("darkblue",3),edgecol=TRUE,...) { if (is.null(x$noderender$fill)) notcolored <- vars(x) else notcolored <- vars(x)[is.na(x$noderender$fill)] x0 <- intersect(notcolored,exogenous(x)) if (length(x0)>0) nodecolor(x, x0, border=border[1], labcol=labcol[1]) <- col[1] x0 <- intersect(notcolored,endogenous(x)) if (length(x0)>0) nodecolor(x, x0, border=border[1], labcol=labcol[1]) <- col[2] x0 <- intersect(notcolored,latent(x)) if (length(x0)>0) nodecolor(x, x0, border=border[1], labcol=labcol[1]) <- col[3] trimmed <- gsub("[[:digit:]]*$","",vars(x)) keep <- num <- c() for (i in seq_len(length(vars(x)))) { lb <- labels(x)[vars(x)[i]] if (is.null(try(eval(lb),silent=TRUE))) { keep <- c(keep,i) num <- c(num,gsub(trimmed[i],"",vars(x)[i])) } } if (length(keep)>0) { trimmed <- trimmed[keep] trim <- gsub(" ",",",trimmed) lab <- paste0('"',vars(x)[keep],'"',"=",paste0("expression(",trim,"[scriptscriptstyle(",num,")])"),collapse=",") labels(x) <- eval(parse(text=paste("c(",lab,")"))) } if (!edgecol) return(x) iex <- index(x)$exo.idx ien <- index(x)$endo.idx ila <- index(x)$eta.idx for (i in iex) { for (j in which(x$M[i,]==1)) { elab <- edgelabels(x,to=vars(x)[j],from=rev(vars(x)[i])) elab2 <- try(eval(elab),silent=TRUE) if (is.null(elab2)) elab2 <- "" edgelabels(x, to=vars(x)[j], from=rev(vars(x)[i]), cex=2, lwd=3,col=col[1]) <- elab2 } } for (i in ien) { for (j in which(x$M[i,]==1)) { elab <- edgelabels(x,to=vars(x)[j],from=rev(vars(x)[i])) elab2 <- try(eval(elab),silent=TRUE) if (is.null(elab2)) elab2 <- "" edgelabels(x, to=vars(x)[j], from=rev(vars(x)[i]), cex=2, lwd=3,col=col[2]) <- elab2 } } for (i in ila) { for (j in which(x$M[i,]==1)) { elab <- edgelabels(x,to=vars(x)[j],from=rev(vars(x)[i])) elab2 <- try(eval(elab),silent=TRUE) if (is.null(elab2)) elab2 <- "" if (is.null(try(eval(elab),silent=TRUE))) elab <- "" edgelabels(x, to=vars(x)[j], from=rev(vars(x)[i]), cex=2, lwd=3,col=col[3]) <- elab2 } } x } lava/R/csplit.R0000644000176200001440000000324613162174023013023 0ustar liggesusers##' Split data into folds ##' ##' @title Split data into folds ##' @param x Data or integer (size) ##' @param p Number of folds, or if a number between 0 and 1 is given two folds of size p and (1-p) will be returned ##' @param replace With or with-out replacement ##' @param return.index If TRUE index of folds are returned otherwise the actual data splits are returned (default) ##' @param k (Optional, only used when p=NULL) 
number of folds without shuffling ##' @param ... additional arguments to lower level functions ##' @export ##' @aliases csplit foldr ##' @examples ##' foldr(5,2,rep=2) ##' csplit(10,3) ##' csplit(iris[1:10,]) ## Split in two sets 1:(n/2) and (n/2+1):n ##' csplit(iris[1:10,],0.5) ##' @author Klaus K. Holst csplit <- function(x,p=NULL,replace=FALSE,return.index=FALSE,k=2,...) { if (length(x)==1 & is.numeric(x)) x <- seq(x) N <- NROW(x) if (is.null(p)) { ## K <- base::round(N/k) idx <- split(seq(N),sort(rep(seq(k),length.out=N,each=K))) } else { if (p<1) { ## two folds (size N*p and N*(1-p)) idx1 <- base::sample(N,base::round(p*N),replace=replace) idx <- list(idx1, base::sample(setdiff(seq(N),idx1),replace=replace)) } else { ## Number of folds (equal size) idx <- split(sample(seq(N)), rep(seq(p), length=N)) } } if (return.index) return(idx) if (!is.vector(x)) { return(lapply(idx,function(ii) x[ii,,drop=FALSE])) } return(lapply(idx,function(ii) x[ii])) } ##' @export foldr <- function(n,K=5,rep=10) { replicate(rep,split(sample(seq(n)), rep(seq(K), length=n)),simplify=FALSE) } lava/R/modelVar.R0000644000176200001440000000063113162174023013271 0ustar liggesusers ###{{{ modelVar ##' @export `modelVar` <- function(x,p,...) UseMethod("modelVar") ##' @export modelVar.lvmfit <- function(x, p=pars(x), ...) modelVar(Model(x),p=p,...) ##' @export modelVar.lvm <- function(x,p,data,...) { pp <- modelPar(x,p) res <- moments(x, p=p, data=data,...) attr(res, "pars") <- pp$p attr(res, "meanpar") <- pp$meanpar attr(res, "epar") <- pp$epar res } ###}}} modelVar lava/R/zorg.R0000644000176200001440000000544113162174023012505 0ustar liggesusers##' Convert object to ascii suitable for org-mode ##' ##' @title Convert object to ascii suitable for org-mode ##' @param x R object ##' @param ... additional arguments to lower level functions ##' @param ncol If \code{x} is a vector and \code{ncol} is given as argument, the resulting output will be a \code{matrix} with \code{ncol} columns ##' @param include.rownames If \code{FALSE} row names are removed ##' @param include.colnames If \code{FALSE} column names are removed ##' @param header If TRUE the header is included ##' @param frame Frame argument (see \code{ascii}) ##' @param rownames Optional vector of row names ##' @param colnames Optional vector of column names ##' @param type Type argument (see \code{ascii}) ##' @param tab Tabulate? ##' @param margins Add margins to table? ##' @param print print or return result ##' @param html HTML prefix (added to ATTR_HTML) ##' @param latex LaTeX prefix (added to ATTR_LaTeX) ##' @param sep separator with type='ascii' ##' @author Klaus K. Holst ##' @export Org <- function(x,...,ncol,include.rownames=TRUE,include.colnames=TRUE,header=TRUE, frame="topbot",rownames=NULL,colnames=NULL,type="org",tab=FALSE,margins=TRUE,print=TRUE,html,latex,sep=" ") { if (type=="ascii") { x <- format(x,...) dots <- c(list(x=paste(x,collapse=sep)),list(...)) dots <- dots[intersect(names(dots),names(formals(strwrap)))] writeLines(do.call(strwrap,dots)) return(invisible(x)) } if (!requireNamespace("ascii",quietly=TRUE)) stop("ascii package required") dots <- list(...) 
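## Remaining branches: optionally tabulate 'x' (adding margins), reshape
## vectors into a matrix with 'ncol' columns, and delegate the formatting to
## ascii::ascii(), prefixing '#+ATTR_HTML:'/'#+ATTR_LaTeX:' lines when
## requested. Illustration only (requires the 'ascii' package):
##   Org(iris$Species, tab=TRUE)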
if (tab) { if (!inherits(x,"table")) { x <- table(x) } if (is.null(dots$digits)) dots$digits <- 0 if (margins) x <- addmargins(x) } if (!missing(ncol)) { y <- formatC(as.vector(x)) n0 <- length(y)%%ncol if (n0 > 0) y <- c(y, rep("", ncol - n0)) x <- matrix(y, ncol = ncol, byrow = TRUE) } if (is.vector(x)) { if (is.null(names(x))) { include.colnames <- FALSE header <- FALSE } x <- rbind(x) if (!is.null(rownames)) { rownames(x) <- rownames[1] } else { include.rownames <- FALSE } } args <- c(list(x=x,include.rownames=include.rownames,include.colnames=include.colnames,header=header,frame=frame,type=type,rownames=rownames,colnames=colnames),dots) x <- do.call(getFromNamespace("ascii","ascii"),args) if (print) { op <- options(asciiType=type) if (!missing(html)) cat("#+ATTR_HTML: ",html,"\n",sep="") if (!missing(latex)) cat("#+ATTR_LaTeX: ",latex,"\n",sep="") suppressWarnings(do.call(getFromNamespace("print", "ascii"),c(x=x,dots))) options(op) } invisible(x) } org <- Org lava/R/curly.R0000644000176200001440000000575113162174023012666 0ustar liggesusers##' Adds curly brackets to plot ##' ##' @title Adds curly brackets to plot ##' @param x center of the x axis of the curly brackets (or start end coordinates (x1,x2)) ##' @param y center of the y axis of the curly brackets (or start end coordinates (y1,y2)) ##' @param len Length of the curly brackets ##' @param theta angle (in radians) of the curly brackets orientation ##' @param wid Width of the curly brackets ##' @param shape shape (curvature) ##' @param col color (passed to lines/grid.lines) ##' @param lwd line width (passed to lines/grid.lines) ##' @param lty line type (passed to lines/grid.lines) ##' @param grid If TRUE use grid graphics (compatability with ggplot2) ##' @param npoints Number of points used in curves ##' @param text Label ##' @param offset Label offset (x,y) ##' @export ##' @examples ##' if (interactive()) { ##' plot(0,0,type="n",axes=FALSE,xlab="",ylab="") ##' curly(x=c(1,0),y=c(0,1),lwd=2,text="a") ##' curly(x=c(1,0),y=c(0,1),lwd=2,text="b",theta=pi) ##' curly(x=-0.5,y=0,shape=1,theta=pi,text="c") ##' curly(x=0,y=0,shape=1,theta=0,text="d") ##' curly(x=0.5,y=0,len=0.2,theta=pi/2,col="blue",lty=2) ##' curly(x=0.5,y=-0.5,len=0.2,theta=-pi/2,col="red",shape=1e3,text="e") ##' } curly <- function(x,y,len=1,theta=0, wid,shape=1, col=1,lwd=1,lty=1, grid=FALSE,npoints=50,text=NULL,offset=c(0.05,0)) { if (length(x)==2 || length(y)==2) { x <- rep(x,length.out=2) y <- rep(y,length.out=2) v <- c(x[1]-x[2],y[1]-y[2]) v0 <- c(1,0)-v len <- sum(v^2)^.5 innerprod <- sum(v0) theta <- acos(innerprod/len)+theta len <- len/2 x <- (x[1]-x[2])/2 y <- (y[2]-y[1])/2 } ii <- seq(0, pi/2, length.out=npoints) if (missing(wid)) { wid <- with(devcoords(), (fig.y2-fig.y1)/50) } x1 <- c(wid*(sin(ii)-1), c(0,0), wid*(1 - sin(rev(ii))), wid*(1 - sin(ii)), c(0,0), wid*(sin(rev(ii)) - 1)) y1 <- c(-cos(ii), c(0,shape), shape+(cos(rev(ii))), shape+(2 - cos(ii)), c(shape+2, 2*shape+2), 2*shape+2+cos(rev(ii))) x1 <- x1 + x + wid idx.max <- which.max(x1) max.y <- max(y1) y1 <- y1+1-(max.y+1)/2 min.y <- min(y1) y1 <- y1*len/min.y+y ## Rotation x2 <- cos(theta) * (x1 - x) - sin(theta) * (y1 - y) + x y2 <- cos(theta) * (y1 - y) + sin(theta) * (x1 - x) + y x0 <- x1[idx.max]+offset[1] y0 <- y1[idx.max]+offset[2] xm <- cos(theta) * (x0 - x) - sin(theta) * (y0 - y) + x ym <- cos(theta) * (y0 - y) + sin(theta) * (x0 - x) + y if(grid){ grid::grid.lines(grid::unit(x2,"npc"), grid::unit(y2,"npc"), gp=grid::gpar(col=col,lwd=lwd,lty=lty)) } else{ 
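## Base-graphics branch: draw the rotated bracket outline as connected line
## segments ('xpd=TRUE' allows drawing outside the plot region).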
points(x2,y2,type='l',col=col,lwd=lwd,lty=lty,xpd=TRUE) } theta <- acos(abs(cos(theta))) deg <- ((theta-pi/2)*180/pi) if (!is.null(text)) { text(xm,ym,text,srt=deg) } } lava/R/devcoords.R0000644000176200001440000000241313162174023013510 0ustar liggesusers##' Returns device-coordinates and plot-region ##' ##' @title Returns device-coordinates and plot-region ##' @return A \code{list} with elements ##' \item{dev.x1}{Device: Left x-coordinate} ##' \item{dev.x2}{Device: Right x-coordinate} ##' \item{dev.y1}{Device Bottom y-coordinate} ##' \item{dev.y2}{Device Top y-coordinate} ##' \item{fig.x1}{Plot: Left x-coordinate} ##' \item{fig.x2}{Plot: Right x-coordinate} ##' \item{fig.y1}{Plot: Bottom y-coordinate} ##' \item{fig.y2}{Plot: Top y-coordinate} ##' @author Klaus K. Holst ##' @export ##' @keywords hplot `devcoords` <- function() { cc <- par("usr") ## extremes of coordinates of plotting region (x1,x2,y1,y2) plotinch <- par("pin") ## Plot dimensions (width,height) in inches margininch <- par("mai") ## Margin sizes in inches (bottom, left, top ,right) plotlenX <- cc[2]-cc[1] unitinchX <- plotlenX/plotinch[1] plotlenY <- cc[4]-cc[3] unitinchY <- plotlenY/plotinch[2] deviceXleft <- cc[1]-unitinchX*margininch[2] deviceXright <- cc[2]+unitinchX*margininch[4] deviceYtop <- cc[4]+unitinchY*margininch[3] deviceYbottom <- cc[3]-unitinchY*margininch[1] return(list(dev.x1=deviceXleft, dev.x2=deviceXright, dev.y1=deviceYbottom, dev.y2=deviceYtop, fig.x1=cc[1], fig.x2=cc[2], fig.y1=cc[3], fig.y2=cc[4])) } lava/R/formula.R0000644000176200001440000000076513162174023013175 0ustar liggesusers##' @export formula.lvm <- function(x,char=FALSE,all=FALSE,...) { A <- index(x)$A res <- c() for (i in seq_len(ncol(A))) { if (all || !(colnames(A)[i]%in%c(index(x)$exogenous,parameter(x)) )) { f <- paste(colnames(A)[i],"~ 1") if (any(A[,i]!=0)) { f <- (paste(colnames(A)[i],"~",paste(colnames(A)[A[,i]!=0],collapse="+"))) } if (!char) f <- formula(f) res <- c(res, list(f)) } } return(res) } ##' @export formula.lvmfit <- formula.lvm lava/R/startvalues.R0000644000176200001440000003061713162174023014104 0ustar liggesusersmgstart <- function(x,p) { if (is.list(p)) { npar <- with(x, npar+npar.mean+length(unlist(expar))) pos <- modelPar(x,seq(npar))$p start <- matrix(NA,ncol=npar,nrow=x$ngroup) startl <- lapply(pos, function(x) rep(NA,length(x))) for (i in seq_len(x$ngroup)) { p0 <- p[[i]] pos0 <- pos[[i]] if (!is.null(names(p0))) { ii <- parpos(Model(x)[[i]],names(p0)) startl[[i]][ii] <- p0[names(ii)] } else { ii <- seq(min(length(p0),length(startl[[i]]))) startl[[i]][ii] <- p0[ii] } start[i,pos0] <- startl[[i]] } start0 <- apply(start,2,function(x) mean(x,na.rm=TRUE)) } return(start0) } ###{{{ starter.multigroup ##' @export starter.multigroup <- function(x, starterfun=startvalues2, meanstructure=TRUE,silent=TRUE,...) 
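## Starting values for multigroup models: start values are computed separately
## for each group (via 'starterfun' applied to the group sample moments) and
## combined as a weighted average with weights proportional to the group
## sample sizes; non-finite entries are replaced by 0.5.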
{ ## Initial values: W <- c() ## Weight-vector s <- list() for (i in seq_len(x$ngroup)) { mydata <- x$data[[i]][,manifest(x$lvm[[i]]),drop=FALSE] W <- c(W, nrow(mydata)) if (nrow(mydata)<3) { ii <- index(x$lvm[[i]]) nn <- ifelse(meanstructure, ii$npar+ii$npar.mean, ii$npar) s0 <- rep(1,nn) } else { S <- x$samplestat[[i]]$S mu <- if (meanstructure) x$samplestat[[i]]$mu else NULL; ## S <- cov(mydata); mu <- if (meanstructure) colMeans(mydata) else NULL; s0 <- do.call(starterfun, list(x$lvm[[i]], S=S, mu=mu,silent=TRUE)) } s <- c(s, list(s0)) } Wtotal <- sum(W); W <- W/Wtotal pg <- vector("list", x$npar); for (i in seq_len(length(pg))) pg[[i]] <- rep(0,x$ngroup) meang <- vector("list", x$npar.mean); for (i in seq_len(length(meang))) meang[[i]] <- rep(0,x$ngroup) for (i in seq_len(x$ngroup)) { pp <- modelPar(x$lvm[[i]],s[[i]]) pos <- sapply(x$parlist[[i]], function(y) char2num(substr(y,2,nchar(y)))) for (j in seq_len(length(pos))) pg[[ pos[j] ]][i] <- pp$p[j] pos <- sapply(x$meanlist[[i]], function(y) char2num(substr(y,2,nchar(y)))) ptype <- sapply(x$meanlist[[i]], function(y) substr(y,1,1)=="m") if (!(any(ptype))) pos <- NULL else pos <- pos[ptype] if (length(pos)>0) for (j in seq_len(length(pos))) { meang[[ pos[j] ]][i] <- pp$meanpar[j] } } ## Weighted average wp <- unlist(lapply(pg, function(y) { ppos <- !is.na(y) myweight <- W[ppos]/sum(W[ppos]) sum(y[ppos]*myweight) })) wmean <- unlist(lapply(meang, function(y) { ppos <- !is.na(y) myweight <- W[ppos]/sum(W[ppos]) sum(y[ppos]*myweight) })) res <- c(wmean,wp) res[!is.finite(res) | is.nan(res) | is.na(res) | is.complex(res)] <- .5 return(as.numeric(res)) } ###}}} ###{{{ startmean startmean <- function(x,p,mu) { if (is.null(mu)) return(p) meanpar <- numeric(index(x)$npar.mean) mymeans <- vars(x)[index(x)$v1==1] midx <- na.omit(match(names(mu),mymeans)) meanpar[midx] <- mu[midx] AP <- matrices(x,p,meanpar) nu <- numeric(length(vars(x))) nu[vars(x)%in%manifest(x)] <- mu meanstart <- ((diag(nrow=nrow(AP$A))-t(AP$A))%*%nu)[index(x)$v1==1] names(meanstart) <- vars(x)[index(x)$v1==1] return( c(meanstart, p) ) } ###}}} ###{{{ startvalues3 `startvalues3` <- function(x, S, debug=FALSE, tol=1e-6,...) 
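## Alternative starting values: the indicator with a fixed loading anchors the
## scale of each latent variable, and the remaining loadings, regressions on
## covariates, latent variances and residual variances are then derived from
## the empirical covariance matrix S.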
{ S <- reorderdata.lvm(x,S) if (nrow(S)!=length(manifest(x))) stop("Number of observed variables in data and models does not agree.") J <- index(x)$J ## Manifest selection P0 <- index(x)$P0 ## covariance 'adjacency' A <- t(index(x)$M) ## Adjacency matrix n <- nrow(S) ## Number of manifest variables m <- nrow(A) ## Number of variables A1 <- t(index(x)$M1) ## Adjacency matrix (without fixed parameters and duplicates) A0 <- t(index(x)$M0) ## Adjacency matrix (without fixed parameters) obs.idx <- index(x)$obs.idx; ##obs.idx <- as.vector(J%*%(seq_len(m))); latent.idx <- setdiff(seq_len(m), obs.idx) lat <- colnames(A)[latent.idx] exo.idx <- index(x)$exo.idx ## match(exogenous(x),vars(x)) exo.idxObs <- index(x)$exo.obsidx ##match(exogenous(x),manifest(x)) AP0 <- moments(x, rep(0,index(x)$npar)) newP <- t(AP0$P) newA <- t(AP0$A) fixed <- t(x$fix) for (i in latent.idx) { fix.idx <- colnames(fixed)[which(!is.na(t(fixed[,i])))[1]] lambda0 <- newA[fix.idx,i] rel.idx <- which(A0[,i]==1) rel.all <- which(A[,i]==1) rel.pos <- colnames(A)[rel.all] ## Estimation of lambda (latent -> endogenous) for (j in rel.idx) { lambda <- lambda0*S[fix.idx, j]/S[fix.idx,fix.idx] newA[j,i] <- lambda } lambdas <- newA[rel.pos,i] ## Estimation of beta (covariate -> latent) exo2latent <- which(A0[i,exo.idx]==1) exo.pos <- colnames(S)[exo.idxObs[exo2latent]] varX.eta <- S[exo.pos, exo.pos] InvvarX.eta <- Inverse(varX.eta,tol=1e-3) rel.pos <- setdiff(rel.pos,lat) covXY <- S[exo.pos, rel.pos,drop=FALSE] beta <- 0 for (j in seq_len(length(rel.pos))) beta <- beta + 1/lambdas[j]*InvvarX.eta %*% covXY[,j] beta <- beta/length(rel.pos) for (k in seq_len(length(exo.pos))) { if (A0[i,exo.pos[k]]==1) { newA[i,exo.pos[k]] <- beta[k] } } beta.eta <- matrix(newA[i,exo.pos], ncol=1) ## Estimation of zeta^2 (variance of latent variable) betavar <- matrix(beta.eta,nrow=1)%*%varX.eta%*%beta.eta zetas <- c() for (r1 in seq_len(length(rel.pos)-1)) for (r2 in seq(r1+1,length(rel.pos))) { zetas <- c(zetas, S[rel.pos[r1], rel.pos[r2]]/ (lambdas[r1]*lambdas[r2]) - betavar) } zeta <- mean(zetas) newP[i,i] <- zeta for (j in rel.all) { pos <- colnames(newA)[j] vary <- S[pos,pos] - newA[pos,i]^2*(zeta+betavar) newP[pos,pos] <- ifelse(vary<0.25,0.25,vary) } } Debug(list("start=",start), debug) start <- pars(x, A=t(newA), P=newP) return(start) } ###}}} startvalues3 ###{{{ startvalues2 ## Estimate sub-models (measurement models) ##' @export `startvalues2` <- function(x, S, mu=NULL, debug=FALSE, silent=FALSE,...) { if (!silent) cat("Obtaining start values...\n") S <- reorderdata.lvm(x,S) ss <- startvalues(x,S) Debug(list("ss=",ss),debug); g <- measurement(x,silent=TRUE) keep <- c() if (length(g)>1) { for (i in seq_len(length(g))) { if (length(endogenous(g[[i]]))>2) keep <- c(keep,i) } g <- g[keep] } if (length(g)<2) return(startmean(x,ss,mu=mu)) ## if (!silent) cat("Fitting marginal measurement models...\n") op <- options(warn=-1) e <- lapply(g, function(y) { estimate(y, data=list(S=S[manifest(y),manifest(y),drop=FALSE], mu=mu[manifest(y)], n=100), control=list(meanstructure=FALSE, starterfun="startvalues", estimator="Simple", method="nlminb1"), optcontrol=list(), debug=FALSE, silent=TRUE) }) for (l in e) { ## a <- coef(l$estimate)[,1] a <- coef(l) for (i in seq_len(length(a))) { pos <- match(names(a)[i],names(ss)) if (!is.na(pos)) ss[pos] <- a[i] } } options(op) startmean(x,ss,mu=mu) } ###}}} startvalues2 ###{{{ startvalues0 ##' @export startvalues1 <- function(x,S,mu=NULL,tol=1e-6,delta=1e-6,...) { p0 <- startvalues(x,S,mu,...) 
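## Conservative variant of 'startvalues': residual variance parameters are
## started at 0.1 and covariance (off-diagonal) parameters at zero.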
p0[index(x)$npar.mean+variances(x)] <- 0.1 p0[index(x)$npar.mean+offdiags(x)] <- 0 p0 } startvalues00 <- function(x,S,mu=NULL,tol=1e-6,delta=1e-6,...) { p0 <- startvalues(x,S,mu,...) p0 <- numeric(length(p0)) P0 <- x$cov*1 ##P0[!is.na(x$covfix)] <- ##P0 <- x$covfix; P0[is.na(P0)] <- 0 ##diag(P0)[index(x)$endo.idx] <- diag(S)[index(x)$endo.obsidx]/2 lu <- min(diag(P0)[index(x)$endo.idx])/2 ## diag(P0)[] <- 0.1 ## diag(P0)[index(x)$endo.idx] <- 1 diag(P0)[index(x)$eta.idx] <- 0.1 ##mean(diag(S)[index(x)$endo.idx])/2 ee <- eigen(P0) tol <- 1e-6 ii <- ee$values ii[ee$values0) ## pp[seq(length(meanstart))] <- meanstart ## } names(pp) <- coef(x, silent=TRUE, fixed=FALSE, mean=TRUE)[seq_len(length(pp))] pp[!is.finite(pp) | is.nan(pp) | is.na(pp)] <- 0.01 return(pp) } ###}}} startvalues0 ###{{{ startvalues ## McDonald & Hartmann, 1992 ##' @export startvalues <- function(x, S, mu=NULL, debug=FALSE, silent=FALSE, tol=1e-6, delta=1e-6,...) { ## As proposed by McDonald & Hartmann, 1992. ## Implementation based on John Fox's implementation in the 'sem' R-package S <- reorderdata.lvm(x,S) if (nrow(S)!=length(manifest(x))) stop("Number of observed variables in data and models does not agree.") J <- index(x)$J ## Manifest selection P0 <- index(x)$P0 ## covariance 'adjacency' A <- t(index(x)$M) ## Adjacency matrix n <- nrow(S) ## Number of manifest variables m <- nrow(A) ## Number of variables A0 <- t(index(x)$M0) ## Adjacency matrix (without fixed parameters) obs.idx <- as.vector(J%*%(seq_len(m))); latent.idx <- setdiff(seq_len(m), obs.idx) s <- sqrt(diag(S)) suppressWarnings(R <- (cov2cor(S))) ## S/outer(s,s) C <- P0 Debug(list("obs.idx", obs.idx), debug) C[obs.idx,obs.idx] <- R ## Estimates of covariance between latent and manifest variables Debug((C), debug) for (i in latent.idx) { inRelation <- A[obs.idx,i]==1 for (j in seq_len(length(obs.idx))) { Debug((j), debug) C[obs.idx[j],i] <- C[i,obs.idx[j]] <- if (any(inRelation)) { numerator <- sum(R[j, which(inRelation)]) denominator <- sqrt(sum(R[which(inRelation), which(inRelation)])) numerator/denominator ## as proposed by McDonald & Hartmann } else { runif(1, .3, .5) ## No arrows => small random covariance } } } ## Estimates of covariance between latent variables for (i in latent.idx) { for (j in latent.idx) { C[i,j] <- C[j,i] <- if (i==j) { 1 } else { inRelation.i <- A[obs.idx, i]==1 inRelation.j <- A[obs.idx, j]==1 if ((any(inRelation.i)) | (any(inRelation.j))) { numerator <- sum(R[which(inRelation.i), which(inRelation.j)]) denominator <- sqrt( sum(R[which(inRelation.i), which(inRelation.i)]) * sum(R[which(inRelation.j), which(inRelation.j)])) numerator/(denominator+0.01) ## Avoid division by zero } else { runif(1, .3, .5) } } } } if (debug) { print("C="); print(C); } Ahat <- matrix(0,m,m) C[is.nan(C)] <- 0 for (j in seq_len(m)) { ## OLS-estimates relation <- A[j,]==1 if (!any(relation)) next Ahat[j, relation] <- tryCatch(Inverse(C[relation,relation] + diag(nrow=sum(relation))*delta,tol=1e-3) %*% C[relation,j], error=function(...) 
0) } Ahat[obs.idx,] <- Ahat[obs.idx,]*matrix(s, n, m) Ahat[,obs.idx] <- Ahat[,obs.idx]/matrix(s, m, n, byrow=TRUE) Chat <- C Chat[obs.idx,] <- Chat[obs.idx,]*matrix(s,n,m) ## Chat[,obs.idx] <- Chat[,obs.idx]*matrix(s,m,n,byrow=TRUE) ## Phat <- (diag(m)-Ahat)%*%Chat%*%t(diag(m)-Ahat) ##diag(Phat) <- abs(diag(Phat)) ## Guarantee PD-matrix: Phat[is.nan(Phat) | is.na(Phat)] <- 0 diag(Phat)[diag(Phat)==0] <- 1 eig <- eigen(Phat) L <- abs(eig$values); L[L<1e-3] <- 1e-3 Phat <- eig$vectors%*%diag(L,ncol=ncol(eig$vectors))%*%t(eig$vectors) Debug(list("start=",start), debug) start <- pars(x, A=t(Ahat*A0), P=(Phat*P0)) names(start) <- coef(x, silent=TRUE, fixed=FALSE, mean=FALSE)[seq_len(length(start))] res <- startmean(x,start,mu) res[!is.finite(res) | is.nan(res) | is.na(res)] <- 1 res } ###}}} startvalues lava/R/predict.R0000644000176200001440000002657413162174023013170 0ustar liggesusers##' @export predict.lvmfit <- function(object,x=NULL,y=NULL,data=model.frame(object),p=coef(object),...) { predict(Model(object),x=x,y=y,p=p,data=data,...) } ##' @export predict.lvm.missing <- function(object,x=NULL,y=NULL,data=model.frame(object),p=coef(object),...) { idx <- match(coef(Model(object)),names(coef(object))) xx <- exogenous(object) p <- p[idx] if (!is.null(x)) { if (inherits(x,"formula")) { xy <- getoutcome(x) if (length(xy)>0) { if (is.null(y)) y <- decomp.specials(xy) } x <- attributes(xy)$x } x <- intersect(x,endogenous(object)) if (is.null(y)) y <- setdiff(vars(object),c(x,xx)) } obs0 <- !is.na(data[,x,drop=FALSE]) data[,xx][which(is.na(data[,xx]),arr.ind=TRUE)] <- 0 pp <- predict.lvmfit(object,x=x,y=y,data=data,p=p,...) if (all(obs0)) return(pp) if (!requireNamespace("mets",quietly=TRUE)) stop("Requires 'mets'") obs <- mets::fast.pattern(obs0) res <- matrix(nrow=nrow(data),ncol=NCOL(pp)) for (i in seq(nrow(obs$pattern))) { jj <- which(obs$pattern[i,]==1) ii <- which(obs$group==i-1) if (length(jj)==0) { res[ii,] <- NA } else { res[ii,] <- predict.lvmfit(object,...,p=p,x=x[jj],y=y,data=data[ii,,drop=FALSE])[,colnames(pp),drop=FALSE] } } attributes(res) <- attributes(pp) return(res) } ##' Prediction in structural equation models ##' ##' Prediction in structural equation models ##' @param object Model object ##' @param x optional list of (endogenous) variables to condition on ##' @param y optional subset of variables to predict ##' @param residual If true the residuals are predicted ##' @param p Parameter vector ##' @param data Data to use in prediction ##' @param path Path prediction ##' @param quick If TRUE the conditional mean and variance given covariates are returned (and all other calculations skipped) ##' @param \dots Additional arguments to lower level function ##' @seealso predictlvm ##' @examples ##' m <- lvm(list(c(y1,y2,y3)~u,u~x)); latent(m) <- ~u ##' d <- sim(m,100) ##' e <- estimate(m,d) ##' ##' ## Conditional mean (and variance as attribute) given covariates ##' r <- predict(e) ##' ## Best linear unbiased predictor (BLUP) ##' r <- predict(e,vars(e)) ##' ## Conditional mean of y3 giving covariates and y1,y2 ##' r <- predict(e,y3~y1+y2) ##' ## Conditional mean gives covariates and y1 ##' r <- predict(e,~y1+y2) ##' ## Predicted residuals (conditional on all observed variables) ##' r <- predict(e,vars(e),residual=TRUE) ##' ##' @method predict lvm ##' @aliases predict.lvmfit ##' @export predict.lvm <- function(object,x=NULL,y=NULL,residual=FALSE,p,data,path=FALSE,quick=is.null(x)&!(residual|path),...) 
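## Conditional moments implied by the model: by default the conditional mean
## (with the conditional variance returned as an attribute) of the endogenous
## variables given the covariates; with 'x'/'y' specified, predictions given a
## subset of endogenous variables (e.g. BLUPs of latent variables), and with
## 'residual=TRUE' the predicted residuals -- see the roxygen examples above.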
{ ## data = data.frame of exogenous variables if (!quick && !all(exogenous(object)%in%colnames(data))) stop("data.frame should contain exogenous variables") m <- moments(object,p,data=data,...) if (quick) { ## Only conditional moments given covariates ii <- index(object) P.x <- m$P; P.x[ii$exo.idx, ii$exo.idx] <- 0 Cy.x <- (m$IAi%*% tcrossprod(P.x,m$IAi))[ii$endo.idx,ii$endo.idx,drop=FALSE] X <- ii$exogenous mu.0 <- m$v; mu.0[ii$exo.idx] <- 0 if (length(X)>0) { mu.x <- matrix(0,ncol=nrow(data),nrow=length(mu.0)) mu.x[ii$exo.idx,] <- t(data[,X,drop=FALSE]) xi.x <- t(m$IAi[ii$endo.idx,,drop=FALSE]%*%(mu.0 + mu.x)) } else { xi.x <- m$xi%x%rep(1,nrow(data)) colnames(xi.x) <- ii$endogenous ##xi.x <- matrix(as.vector(m$IAi[ii$endo.obsidx,]%*%mu.0),ncol=nrow(data),nrow=length(mu.0)) ##rownames(xi.x) <- names(mu.0) } return(structure(xi.x,cond.var=Cy.x, p=m$p, e=m$e)) } X <- exogenous(object) Y <- setdiff(manifest(object), X) if (path) { X <- colnames(data) Y <- setdiff(Y,X) idx <- which(vars(object)%in%X) if (length(Y)==0) stop("New data set should only contain exogenous variables and a true subset of the endogenous variables for 'path' prediction.") A <- t(m$A) A[,idx] <- 0 ## i.e., A <- A%*%J IAi <- solve(diag(nrow=nrow(A))-t(A)) mu.0 <- m$v; mu.0[X] <- 0 mu.x <- matrix(0,ncol=nrow(data),nrow=length(mu.0)) mu.x[idx,] <- t(data[,vars(object)[idx],drop=FALSE]) pred <- t(IAi%*%(mu.0 + mu.x)) return(pred) ## Y <- endogenous(object,top=TRUE) ## X <- setdiff(manifest(object),Y) } IAi <- m$IAi P <- m$P X.idx <- match(X,manifest(object)) eta.idx <- match(latent(object),vars(object)) obs.idx <- match(manifest(object),vars(object)) X.idx.all <- match(X, vars(object)) Y.idx.all <- match(Y, vars(object)) ## Calculation of conditional variance given X=x P.x <- m$P; P.x[X.idx.all, X.idx.all] <- 0 C.x <- (IAi%*% P.x %*%t(IAi)) Cy.x <- C.x[Y.idx.all,Y.idx.all,drop=FALSE] ## Calculation of conditional mean given X=x G <- m$J%*%IAi mu.0 <- m$v; mu.0[X.idx.all] <- 0 if (length(X)>0) { xs <- data[,X,drop=FALSE] mu.x <- apply(xs, 1, FUN=function(i) {res <- rep(0,length(mu.0)); res[X.idx.all] <- i; res}) xi.x <- (IAi%*%(mu.0 + mu.x)) } else { xi.x <- matrix(as.vector(IAi%*%mu.0),ncol=nrow(data),nrow=length(mu.0)) rownames(xi.x) <- names(mu.0) } attr(xi.x,"cond.var") <- Cy.x if (path) return(t(xi.x)) Ey.x <- xi.x[Y.idx.all,,drop=FALSE] Eeta.x <- xi.x[eta.idx,,drop=FALSE] Cy.epsilon <- P.x%*%t(IAi) ## Covariance y,residual Czeta.y <- Cy.epsilon[eta.idx,index(object)$endo.idx] A <- m$A IA <- diag(nrow=nrow(A))-t(A) y0 <- intersect(Y,colnames(data)) ys <- data[,y0,drop=FALSE] y0.idx <- match(y0,Y) ry <- t(ys)-Ey.x[y0.idx,,drop=FALSE] if (!is.null(x)) { if (inherits(x,"formula")) { xy <- getoutcome(x) if (length(xy)>0) { if (is.null(y)) y <- decomp.specials(xy) } x <- attributes(xy)$x } if (length(x)==0) { if (!is.null(y)) { xi.x <- xi.x[y,,drop=FALSE] attr(xi.x,"cond.var") <- Cy.x[y,y,drop=FALSE] } return(t(xi.x)) } x <- intersect(x,endogenous(object)) if (is.null(y)) y <- setdiff(vars(object),c(x,exogenous(object))) if (length(x)>0) { E.x <- xi.x[y,,drop=FALSE] + C.x[y,x]%*%solve(C.x[x,x])%*%ry[x,,drop=FALSE] } else { E.x <- xi.x[y,,drop=FALSE] } if (residual) { Vhat <- matrix(0, nrow(data), length(vars(object))); colnames(Vhat) <- vars(object) Vhat[,obs.idx] <- as.matrix(data[,manifest(object),drop=FALSE]) Vhat[,y] <- t(E.x) return(t((IA%*%t(Vhat)-m$v))) } res <- t(E.x); colnames(res) <- y if (length(x)>0) { attr(res,"cond.var") <- 
C.x[y,y,drop=FALSE]-C.x[y,x,drop=FALSE]%*%solve(C.x[x,x,drop=FALSE])%*%C.x[x,y,drop=FALSE] } else { attr(res,"cond.var") <- C.x[y,y,drop=FALSE] } return(res) } ys <- data[,Y,drop=FALSE] ry <- t(ys)-Ey.x if (length(eta.idx)>0) { Ceta.x <- C.x[eta.idx,eta.idx] Lambda <- A[Y.idx.all,eta.idx,drop=FALSE] ##, ncol=length(eta.idx)) Cetay.x <- Ceta.x%*%t(Lambda) KK <- Cetay.x %*% solve(Cy.x) Eeta.y <- Eeta.x + KK %*% ry Ceta.y <- Ceta.x - KK%*% t(Cetay.x) } else { Eeta.y <- NA Ceta.y <- NA } Vhat <- matrix(0, nrow(data), length(vars(object))); colnames(Vhat) <- vars(object) Vhat[,obs.idx] <- as.matrix(data[,manifest(object)]) if (length(eta.idx)>0) Vhat[,latent(object)] <- t(Eeta.y) I <- diag(nrow=nrow(A)); epsilonhat <- (t( IA%*%t(Vhat) - m$v ))[,c(endogenous(object),latent(object)),drop=FALSE] if (residual) { return(epsilonhat) } mydata <- matrix(0,ncol=ncol(A),nrow=nrow(data)); colnames(mydata) <- vars(object) mydata[,manifest(object)] <- as.matrix(data[,manifest(object)]) for (i in latent(object)) mydata[,i] <- m$v[i] Yhat <- t(mydata%*%t(A)) + (m$v) res <- cbind(t(Ey.x)) ## Conditional mean attr(res, "cond.var") <- Cy.x attr(res, "blup") <- t(Eeta.y) attr(res, "var.blup") <- Ceta.y attr(res, "Ey.x") <- Ey.x attr(res, "eta.x") <- Eeta.x attr(res, "epsilon.y") <- epsilonhat attr(res, "p") <- m$p attr(res, "e") <- m$e class(res) <- c("lvm.predict","matrix") return(res) } ##' @export print.lvm.predict <- function(x,...) print(x[,]) ##' Predict function for latent variable models ##' ##' Predictions of conditinoal mean and variance and calculation of ##' jacobian with respect to parameter vector. ##' @export ##' @param object Model object ##' @param formula Formula specifying which variables to predict and which to condition on ##' @param p Parameter vector ##' @param data Data.frame ##' @param ... Additional arguments to lower level functions ##' @seealso predict.lvm ##' @examples ##' m <- lvm(c(x1,x2,x3)~u1,u1~z, ##' c(y1,y2,y3)~u2,u2~u1+z) ##' latent(m) <- ~u1+u2 ##' d <- simulate(m,10,"u2,u2"=2,"u1,u1"=0.5,seed=123) ##' e <- estimate(m,d) ##' ##' ## Conditional mean given covariates ##' predictlvm(e,c(x1,x2)~1)$mean ##' ## Conditional variance of u1,y1 given x1,x2 ##' predictlvm(e,c(u1,y1)~x1+x2)$var predictlvm <- function(object,formula,p=coef(object),data=model.frame(object),...) 
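## Returns the conditional mean and variance of the left-hand side variables
## given the right-hand side of 'formula', together with their Jacobians with
## respect to the parameter vector ('mean.jacobian', 'var.jacobian'), e.g. for
## delta-method standard errors.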
{ model <- Model(object) if (!missing(formula)) { yx <- getoutcome(formula) y <- decomp.specials(yx) x <- attr(yx,"x") x <- setdiff(x,index(model)$exogenous) } else { y <- index(model)$latent x <- index(model)$endogenous } endo <- with(index(model),setdiff(vars,exogenous)) idxY <- match(y,endo) idxX <- match(x,endo) ny <- length(y) if (ny==0) return(NULL) m <- modelVar(model,p,conditional=TRUE,data=data,latent=TRUE) D <- deriv.lvm(model,p,conditional=TRUE,data=data,latent=TRUE) N <- nrow(data) ii0 <- seq(N) iiY <- sort(unlist(lapply(idxY,function(x) ii0+N*(x-1)))) k <- ncol(m$xi) J <- matrix(seq(k^2),k) if (length(idxX)==0) { ## Return conditional mean and variance given covariates M <- m$xi[,idxY,drop=FALSE] dM <- D$dxi[iiY,,drop=FALSE] V <- m$C[idxY,idxY,drop=FALSE] dV <- D$dS[as.vector(J[idxY,idxY]),,drop=FALSE] } else { iiX <- sort(unlist(lapply(idxX,function(x) ii0+N*(x-1)))) X <- as.matrix(data[,x,drop=FALSE]) rX <- X-m$xi[,idxX,drop=FALSE] dX <- D$dxi[iiX,,drop=FALSE] ic <- solve(m$C[idxX,idxX,drop=FALSE]) c2 <- m$C[idxY,idxX,drop=FALSE] B <- c2%*%ic ## Conditional variance V <- m$C[idxY,idxY,drop=FALSE]-B%*%t(c2) dV <- D$dS[as.vector(J[idxY,idxY]),,drop=FALSE] - ( (B%x%diag(nrow=ny))%*%D$dS[as.vector(J[idxY,idxX]),,drop=FALSE] + -(B%x%B)%*%D$dS[as.vector(J[idxX,idxX]),,drop=FALSE] + (diag(nrow=ny)%x%B)%*%D$dS[as.vector(J[idxX,idxY]),,drop=FALSE] ) ## Conditional mean M <- m$xi[,idxY,drop=FALSE]+rX%*%t(B) dB <- (ic%x%diag(nrow=ny))%*%D$dS[as.vector(J[idxY,idxX]),,drop=FALSE]+ -(ic%x%B)%*%D$dS[as.vector(J[idxX,idxX]),,drop=FALSE] ## Find derivative of transposed matrix n0 <- as.vector(matrix(seq(prod(dim(B))),ncol=nrow(B),byrow=TRUE)) dB. <- dB[n0,,drop=FALSE] dM <- D$dxi[iiY,,drop=FALSE] + ((diag(nrow=ny)%x%rX)%*%dB.) - kronprod(B,dX) } colnames(M) <- y dimnames(V) <- list(y,y) return(list(mean=M,mean.jacobian=dM,var=V,var.jacobian=dV)) } lava/R/logo.R0000644000176200001440000000202413162174023012456 0ustar liggesusersgfilter <- function(x,sigma=1) { gridfn <- function(fn,width,height,center=TRUE) { jx <- seq_len(height) jy <- seq_len(width) if (center) { jx <- jx/height-0.5 jy <- jy/width-0.5 } outer(jx, jy, FUN=fn) } width <- ncol(x); height <- nrow(x) oscunits <- gridfn(function(x,y) ((-1)^(x+y)),height=height,width=width,center=FALSE) x0 <- x*oscunits ## translate origo to center of image X <- fft(x0) d <- gridfn(function(x,y) (x^2+y^2),height=height,width=width,center=TRUE) Gn <- exp(-2*(base::pi*sigma)^2*d) # frequency response H <- X*Gn res <- Re(fft(H,inverse=TRUE))/(width*height)*oscunits return(res) } ##' @export lava <- function(seed,w=128,h=w,bw=4,sigma=5000,bg=20000,numcol=128,col=grDevices::heat.colors(numcol),...) { if (!missing(seed)) set.seed(seed) x <- matrix(rnorm(w*h,bg,sigma),nrow=h, ncol=w) x0 <- gfilter(x,sigma=bw) y <- (x0-min(x0)+1)^1.2 opt <- graphics::par(mai=c(0,0,0,0)) graphics::image(y,axes=FALSE,col=col) graphics::par(opt) invisible(y) } lava/R/blockdiag.R0000644000176200001440000000146513162174023013445 0ustar liggesusers##' Combine matrices to block diagonal structure ##' @title Combine matrices to block diagonal structure ##' @param x Matrix ##' @param \dots Additional matrices ##' @param pad Vyalue outside block-diagonal ##' @author Klaus K. Holst ##' @export ##' @examples ##' A <- diag(3)+1 ##' blockdiag(A,A,A,pad=NA) blockdiag <- function(x,...,pad=0) { if (is.list(x)) xx <- x else xx <- list(x,...) 
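## Allocate a sum(rows) x sum(cols) matrix filled with 'pad' and copy each
## block onto the diagonal, carrying over the row/column names.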
rows <- unlist(lapply(xx,nrow)) crows <- c(0,cumsum(rows)) cols <- unlist(lapply(xx,ncol)) ccols <- c(0,cumsum(cols)) res <- matrix(pad,nrow=sum(rows),ncol=sum(cols)) for (i in seq_len(length(xx))) { idx1 <- seq_len(rows[i])+crows[i]; idx2 <- seq_len(cols[i])+ccols[i] res[idx1,idx2] <- xx[[i]] } colnames(res) <- unlist(lapply(xx,colnames)); rownames(res) <- unlist(lapply(xx,rownames)) return(res) } lava/R/Col.R0000644000176200001440000000240313162174023012234 0ustar liggesusersmypal <- function(set=TRUE,...) { oldpal <- palette() col <- c("black","darkblue","darkred","goldenrod","mediumpurple", "seagreen","aquamarine3","violetred1","salmon1", "lightgoldenrod1","darkorange2","firebrick1","violetred1", "gold") if (!set) return(col) palette(col) invisible(oldpal) } ##' This function transforms a standard color (e.g. "red") into an ##' transparent RGB-color (i.e. alpha-blend<1). ##' ##' This only works for certain graphics devices (Cairo-X11 (x11 as of R>=2.7), quartz, pdf, ...). ##' @title Generate a transparent RGB color ##' @param col Color (numeric or character) ##' @param alpha Degree of transparency (0,1) ##' @param locate Choose colour (with mouse) ##' @return A character vector with elements of 7 or 9 characters, '"\#"' ##' followed by the red, blue, green and optionally alpha values in ##' hexadecimal (after rescaling to '0 ... 255'). ##' @author Klaus K. Holst ##' @examples ##' plot(runif(1000),cex=runif(1000,0,4),col=Col(c("darkblue","orange"),0.5),pch=16) ##' @keywords color ##' @export Col <- function(col,alpha=0.2,locate=0) { if (locate>0) return(colsel(locate)) mapply(function(x,alpha) do.call(rgb,as.list(c(col2rgb(x)/255,alpha))), col,alpha) } lava/R/profile.R0000644000176200001440000000365113162174023013165 0ustar liggesusers##' @export profile.lvmfit <- function(fitted,idx,tau,...) { mm <- parfix(Model(fitted),idx,tau) index(mm) <- reindex(mm,zeroones=TRUE,deriv=TRUE) fixed <- attributes(mm)$fixed plogl <- function(tau0) { for (i in fixed$v) { mm$mean[[i]] <- tau0 } for (i in seq_len(nrow(fixed$A))) { index(mm)$A[fixed$A[i,1],fixed$A[i,2]] <- mm$fix[fixed$A[i,1],fixed$A[i,2]] <- tau0 } for (i in seq_len(nrow(fixed$P))) { index(mm)$P[fixed$P[i,1],fixed$P[i,2]] <- mm$covfix[fixed$P[i,1],fixed$P[i,2]] <- tau0 } for (i in length(fixed$e)) { index(mm)$exfix[i] <- tau0 } dots <- list(...) dots$silent <- FALSE if (!is.null(dots$control)) control <- dots$control else control <- list() control$start <- coef(fitted) dots$control <- control dots$index <- FALSE dots$fix <- FALSE dots$silent <- TRUE dots$quick <- TRUE dots$data <- model.frame(fitted) dots$x <- mm ee <- do.call("estimate",dots) return(logLik(mm,p=ee,data=dots$data)) } val <- sapply(tau,plogl) attributes(val) <- NULL val } profci.lvmfit <- function(x,parm,level=0.95,interval=NULL,curve=FALSE,n=20,lower=TRUE,upper=TRUE,...) 
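## Profile-likelihood confidence limits for a single parameter: 'uniroot' is
## used on each side of the estimate to solve
##   profile-logLik(tau) = logLik(x) - qchisq(level, 1)/2;
## with 'curve=TRUE' the profiled curve itself is returned instead.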
{ ll <- logLik(x)-qchisq(level,1)/2 pp <- function(tau) (profile.lvmfit(x,parm,tau) - ll) tau0 <- coef(x)[parm] tau0.sd <- x$vcov[parm,parm]^0.5 if (is.null(interval)) { interval <- tau0 + 3*c(-1,1)*tau0.sd if (parm%in%(variances(x)+index(x)$npar.mean)) interval[1] <- max(1e-5,interval[1]) } if (curve) { xx <- seq(interval[1],interval[2],length.out=n) val <- sapply(xx,pp) res <- cbind(par=xx,val=val) return(res) } low <- up <- NA if (lower) low <- uniroot(pp,interval=c(interval[1],tau0))$root if (upper) up <- uniroot(pp,interval=c(tau0,interval[2]))$root ## res <- rbind(lower$root,upper$root); rownames(res) <- coef() return(c(low,up)) } lava/R/score.R0000644000176200001440000001163313162174023012637 0ustar liggesusers##' @export `score` <- function(x,...) UseMethod("score") ###{{{ score.lvm ##' @export score.lvm <- function(x, data, p, model="gaussian", S, n, mu=NULL, weights=NULL, data2=NULL, debug=FALSE, reindex=FALSE, mean=TRUE, constrain=TRUE, indiv=TRUE,...) { cl <- match.call() lname <- paste0(model,"_score.lvm") if (!exists(lname)) { lname <- paste0(model,"_gradient.lvm") mygrad <- get(lname) scoreFun <- function(...) -mygrad(...) if (is.null(mygrad)) { stop("Missing gradient") } } else { scoreFun <- get(lname) } if (missing(data) || is.null(data)) { cl[[1]] <- scoreFun score <- eval.parent(cl) return(rbind(score)) } if (is.null(index(x)$dA) | reindex) x <- updatelvm(x,zeroones=TRUE,deriv=TRUE) xfix <- colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))] xconstrain <- intersect(unlist(lapply(constrain(x),function(z) attributes(z)$args)),index(x)$manifest) Debug(xfix,debug) if (missing(n)) { n <- nrow(data) } if (length(xfix)>0 | length(xconstrain)>0) { ##### Random slopes! x0 <- x if (length(xfix)>0) { Debug("random slopes...",debug) nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) for (i in seq_along(myfix$var)) for (j in seq_along(myfix$col[[i]])) { regfix(x0, from=vars(x0)[myfix$row[[i]][j]],to=vars(x0)[myfix$col[[i]][j]]) <- data[1,myfix$var[[i]]] } index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) } pp <- modelPar(x0,p) ##p0 <- with(pp, c(meanpar,p,p2)) k <- length(index(x0)$manifest) myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { index(x0)$A[cbind(myfix$row[[i]],myfix$col[[i]])] <- data[ii,myfix$var[[i]]] } return(scoreFun(x0,data=data[ii,], p=with(pp,c(meanpar,p,p2)),weights=weights[ii,,drop=FALSE],data2=data2[ii,,drop=FALSE],model=model,debug=debug,indiv=indiv,...)) } score <- t(sapply(seq_len(nrow(data)),myfun)) if (!indiv) { score <- colSums(rbind(score)) } if (length(score)0) myorder[[x$allmis]] <- NULL for (i in seq_along(S)) S0[myorder[[i]],] <- S[[i]] if (length(x$allmis)>0) { S0 <- S0[-x$orderlist[[x$allmis]],] } S0[is.na(S0)] <- 0 colnames(S0) <- names(coef(x)) return(S0) } return(S) } ###}}} score.lvm.missing ###{{{ score.multigroupfit ##' @export score.multigroupfit <- function(x,p=pars(x), weights=Weights(x), estimator=x$estimator, ...) { score(x$model0, p=p, weights=weights, model=estimator,...) } ###}}} score.multigroupfit ###{{{ score.multigroup ##' @export score.multigroup <- function(x,data=x$data,weights=NULL,data2=NULL,p,indiv=combine,combine=FALSE,...) 
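## Multiple-group score assembly (sketch of the logic in the body below): the
## individual score contributions are computed group by group using each
## group's parameter subset, and then mapped back into the full parameter
## vector via the positions returned by modelPar().  Columns not used by a
## given group are left as NA and are only zeroed when the groups are stacked
## with 'combine=TRUE'.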
{ rm <- procrandomslope(x) pp <- with(rm, modelPar(model,p)$p) parord <- modelPar(rm$model,seq_len(with(rm$model,npar+npar.mean)))$p S <- list() for (i in seq_len(x$ngroup)) { S0 <- rbind(score(x$lvm[[i]],p=pp[[i]],data=data[[i]],weights=weights[[i]],data2=data2[[i]],indiv=indiv,...)) S1 <- matrix(ncol=length(p),nrow=nrow(S0)) S1[,parord[[i]]] <- S0 S <- c(S, list(S1)) } if (combine) { S <- Reduce("rbind",S); S[is.na(S)] <- 0 if (!indiv) S <- colSums(S) return(S) } if (indiv) return(S) res <- matrix(0,nrow=1,ncol=length(p)) for (i in seq_len(x$ngroup)) res[,parord[[i]]] <- res[,parord[[i]]] + S[[i]][,parord[[i]]] return(as.vector(res)) } ###}}} score.multigroup ###{{{ score.lvmfit ##' @export score.lvmfit <- function(x, data=model.frame(x), p=pars(x), model=x$estimator, weights=Weights(x), data2=x$data$data2, ...) { score(x$model0,data=data,p=p,model=model,weights=weights,data2=data2,...) } ###}}} score.lvmfit lava/R/fplot.R0000644000176200001440000000347413162174023012654 0ustar liggesusers##' Faster plot via RGL ##' @title fplot ##' @export ##' @examples ##' if (interactive()) { ##' data(iris) ##' fplot(Sepal.Length ~ Petal.Length+Species, data=iris, size=2, type="s") ##' } ##' @param x X variable ##' @param y Y variable ##' @param z Z variable (optional) ##' @param xlab x-axis label ##' @param ylab y-axis label ##' @param ... additional arggument to lower level plot functions ##' @param z.col Color ##' @param data data.frame ##' @param add If TRUE use current active device fplot <- function(x,y,z=NULL,xlab,ylab,...,z.col=topo.colors(64), data=parent.frame(),add=FALSE) { if (!requireNamespace("rgl",quietly=TRUE)) stop("Requires 'rgl'") if (inherits(x,"formula")) { y <- getoutcome(x) x <- attributes(y)$x if (length(x)>1) { z <- as.numeric(with(data, get(x[2]))) } if (length(x)==0) { x <- seq(nrow(data)) if (missing(xlab)) xlab <- "Index" } else { if (missing(xlab)) xlab <- x[1] x <- with(data, get(x[1])) } if (missing(ylab)) ylab <- y y <- with(data, get(y)) } else { if (missing(y)) { y <- x if (missing(ylab)) ylab <- deparse(substitute(x)) x <- seq(nrow(data)) if (missing(xlab)) xlab <- "Index" } else { if (missing(xlab)) xlab <- deparse(substitute(x)) if (missing(ylab)) ylab <- deparse(substitute(y)) } } rgl::.check3d() if (!is.null(z)) { ncol <- length(z.col); glut <- approxfun(seq(min(z),max(z),length.out=ncol),seq(ncol)) rgl::plot3d(x,y,0,col=z.col[round(glut(z))],xlab=xlab,ylab=ylab,...) } else { rgl::plot3d(x,y,0,xlab=xlab,ylab=ylab,...) } rgl::view3d(0,0,fov=0) } lava/R/estimate.lvm.R0000644000176200001440000011016613162174023014135 0ustar liggesusers###{{{ estimate.lvm ##' Estimation of parameters in a Latent Variable Model (lvm) ##' ##' Estimate parameters. MLE, IV or user-defined estimator. ##' ##' A list of parameters controlling the estimation and optimization procedures ##' is parsed via the \code{control} argument. By default Maximum Likelihood is ##' used assuming multivariate normal distributed measurement errors. A list ##' with one or more of the following elements is expected: ##' ##' \describe{ ##' \item{start:}{Starting value. The order of the parameters can be shown by ##' calling \code{coef} (with \code{mean=TRUE}) on the \code{lvm}-object or with ##' \code{plot(..., labels=TRUE)}. Note that this requires a check that it is ##' actual the model being estimated, as \code{estimate} might add additional ##' restriction to the model, e.g. through the \code{fix} and \code{exo.fix} ##' arguments. 
The \code{lvm}-object of a fitted model can be extracted with the ##' \code{Model}-function.} ##' ##' \item{starterfun:}{Starter-function with syntax ##' \code{function(lvm, S, mu)}. Three builtin functions are available: ##' \code{startvalues}, \code{startvalues0}, \code{startvalues1}, ...} ##' ##' \item{estimator:}{ String defining which estimator to use (Defaults to ##' ``\code{gaussian}'')} ##' ##' \item{meanstructure}{Logical variable indicating ##' whether to fit model with meanstructure.} ##' ##' \item{method:}{ String pointing to ##' alternative optimizer (e.g. \code{optim} to use simulated annealing).} ##' ##' \item{control:}{ Parameters passed to the optimizer (default ##' \code{stats::nlminb}).} ##' ##' \item{tol:}{ Tolerance of optimization constraints on lower limit of ##' variance parameters. } } ##' ##' @param x \code{lvm}-object ##' @param data \code{data.frame} ##' @param estimator String defining the estimator (see details below) ##' @param control control/optimization parameters (see details below) ##' @param missing Logical variable indiciating how to treat missing data. ##' Setting to FALSE leads to complete case analysis. In the other case ##' likelihood based inference is obtained by integrating out the missing data ##' under assumption the assumption that data is missing at random (MAR). ##' @param weights Optional weights to used by the chosen estimator. ##' @param weightsname Weights names (variable names of the model) in case ##' \code{weights} was given as a vector of column names of \code{data} ##' @param data2 Optional additional dataset used by the chosen ##' estimator. ##' @param id Vector (or name of column in \code{data}) that identifies ##' correlated groups of observations in the data leading to variance estimates ##' based on a sandwich estimator ##' @param fix Logical variable indicating whether parameter restriction ##' automatically should be imposed (e.g. intercepts of latent variables set to ##' 0 and at least one regression parameter of each measurement model fixed to ##' ensure identifiability.) ##' @param index For internal use only ##' @param graph For internal use only ##' @param silent Logical argument indicating whether information should be ##' printed during estimation ##' @param quick If TRUE the parameter estimates are calculated but all ##' additional information such as standard errors are skipped ##' @param method Optimization method ##' @param param set parametrization (see \code{help(lava.options)}) ##' @param cluster Obsolete. Alias for 'id'. ##' @param p Evaluate model in parameter 'p' (no optimization) ##' @param ... Additional arguments to be passed to the low level functions ##' @return A \code{lvmfit}-object. ##' @author Klaus K. 
Holst ##' @seealso estimate.default score, information ##' @keywords models regression ##' @export ##' @method estimate lvm ##' @examples ##' dd <- read.table(header=TRUE, ##' text="x1 x2 x3 ##' 0.0 -0.5 -2.5 ##' -0.5 -2.0 0.0 ##' 1.0 1.5 1.0 ##' 0.0 0.5 0.0 ##' -2.5 -1.5 -1.0") ##' e <- estimate(lvm(c(x1,x2,x3)~u),dd) ##' ##' ## Simulation example ##' m <- lvm(list(y~v1+v2+v3+v4,c(v1,v2,v3,v4)~x)) ##' covariance(m) <- v1~v2+v3+v4 ##' dd <- sim(m,10000) ## Simulate 10000 observations from model ##' e <- estimate(m, dd) ## Estimate parameters ##' e ##' ##' ## Using just sufficient statistics ##' n <- nrow(dd) ##' e0 <- estimate(m,data=list(S=cov(dd)*(n-1)/n,mu=colMeans(dd),n=n)) ##' rm(dd) ##' ##' ## Multiple group analysis ##' m <- lvm() ##' regression(m) <- c(y1,y2,y3)~u ##' regression(m) <- u~x ##' d1 <- sim(m,100,p=c("u,u"=1,"u~x"=1)) ##' d2 <- sim(m,100,p=c("u,u"=2,"u~x"=-1)) ##' ##' mm <- baptize(m) ##' regression(mm,u~x) <- NA ##' covariance(mm,~u) <- NA ##' intercept(mm,~u) <- NA ##' ee <- estimate(list(mm,mm),list(d1,d2)) ##' ##' ## Missing data ##' d0 <- makemissing(d1,cols=1:2) ##' e0 <- estimate(m,d0,missing=TRUE) ##' e0 `estimate.lvm` <- function(x, data=parent.frame(), estimator=NULL, control=list(), missing=FALSE, weights, weightsname, data2, id, fix, index=!quick, graph=FALSE, silent=lava.options()$silent, quick=FALSE, method, param, cluster, p, ...) { cl <- match.call() if (!base::missing(param)) { oldparam <- lava.options()$param lava.options(param=param) on.exit(lava.options(param=oldparam)) } if (!base::missing(method)) { control["method"] <- list(method) } Optim <- list( iter.max=lava.options()$iter.max, trace=ifelse(lava.options()$debug,3,0), gamma=lava.options()$gamma, gamma2=1, ngamma=lava.options()$ngamma, backtrack=lava.options()$backtrack, lambda=0.05, abs.tol=1e-9, epsilon=1e-10, delta=1e-10, rel.tol=1e-10, S.tol=1e-5, stabil=FALSE, start=NULL, constrain=lava.options()$constrain, method=NULL, starterfun="startvalues0", information="E", meanstructure=TRUE, sparse=FALSE, tol=lava.options()$tol) defopt <- lava.options()[] defopt <- defopt[intersect(names(defopt),names(Optim))] Optim[names(defopt)] <- defopt if (length(control)>0) { Optim[names(control)] <- control } if (is.environment(data)) { innames <- intersect(ls(envir=data),vars(x)) data <- as.data.frame(lapply(innames,function(x) get(x,envir=data))) names(data) <- innames } if (length(exogenous(x)>0)) { catx <- categorical2dummy(x,data) x <- catx$x; data <- catx$data } if (!lava.options()$exogenous) exogenous(x) <- NULL redvar <- intersect(intersect(parlabels(x),latent(x)),colnames(data)) if (length(redvar)>0) warning(paste("Latent variable exists in dataset",redvar)) ## Random-slopes: xfix <- setdiff(colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))],latent(x)) if (base::missing(fix)) { fix <- ifelse(length(xfix)>0,FALSE,TRUE) } Debug(list("start=",Optim$start)) if (!base::missing(cluster)) id <- cluster ## commented; don't reduce data ## if (!missing & (is.matrix(data) | is.data.frame(data))) { ## includelist <- c(manifest(x),xfix) ## if (!base::missing(weights) && is.character(weights)) includelist <- c(includelist,weights) ## if (!base::missing(data2) && is.character(data2)) includelist <- c(includelist,data2) ## if (!base::missing(id) && is.character(id)) includelist <- c(includelist,id) ## ##data <- na.omit(data[,intersect(colnames(data),includelist),drop=FALSE]) ## } ## Weights... 
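  ## (Sketch) When 'weights' is supplied as a column name, the code just below
  ## extracts that column from 'data' and renames it after the endogenous
  ## variable(s) (or after 'weightsname' if given), so that a call along the
  ## lines of
  ##   estimate(m, d, weights="w")       # 'w' a hypothetical column of 'd'
  ## attaches the weights to the outcome equations; when weights are supplied
  ## and no estimator is specified, the "normal" estimator is selected further
  ## below.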
if (!base::missing(weights)) { if (is.character(weights)) { weights <- data[,weights,drop=FALSE] if (!base::missing(weightsname)) { colnames(weights) <- weightsname } else { yvar <- index(x)$endogenous nw <- seq_len(min(length(yvar),ncol(weights))) colnames(weights)[nw] <- yvar[nw] } } weights <- cbind(weights) } else { weights <- NULL } if (!base::missing(data2)) { if (is.character(data2)) { data2 <- data[,data2] } } else { data2 <- NULL } ## Correlated clusters... if (!base::missing(id)) { if (is.character(id)) { id <- data[,id] } } else { id <- NULL } Debug("procdata") val <- try({ dd <- procdata.lvm(x,data=data,missing=missing) S <- dd$S; mu <- dd$mu; n <- dd$n var.missing <- setdiff(vars(x),colnames(S)) }, silent=TRUE) if (inherits(val,"try-error")) { var.missing <- setdiff(vars(x),colnames(data)) S <- NULL; mu <- NULL; n <- nrow(data) } ## Debug(list("n=",n)) ## Debug(list("S=",S)) ## Debug(list("mu=",mu)) ## if (fix) { if (length(var.missing)>0) {## Convert to latent: new.lat <- setdiff(var.missing,latent(x)) if (length(new.lat)>0) x <- latent(x, new.lat) } ##} ## Run hooks (additional lava plugins) myhooks <- gethook() for (f in myhooks) { res <- do.call(f, list(x=x,data=data,weights=weights,data2=data2,estimator=estimator,optim=Optim)) if (!is.null(res$x)) x <- res$x if (!is.null(res$data)) data <- res$data if (!is.null(res$weights)) weights <- res$weights if (!is.null(res$data2)) data2 <- res$data2 if (!is.null(res$optim)) Optim <- res$optim if (!is.null(res$estimator)) estimator <- res$estimator rm(res) } if (is.null(estimator)) { if (!missing(weights) && !is.null(weights)) { estimator <- "normal" } else estimator <- "gaussian" } checkestimator <- function(x,...) { ffname <- paste0(x,c("_objective","_gradient"),".lvm") exists(ffname[1])||exists(ffname[2]) } if (!checkestimator(estimator)) { ## Try down/up-case version estimator <- tolower(estimator) if (!checkestimator(estimator)) { estimator <- toupper(estimator) } } ObjectiveFun <- paste0(estimator, "_objective", ".lvm") GradFun <- paste0(estimator, "_gradient", ".lvm") if (!exists(ObjectiveFun) & !exists(GradFun)) stop("Unknown estimator.") Method <- paste0(estimator, "_method", ".lvm") if (!exists(Method)) { Method <- "nlminb1" } else { Method <- get(Method) } NoOptim <- "method"%in%names(control) && is.null(control$method) if (is.null(Optim$method) && !(NoOptim)) { Optim$method <- if (missing && Method!="nlminb0") "nlminb1" else Method } if (index) { ## Proces data and setup some matrices x <- fixsome(x, measurement.fix=fix, S=S, mu=mu, n=n,debug=!silent) if (!silent) message("Reindexing model...\n") if (length(xfix)>0) { index(x) <- reindex(x,sparse=Optim$sparse,zeroones=TRUE,deriv=TRUE) } else { x <- updatelvm(x,sparse=Optim$sparse,zeroones=TRUE,deriv=TRUE,mean=TRUE) } } if (is.null(estimator) || estimator==FALSE) { return(x) } if (length(index(x)$endogenous)==0) stop("No observed outcome variables. Check variable names in model and data.") if (!Optim$meanstructure) { mu <- NULL } nparall <- index(x)$npar + ifelse(Optim$meanstructure, index(x)$npar.mean+index(x)$npar.ex,0) ## Get starting values if (!missing(p)) { start <- p Optim$start <- p } else { myparnames <- coef(x,mean=TRUE) paragree <- FALSE paragree.2 <- c() if (!is.null(Optim$start)) { paragree <- myparnames%in%names(Optim$start) paragree.2 <- names(Optim$start)%in%myparnames } if (sum(paragree)>=length(myparnames)) Optim$start <- Optim$start[which(paragree.2)] if (! 
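  ## (Note on the estimator dispatch set up earlier in this function) The
  ## 'estimator' string is mapped to functions purely by naming convention:
  ##   paste0(estimator, "_objective.lvm"), paste0(estimator, "_gradient.lvm"),
  ## and optionally paste0(estimator, "_method.lvm") / "_variance.lvm".
  ## So, as a sketch, a user-defined estimator "myest" should be picked up if
  ## functions such as myest_objective.lvm(x, p, data, ...) are visible on the
  ## search path ("myest" is a hypothetical name used only for illustration).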
(length(Optim$start)==length(myparnames) & sum(paragree)==0)) if (is.null(Optim$start) || sum(paragree)0 | (length(xconstrain)>0 & XconstrStdOpt | !lava.options()$test)) { ## Yes x0 <- x if (length(xfix)>0) { myclass <- c("lvmfit.randomslope",myclass) nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) x0 <- x for (i in seq_along(myfix$var)) for (j in seq_len(length(myfix$col[[i]]))) regfix(x0, from=vars(x0)[myfix$row[[i]][j]], to=vars(x0)[myfix$col[[i]][j]]) <- colMeans(data[,myfix$var[[i]],drop=FALSE]) x0 <- updatelvm(x0,zeroones=TRUE,deriv=TRUE) x <- x0 yvars <- endogenous(x0) ## Alter start-values/constraints: new.par.idx <- which(coef(mymodel,mean=TRUE,fix=FALSE)%in%coef(x0,mean=TRUE,fix=FALSE)) if (length(Optim$start)>length(new.par.idx)) Optim$start <- Optim$start[new.par.idx] lower <- lower[new.par.idx] if (Optim$constrain) { constrained <- match(constrained,new.par.idx) } } mydata <- as.matrix(data[,manifest(x0)]) myObj <- function(pp) { if (Optim$constrain) { pp[constrained] <- exp(pp[constrained]) } myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { x0$fix[cbind(rowpos[[i]],colpos[[i]])] <- index(x0)$A[cbind(rowpos[[i]],colpos[[i]])] <- data[ii,xfix[i]] } if (is.list(data2)) { res <- do.call(ObjectiveFun, list(x=x0, p=pp, data=mydata[ii,], n=1, weights=weights[ii,], data2=data2[ii,])) } else { res <- do.call(ObjectiveFun, list(x=x0, p=pp, data=mydata[ii,], n=1, weights=weights[ii,], data2=data2)) } return(res) } sum(sapply(seq_len(nrow(data)),myfun)) } myGrad <- function(pp) { if (Optim$constrain) { pp[constrained] <- exp(pp[constrained]) } myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { x0$fix[cbind(rowpos[[i]],colpos[[i]])] <- index(x0)$A[cbind(rowpos[[i]],colpos[[i]])] <- data[ii,xfix[i]] } if (is.list(data2)) { rr <- do.call(GradFun, list(x=x0, p=pp, data=mydata[ii,,drop=FALSE], n=1, weights=weights[ii,], data2=data2)) } else { rr <- do.call(GradFun, list(x=x0, p=pp, data=mydata[ii,,drop=FALSE], n=1, weights=weights[ii,], data2=data2[ii,])) } return(rr) } ss <- rowSums(rbind(sapply(seq_len(nrow(data)),myfun))) if (Optim$constrain) { ss[constrained] <- ss[constrained]*pp[constrained] } return(ss) } myInfo <- function(pp) { myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { x0$fix[cbind(rowpos[[i]],colpos[[i]])] <- index(x0)$A[cbind(rowpos[[i]],colpos[[i]])] <- data[ii,xfix[i]] } if (is.list(data2)) { res <- do.call(InformationFun, list(p=pp, obj=myObj, x=x0, data=data[ii,], n=1, weights=weights[ii,], data2=data2)) } else { res <- do.call(InformationFun, list(p=pp, obj=myObj, x=x0, data=data[ii,], n=1, weights=weights[ii,], data2=data2[ii,])) } return(res) } L <- lapply(seq_len(nrow(data)),function(x) myfun(x)) val <- apply(array(unlist(L),dim=c(length(pp),length(pp),nrow(data))),c(1,2),sum) if (!is.null(attributes(L[[1]])$grad)) { attributes(val)$grad <- colSums ( matrix( unlist(lapply(L,function(i) attributes(i)$grad)) , ncol=length(pp), byrow=TRUE) ) } return(val) } ################################################## } else { ## No, standard model ## Non-linear parameter constraints involving observed variables? (e.g. 
nonlinear regression) xconstrain <- c() for (i in seq_len(length(constrain(x)))) { z <- constrain(x)[[i]] xx <- intersect(attributes(z)$args,manifest(x)) if (length(xx)>0) { warg <- setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(x))[i] y <- names(which(unlist(lapply(intercept(x),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain <- c(xconstrain,list(el)) } } yconstrain <- unlist(lapply(xconstrain,function(x) x$endo)) iconstrain <- unlist(lapply(xconstrain,function(x) x$idx)) MkOffset <- function(pp,grad=FALSE) { if (length(xconstrain)>0) { Mu <- matrix(0,nrow(data),length(vars(x))); colnames(Mu) <- vars(x) M <- modelVar(x,p=pp,data=data) M$parval <- c(M$parval, x$mean[unlist(lapply(x$mean,is.numeric))]) for (i in seq_len(length(xconstrain))) { pp <- unlist(M$parval[xconstrain[[i]]$warg]); myidx <- with(xconstrain[[i]],order(c(wargidx,exoidx))) D <- cbind(rbind(pp)%x%cbind(rep(1,nrow(Mu))), data[,xconstrain[[i]]$exo,drop=FALSE])[,myidx,drop=FALSE] mu <- try(xconstrain[[i]]$func(D),silent=TRUE) if (is.data.frame(mu)) mu <- mu[,1] if (inherits(mu,"try-error") || NROW(mu)!=NROW(Mu)) { ## mu1 <- with(xconstrain[[i]], ## apply(data[,exo,drop=FALSE],1, ## function(x) func(unlist(c(pp,x))[myidx]))) mu <- apply(D,1,xconstrain[[i]]$func) } Mu[,xconstrain[[i]]$endo] <- mu } offsets <- Mu%*%t(M$IAi)[,endogenous(x)] return(offsets) } return(NULL) } myObj <- function(pp) { if (Optim$constrain) { pp[constrained] <- exp(pp[constrained]) } offset <- MkOffset(pp) mu0 <- mu; S0 <- S; x0 <- x if (!is.null(offset)) { x0$constrain[iconstrain] <- NULL data0 <- data[,manifest(x0)] data0[,endogenous(x)] <- data0[,endogenous(x)]-offset pd <- procdata.lvm(x0,data=data0) S0 <- pd$S; mu0 <- pd$mu x0$mean[yconstrain] <- 0 } do.call(ObjectiveFun, list(x=x0, p=pp, data=data, S=S0, mu=mu0, n=n, weights=weights ,data2=data2, offset=offset )) } myGrad <- function(pp) { if (Optim$constrain) pp[constrained] <- exp(pp[constrained]) ## offset <- MkOffset(pp) ## mu0 <- mu; S0 <- S; x0 <- x ## if (!is.null(offset)) { ## x0$constrain[iconstrain] <- NULL ## data0 <- data[,manifest(x0)] ## data0[,endogenous(x)] <- data0[,endogenous(x)]-offset ## pd <- procdata.lvm(x0,data=data0) ## S0 <- pd$S; mu0 <- pd$mu ## } S <- do.call(GradFun, list(x=x, p=pp, data=data, S=S, mu=mu, n=n, weights=weights , data2=data2##, offset=offset )) if (Optim$constrain) { S[constrained] <- S[constrained]*pp[constrained] } if (is.null(mu) & index(x)$npar.mean>0) { return(S[-c(seq_len(index(x)$npar.mean))]) } if (length(S)0) { return(I[-seq_len(index(x)$npar.mean),-seq_len(index(x)$npar.mean)]) } return(I) } } myHess <- function(pp) { p0 <- pp if (Optim$constrain) pp[constrained] <- exp(pp[constrained]) I0 <- myInfo(pp) attributes(I0)$grad <- NULL D <- attributes(I0)$grad if (is.null(D)) { D <- myGrad(p0) attributes(I0)$grad <- D } if (Optim$constrain) { I0[constrained,-constrained] <- apply(I0[constrained,-constrained,drop=FALSE],2,function(x) x*pp[constrained]); I0[-constrained,constrained] <- t(I0[constrained,-constrained]) if (sum(constrained)==1) { I0[constrained,constrained] <- I0[constrained,constrained]*outer(pp[constrained],pp[constrained]) - D[constrained] } else { I0[constrained,constrained] <- I0[constrained,constrained]*outer(pp[constrained],pp[constrained]) - diag(D[constrained],ncol=length(constrained)) } } return(I0) } if 
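  ## Reparameterization used in myHess above (worked out for reference):
  ## constrained (variance) parameters are optimized on the log scale,
  ## theta_j = exp(gamma_j).  By the chain rule,
  ##   d l / d gamma_j = theta_j * d l / d theta_j
  ##   d^2 l / (d gamma_j d gamma_k) =
  ##       theta_j * theta_k * d^2 l / (d theta_j d theta_k)
  ##     + 1(j==k) * theta_j * d l / d theta_j,
  ## which corresponds to scaling the constrained rows/columns of the
  ## information by pp[constrained] and adjusting the diagonal by the gradient
  ## term (the sign of that correction depends on whether the objective is the
  ## log-likelihood or its negative).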
(is.null(tryCatch(get(InformationFun),error = function (x) NULL))) myInfo <- myHess <- NULL if (is.null(tryCatch(get(GradFun),error = function (x) NULL))) myGrad <- NULL if (!silent) message("Optimizing objective function...") if (Optim$trace>0 & !silent) message("\n") ## Optimize with lower constraints on the variance-parameters if ((is.data.frame(data) | is.matrix(data)) && nrow(data)==0) stop("No observations") if (!missing(p)) { opt <- list(estimate=p) ## if (!is.null(myGrad)) ## opt <- c(opt,list(gradient=myGrad(p))) ## if (!is.null(myObj)) ## opt <- c(opt,list(objective=myObj(p))) } else { if (!is.null(Optim$method)) { optarg <- list(start=Optim$start, objective=myObj, gradient=myGrad, hessian=myHess, lower=lower, control=Optim, debug=debug) if (length(Optim$method)>1) { Optim$optimx.method <- Optim$method } if (!is.null(Optim$optimx.method)) { Optim$method <- "optimx" } if (Optim$method%in%c("optimx","optim")) { optimcontrolnames <- c("trace", "follow.on", "save.failures", "maximize", "all.methods", "kkt", "kkttol", "kkt2tol", "starttests", "dowarn", "badval", "usenumDeriv", "fnscale", "parscale", "ndeps", "maxit", "abstol", "reltol", #"alpha","beta","gamma", "REPORT", "type", "lmm", "factr", "pgtol") if (!is.null(optarg$control)) { optarg$control[names(optarg$control)%ni%optimcontrolnames] <- NULL } args <- names(formals(get(Optim$method))) names(optarg)[1] <- "par" if (is.null(optarg$upper)) optarg$upper <- Inf if (!is.null(optarg[["objective"]])) names(optarg)[2] <- "fn" if (!is.null(optarg[["gradient"]])) names(optarg)[3] <- "gr" ##if (!is.null(optarg[["hessian"]])) names(optarg)[4] <- "hess" optarg$hessian <- NULL optarg[names(optarg)%ni%args] <- NULL } if (!is.null(Optim$optimx.method)) optarg$method <- Optim$optimx.method opt <- do.call(Optim$method, optarg) if (inherits(opt,"optimx")) { opt0 <- opt opt <- list(par=coef(opt)[1,]) } if (is.null(opt$estimate)) opt$estimate <- opt$par if (Optim$constrain) { opt$estimate[constrained] <- exp(opt$estimate[constrained]) } if (XconstrStdOpt & !is.null(myGrad)) opt$gradient <- as.vector(myGrad(opt$par)) else { opt$gradient <- numDeriv::grad(myObj,opt$par) } } else { if (!NoOptim) { opt <- do.call(ObjectiveFun, list(x=x,data=data,control=control,...)) opt$gradient <- rep(0,length(opt$estimate)) } else { opt <- list(estimate=Optim$start, gradient=rep(0,length(Optim$start))) } } } if (!is.null(opt$convergence)) { if (opt$convergence!=0) warning("Lack of convergence. Increase number of iteration or change starting values.") } else if (!is.null(opt$gradient) && mean(opt$gradient)^2>1e-3) warning("Lack of convergence. 
Increase number of iteration or change starting values.") if (quick) { return(opt$estimate) } ## Calculate std.err: pp <- rep(NA,length(coefname)); names(pp) <- coefname pp.idx <- NULL if (!is.null(names(opt$estimate))) { pp[names(opt$estimate)] <- opt$estimate pp.idx <- na.omit(match(coefname,names(opt$estimate))) } else { pp[] <- opt$estimate pp.idx <- seq(length(pp)) } ## TODO: ## if (length(pp.idx)!=length(pp)) { ## pp <- rep(NA,length(coefname)); names(pp) <- coefname ## pp[] <- opt$estimate ## pp.idx <- seq(length(pp)) ## } suppressWarnings(mom <- tryCatch(modelVar(x, pp, data=data),error=function(x)NULL)) if (NoOptim) { asVar <- matrix(NA,ncol=length(pp),nrow=length(pp)) } else { if (!silent) message("\nCalculating asymptotic variance...\n") asVarFun <- paste0(estimator, "_variance", ".lvm") if (!exists(asVarFun)) { if (is.null(myInfo)) { if (!is.null(myGrad)) myInfo <- function(pp) numDeriv::jacobian(myGrad,pp,method=lava.options()$Dmethod) else myInfo <- function(pp) numDeriv::hessian(myObj,pp) } I <- myInfo(opt$estimate) asVar <- tryCatch(Inverse(I), error=function(e) matrix(NA, length(opt$estimate), length(opt$estimate))) } else { asVar <- tryCatch(do.call(asVarFun, list(x=x,p=opt$estimate,data=data,opt=opt)), error=function(e) matrix(NA, length(opt$estimate), length(opt$estimate))) } if (any(is.na(asVar))) { warning("Problems with asymptotic variance matrix. Possibly non-singular information matrix!") } if (!is.null(attributes(asVar)$pseudo) && attributes(asVar)$pseudo) { warning("Near-singular covariance matrix, using pseudo-inverse!") } diag(asVar)[diag(asVar)==0] <- NA } mycoef <- matrix(NA,nrow=nparall,ncol=4) mycoef[pp.idx,1] <- opt$estimate ## Will be finished during post.hooks ### OBS: v = t(A)%*%v + e res <- list(model=x, call=cl, coef=mycoef, vcov=asVar, mu=mu, S=S, ##A=A, P=P, model0=mymodel, ## Random slope hack estimator=estimator, opt=opt,expar=x$expar, data=list(model.frame=data, S=S, mu=mu, C=mom$C, v=mom$v, n=n, m=length(latent(x)), k=length(index(x)$manifest), data2=data2), weights=weights, data2=data2, cluster=id, pp.idx=pp.idx, graph=NULL, control=Optim) class(res) <- myclass myhooks <- gethook("post.hooks") for (f in myhooks) { res0 <- do.call(f,list(x=res)) if (!is.null(res0)) res <- res0 } if(graph) { res <- edgelabels(res,type="est") } return(res) } ###}}} estimate.lvm ###{{{ estimate.formula ##' @export estimate.formula <- function(x,data=parent.frame(),pred.norm=c(),unstruct=FALSE,silent=TRUE,id=NULL,distribution=NULL,estimator="gaussian",...) 
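## estimate.formula (sketch of the interface defined below): a plain formula is
## turned into a simple lvm() with the left-hand side as outcome and the
## model-matrix columns as covariates, after stripping any cluster(.)/id(.)
## specials, and the result is passed on to estimate.lvm.  Hypothetical calls
## (with 'd', 'y', 'x', 'z', 'id' as assumed example names):
##   estimate(y ~ x + z, data=d)                       # linear model
##   estimate(y ~ x + cluster(id), data=d)             # cluster-robust std.err.
##   estimate(y ~ x, data=d, distribution=binomial.lvm("logit"))  # glm estimator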
{ cl <- match.call() formulaId <- union(Specials(x,c("cluster")),Specials(x,c("id"))) formulaSt <- paste0("~.-cluster(",formulaId,")-id(",formulaId,")") if (!is.null(formulaId)) { id <- formulaId x <- update(x,as.formula(formulaSt)) } if (!is.null(id)) x <- update(x,as.formula(paste(".~.+",id))) varnames <- all.vars(x) mf <- model.frame(x,data) mt <- attr(mf, "terms") yvar <- names(mf)[1] y <- mf[,yvar] opt <- options(na.action="na.pass") mm <- model.matrix(x,data) options(opt) covars <- colnames(mm) covars <- unlist(lapply(covars, function(x) gsub("[^a-zA-Z0-9._]","",x))) colnames(mm) <- covars if (attr(terms(x),"intercept")==1) { covars <- covars[-1] it <- c() } else { it <- "0" } if (!is.null(id)) covars <- setdiff(covars,id) model <- lvm(toformula(yvar,c(it,covars)),silent=TRUE) if (!is.null(distribution)) { lava::distribution(model,yvar) <- distribution estimator <- "glm" } mydata <- na.omit(as.data.frame(cbind(data.frame(y),mm))); names(mydata)[1] <- yvar exogenous(model) <- setdiff(covars,pred.norm) if (unstruct) { model <- covariance(model,pred.norm,pairwise=TRUE) } estimate(model,mydata,silent=silent,id=id,estimator=estimator,...) } ###}}} estimate.formula lava/R/zib.R0000644000176200001440000003347113162174023012314 0ustar liggesusers##' Dose response calculation for binomial regression models ##' ##' @title Dose response calculation for binomial regression models ##' @param model Model object or vector of parameter estimates ##' @param intercept Index of intercept parameters ##' @param slope Index of intercept parameters ##' @param prob Index of mixture parameters (only relevant for ##' \code{zibreg} models) ##' @param x Optional weights ##' length(x)=length(intercept)+length(slope)+length(prob) ##' @param level Probability at which level to calculate dose ##' @param ci.level Level of confidence limits ##' @param vcov Optional estimate of variance matrix of parameter ##' estimates ##' @param family Optional distributional family argument ##' @param EB Optional ratio of treatment effect and adverse effects ##' used to find optimal dose (regret-function argument) ##' @author Klaus K. 
Holst ##' @export PD <- function(model,intercept=1,slope=2,prob=NULL,x,level=0.5, ci.level=0.95,vcov,family, EB=NULL) { if (is.vector(model)) { beta <- model if (missing(vcov)) stop("vcov argument needed") if (missing(family)) stop("family argument needed") } else beta <- coef(model) if (missing(vcov)) vcov <- stats::vcov(model) if (missing(family)) family <- stats::family(model) N <- length(intercept)+length(slope)+length(prob) if (length(intercept)geq[i]) return(TRUE) if (xyz[i]=length(geq)) return(TRUE) return(FALSE) } lava.env <- new.env() assign("init.hooks",c(),envir=lava.env) assign("remove.hooks",c(),envir=lava.env) assign("estimate.hooks",c(),envir=lava.env) assign("color.hooks",c(),envir=lava.env) assign("sim.hooks",c(),envir=lava.env) assign("post.hooks",c(),envir=lava.env) assign("print.hooks",c(),envir=lava.env) assign("plot.post.hooks",c(),envir=lava.env) assign("plot.hooks",c(),envir=lava.env) assign("options", list( trace=0, tol=1e-6, gamma=1, backtrack="wolfe", ngamma=0, iter.max=300, eval.max=250, constrain=FALSE, allow.negative.variance=FALSE, silent=TRUE, progressbarstyle=3, itol=1e-16, cluster.index=versioncheck("mets",c(0,2,7)), tobit=versioncheck("lava.tobit",c(0,5)), Dmethod="simple", ##"Richardson" messages=1, parallel=TRUE, param="relative", sparse=FALSE, test=TRUE, coef.names=FALSE, constrain=TRUE, graph.proc="beautify", regex=FALSE, min.weight=1e-3, exogenous=TRUE, plot.engine="Rgraphviz", node.color=c(exogenous="lightblue",endogenous="orange", latent="yellowgreen",transform="lightgray"), edgecolor=FALSE, layout="dot", ## symbols=c("<-","<->"), symbols=c("~","~~"), devel=FALSE, debug=FALSE), envir=lava.env) lava/R/variances.R0000644000176200001440000000116113162174023013472 0ustar liggesusers### Return position of variance elements in the parameter vector (without mean parameters) ### Optimization constraints are needed on these parameters ##' @export variances <- function(x,mean=FALSE) { ## if (is.null(x$parpos)) ## x$parpos <- parpos(x) x$parpos <- parpos(Model(x),mean=TRUE) res <- diag(x$parpos$P)[which(diag(index(x)$P0)==1)] if (!mean) { return(res - index(x)$npar.mean) } return(res) } ## And the off-diagonal (covariance) parameters ##' @export offdiags <- function(x,mean=FALSE) { parpos <- parpos(x,mean=mean) pp <- parpos$P pp[lower.tri(pp)][(index(x)$P0)[lower.tri(pp)]==1] } lava/R/timedep.R0000644000176200001440000000604113162174023013150 0ustar liggesusers##' Add time-varying covariate effects to model ##' ##' @title Time-dependent parameters ##' @param object Model ##' @param formula Formula with rhs specifying time-varying covariates ##' @param rate Optional rate parameters. If given as a vector this ##' parameter is interpreted as the raw (baseline-)rates within each ##' time interval defined by \code{timecut}. If given as a matrix the ##' parameters are interpreted as log-rates (and log-rate-ratios for ##' the time-varying covariates defined in the formula). ##' @param timecut Time intervals ##' @param type Type of model (default piecewise constant intensity) ##' @param ... Additional arguments to lower level functions ##' @author Klaus K. 
Holst ##' @aliases timedep timedep<- ##' @export ##' @examples ##' ##' ## Piecewise constant hazard ##' m <- lvm(y~1) ##' m <- timedep(m,y~1,timecut=c(0,5),rate=c(0.5,0.3)) ##' ##' \dontrun{ ##' d <- sim(m,1e4); d$status <- TRUE ##' dd <- mets::lifetable(Surv(y,status)~1,data=d,breaks=c(0,5,10)); ##' exp(coef(glm(events ~ offset(log(atrisk)) + -1 + interval, dd, family=poisson))) ##' } ##' ##' ##' ## Piecewise constant hazard and time-varying effect of z1 ##' m <- lvm(y~1) ##' distribution(m,~z1) <- ones.lvm(0.5) ##' R <- log(cbind(c(0.2,0.7,0.9),c(0.5,0.3,0.3))) ##' m <- timedep(m,y~z1,timecut=c(0,3,5),rate=R) ##' ##' \dontrun{ ##' d <- sim(m,1e4); d$status <- TRUE ##' dd <- mets::lifetable(Surv(y,status)~z1,data=d,breaks=c(0,3,5,Inf)); ##' exp(coef(glm(events ~ offset(log(atrisk)) + -1 + interval+z1:interval, dd, family=poisson))) ##' } ##' ##' ##' ##' ## Explicit simulation of time-varying effects ##' m <- lvm(y~1) ##' distribution(m,~z1) <- ones.lvm(0.5) ##' distribution(m,~z2) <- binomial.lvm(p=0.5) ##' #variance(m,~m1+m2) <- 0 ##' #regression(m,m1[m1:0] ~ z1) <- log(0.5) ##' #regression(m,m2[m2:0] ~ z1) <- log(0.3) ##' regression(m,m1 ~ z1,variance=0) <- log(0.5) ##' regression(m,m2 ~ z1,variance=0) <- log(0.3) ##' intercept(m,~m1+m2) <- c(-0.5,0) ##' m <- timedep(m,y~m1+m2,timecut=c(0,5)) ##' ##' \dontrun{ ##' d <- sim(m,1e5); d$status <- TRUE ##' dd <- mets::lifetable(Surv(y,status)~z1,data=d,breaks=c(0,5,Inf)) ##' exp(coef(glm(events ~ offset(log(atrisk)) + -1 + interval + interval:z1, dd, family=poisson))) ##' } timedep <- function(object,formula,rate,timecut,type="coxExponential.lvm",...) { if (missing(timecut)) stop("'timecut' needed") ##if (inherits(formula,"formula")) ff <- getoutcome(formula) simvars <- attributes(ff)$x if (is.null(object$attributes$simvar)) { object$attributes$simvar <- list(simvars) names(object$attributes$simvar) <- ff object$attributes$timedep <- object$attributes$simvar } else { object$attributes$simvar[[ff]] <- simvars object$attributes$timedep[[ff]] <- simvars } if (missing(rate)) rate <- rep(1,length(timecut)) args <- list(timecut=timecut,rate=rate,...) covariance(object,ff) <- 1 distribution(object,ff) <- do.call(type,args) return(object) } ##' @export "timedep<-" <- function(object,...,value) { timedep(object,value,...) } lava/R/img.R0000644000176200001440000001220413162174023012273 0ustar liggesusersimg <- function(x,idx,col=list(gray.colors(10,1,0.2)), ylab="Item",xlab="Subject",lab=TRUE, border=1,rowcol=FALSE,plotfun=NULL, axis1=TRUE,axis2=TRUE,yaxs="r",xaxs="r",cex.axis=0.4,...) { x0 <- seq(nrow(x)) y0 <- seq(ncol(x)) image(x=x0,y=y0,as.matrix(x),col=col[[1]],axes=FALSE,ylab=ylab,xlab=xlab,xaxs=xaxs,yaxs=yaxs,...) if (axis1) { axis(1,at=seq(nrow(x)),lwd=0.5,cex.axis=cex.axis,las=3) if (lab) suppressWarnings(title("",xlab=xlab,...)) } if (axis2) { axis(2,at=seq(ncol(x)),lwd=0.5,cex.axis=cex.axis,las=1) if (lab) suppressWarnings(title("",ylab=ylab,...)) } if (!is.null(plotfun)) { plotfun(...) } if (!missing(idx)) { if (rowcol) { for (i in seq_len(length(idx))) image(x=x0,y=idx[[i]],as.matrix(x[,idx[[i]]]),col=col[[i]],add=TRUE,xaxs=xaxs,yaxs=yaxs,...) } else for (i in seq_len(length(idx))) image(x=idx[[i]],y=y0,as.matrix(x[idx[[i]],]),col=col[[i]],add=TRUE,xaxs=xaxs,yaxs=yaxs,...) 
} } ##' Visualize categorical by group variable ##' ##' @title Organize several image calls (for visualizing categorical data) ##' @param x data.frame or matrix ##' @param group group variable ##' @param ncol number of columns in layout ##' @param byrow organize by row if TRUE ##' @param colorbar Add color bar ##' @param colorbar.space Space around color bar ##' @param label.offset label offset ##' @param order order ##' @param colorbar.border Add border around color bar ##' @param main Main title ##' @param rowcol switch rows and columns ##' @param plotfun Alternative plot function (instead of 'image') ##' @param axis1 Axis 1 ##' @param axis2 Axis 2 ##' @param mar Margins ##' @param col Colours ##' @param ... Additional arguments to lower level graphics functions ##' @author Klaus Holst ##' @examples ##' X <- matrix(rbinom(400,3,0.5),20) ##' group <- rep(1:4,each=5) ##' images(X,colorbar=0,zlim=c(0,3)) ##' images(X,group=group,zlim=c(0,3)) ##' \dontrun{ ##' images(X,group=group,col=list(RColorBrewer::brewer.pal(4,"Purples"), ##' RColorBrewer::brewer.pal(4,"Greys"), ##' RColorBrewer::brewer.pal(4,"YlGn"), ##' RColorBrewer::brewer.pal(4,"PuBuGn")),colorbar=2,zlim=c(0,3)) ##' } ##' images(list(X,X,X,X),group=group,zlim=c(0,3)) ##' images(list(X,X,X,X),ncol=1,group=group,zlim=c(0,3)) ##' images(list(X,X),group,axis2=c(FALSE,FALSE),axis1=c(FALSE,FALSE), ##' mar=list(c(0,0,0,0),c(0,0,0,0)),yaxs="i",xaxs="i",zlim=c(0,3)) ##' @export images <- function(x,group,ncol=2,byrow=TRUE,colorbar=1,colorbar.space=0.1,label.offset=0.02, order=TRUE,colorbar.border=0,main,rowcol=FALSE,plotfun=NULL, axis1,axis2,mar, col=list(c("#EFF3FF", "#BDD7E7", "#6BAED6", "#2171B5"), c("#FEE5D9", "#FCAE91", "#FB6A4A", "#CB181D"), c("#EDF8E9", "#BAE4B3", "#74C476", "#238B45"), c("#FEEDDE", "#FDBE85", "#FD8D3C", "#D94701")), ...) { if (is.data.frame(x) || is.matrix(x)) x <- list(x) K <- length(x) lout <- matrix(seq(K),ncol=ncol,byrow=byrow) hei <- rep(1,nrow(lout))/nrow(lout) wid <- rep(1,ncol)/ncol if (colorbar==1) { wid <- c(rep(1,ncol)/ncol*(1-colorbar.space),colorbar.space) lout <- cbind(lout,K+1) } if (colorbar==2) { hei <- c(rep(1,nrow(lout))/nrow(lout)*(1-colorbar.space),colorbar.space) lout <- rbind(lout,K+1) } if (missing(group)) { group <- rep(1,nrow(x[[1]])) } if (missing(main)) main <- rep("",K) if (!is.list(col)) col <- list(col) group <- factor(group) idxs <- lapply(levels(group), function(x) which(group==x)) layout(lout,widths=wid,heights=hei) ##if (missing(mar)) par(mar=c(4,4,3,0)) if (missing(axis2)) axis2 <- c(TRUE,rep(FALSE,K-1)) if (missing(axis1)) axis1 <- rep(TRUE,K) for (i in seq(length(x))) { ## if (!missing(mar)) par(mar=mar[[i]]) img(x[[i]],idxs,col,axis2=axis2[i],axis1=axis1[i],main=main[i],rowcol=rowcol,plotfun=plotfun[[i]],...) 
## if (missing(mar)) par(mar=c(4,2,3,2)) } G <- nlevels(group) M <- length(col[[1]]) if (colorbar==1) { par(mar=c(0,0,0,2)) plot.new(); plot.window(xlim=c(0,1),ylim=c(0,1)) for (i in seq(G)) { lava::colorbar(col[[i]],values=seq(M)-1,direction="horizontal", y.range=c(1-i/(G+1),1-i/(G+1)+label.offset), border=colorbar.border,x.range=c(0,1),srt=0,cex=0.6) text(0.5,1-i/(G+1)-label.offset, levels(group)[i]) } } if (colorbar==2) { par(mar=c(0,0,0,0)) plot.new(); plot.window(xlim=c(0,1),ylim=c(0,1)) for (i in seq(G)) { xr <- c(1-i/(G+1),1-i/(G+1)+.1)-.1/2 lava::colorbar(col[[i]],values=seq(M)-1,direction="horizontal", x.range=xr, border=colorbar.border,y.range=c(0.3,0.5),srt=0,cex=0.6) text(mean(xr),.1, levels(group)[i]) } } } lava/R/parpos.R0000644000176200001440000000412513162174023013026 0ustar liggesusers ##' Generic method for finding indeces of model parameters ##' ##' @title Generic method for finding indeces of model parameters ##' @param x Model object ##' @param \dots Additional arguments ##' @author Klaus K. Holst ##' @export `parpos` <- function(x,...) UseMethod("parpos") ##' @export parpos.default <- function(x,p,...) { if (is.numeric(p)) return(p) na.omit(match(coef(x),p)) } ##' @export parpos.multigroup <- function(x,p,mean=TRUE,...) { if (missing(p)) { p <- unique(unlist(lapply(x$lvm, function(z) setdiff(parlabels(z),names(constrain(z))) ))) } if (!is.character(p)) p <- names(p) p0 <- rep(NA,with(x,npar+npar.mean)); names(p0) <- c(x$mean,x$par) for (i in seq_along(x$lvm)) { cur <- parpos(x$lvm[[i]],p=p) if (length(cur)>0) { p0[c(x$meanpos[[i]],x$parpos[[i]])[cur]] <- names(cur) M <- na.omit(match(names(cur),p)) if (length(M)>0) p <- p[-M] } if (length(p)==0) break; } p1 <- which(!is.na(match(x$name,p))) p0[p1] <- x$name[p1] return(structure(which(!is.na(p0)),name=p0)) ## return(p0) } ##' @export parpos.multigroupfit <- function(x,...) parpos.multigroup(x$model0,...) ##' @export parpos.lvm <- function(x,p,mean=TRUE,...) { if (!missing(p)) { if (!is.character(p)) p <- names(p) cc1 <- coef(Model(x),mean=mean,fix=FALSE) cc2 <- coef(Model(x),mean=mean,fix=FALSE,labels=TRUE) idx1 <- na.omit(match(p,cc1)) idx11 <- na.omit(match(p,cc2)) res <- (union(idx1,idx11)); if (length(res)!=length(p)) { names(res) <- cc1[res] } else { names(res) <- p } ## res <- idx1; res[!is.na(idx11)] <- idx11[!is.na(idx11)] ## names(res) <- p ord <- order(res) res <- sort(res) attributes(res)$ord <- ord return(res) } if (mean) nn <- with(index(x),matrices2(x,seq_len(npar+npar.mean+npar.ex))) ## Position of parameters else nn <- with(index(x),matrices(x,seq_len(npar),NULL,seq_len(npar.ex)+npar)) nn$A[index(x)$M0!=1] <- 0 nn$P[index(x)$P0!=1] <- 0 nn$v[index(x)$v0!=1] <- 0 nn$e[index(x)$e0!=1] <- 0 nn } ##' @export parpos.lvmfit <- parpos.lvm lava/R/multinomial.R0000644000176200001440000001643313162174023014061 0ustar liggesusers ##' Estimate probabilities in contingency table ##' ##' @title Estimate probabilities in contingency table ##' @aliases multinomial kappa.multinomial kappa.table gkgamma ##' @param x Formula (or matrix or data.frame with observations, 1 or 2 columns) ##' @param data Optional data.frame ##' @param marginal If TRUE the marginals are estimated ##' @param transform Optional transformation of parameters (e.g., logit) ##' @param vcov Calculate asymptotic variance (default TRUE) ##' @param iid Return iid decomposition (default TRUE) ##' @param ... 
Additional arguments to lower-level functions ##' @export ##' @examples ##' set.seed(1) ##' breaks <- c(-Inf,-1,0,Inf) ##' m <- lvm(); covariance(m,pairwise=TRUE) <- ~y1+y2+y3+y4 ##' d <- transform(sim(m,5e2), ##' z1=cut(y1,breaks=breaks), ##' z2=cut(y2,breaks=breaks), ##' z3=cut(y3,breaks=breaks), ##' z4=cut(y4,breaks=breaks)) ##' ##' multinomial(d[,5]) ##' (a1 <- multinomial(d[,5:6])) ##' (K1 <- kappa(a1)) ## Cohen's kappa ##' ##' K2 <- kappa(d[,7:8]) ##' ## Testing difference K1-K2: ##' estimate(merge(K1,K2,id=TRUE),diff) ##' ##' estimate(merge(K1,K2,id=FALSE),diff) ## Wrong std.err ignoring dependence ##' sqrt(vcov(K1)+vcov(K2)) ##' ##' ## Average of the two kappas: ##' estimate(merge(K1,K2,id=TRUE),function(x) mean(x)) ##' estimate(merge(K1,K2,id=FALSE),function(x) mean(x)) ## Independence ##' ##' ##' ## Goodman-Kruskal's gamma ##' m2 <- lvm(); covariance(m2) <- y1~y2 ##' breaks1 <- c(-Inf,-1,0,Inf) ##' breaks2 <- c(-Inf,0,Inf) ##' d2 <- transform(sim(m2,5e2), ##' z1=cut(y1,breaks=breaks1), ##' z2=cut(y2,breaks=breaks2)) ##' ##' (g1 <- gkgamma(d2[,3:4])) ##' ## same as ##' \dontrun{ ##' gkgamma(table(d2[,3:4])) ##' gkgamma(multinomial(d2[,3:4])) ##' } ##' ##' ##partial gamma ##' d2$x <- rbinom(nrow(d2),2,0.5) ##' gkgamma(z1~z2|x,data=d2) ##' @author Klaus K. Holst multinomial <- function(x,data=parent.frame(),marginal=FALSE,transform,vcov=TRUE,iid=TRUE,...) { formula <- NULL if (inherits(x,"formula")) { trm <- terms(x) if (length(attr(trm,"term.labels"))>1) { x <- update(x,as.formula(paste0(".~ interaction(", paste0(attr(trm,"term.labels"),collapse=","),")"))) trm <- terms(x) } formula <- x x <- as.matrix(model.frame(trm,data)) if (ncol(x)>1) x <- x[,c(seq(ncol(x)-1)+1,1),drop=FALSE] } else { trm <- NULL } if (!vcov) iid <- FALSE if (is.table(x) && iid) x <- lava::Expand(x) if (NCOL(x)==1) { if (!is.table(x)) { x <- as.factor(x) lev <- levels(x) k <- length(lev) n <- length(x) P <- table(x)/n } else { n <- sum(x) P <- x/n lev <- names(x) k <- length(lev) } if (iid) { iid <- matrix(0,n,k) for (i in seq(k)) { iid[,i] <- (1*(x==lev[i])-P[i])/n }; varcov <- crossprod(iid) } else { iid <- varcov <- NULL if (vcov) { varcov <- tcrossprod(cbind(P))/n diag(varcov) <- P*(1-P)/n } } coefs <- as.vector(P); names(coefs) <- paste0("p",seq(k)) res <- list(call=match.call(), coef=coefs,P=P,vcov=varcov,iid=iid,position=seq(k),levels=list(lev),data=x, terms=trm) class(res) <- "multinomial" return(res) } if (!is.table(x)) { if (NCOL(x)!=2L) stop("Matrix or data.frame with one or two columns expected") x <- as.data.frame(x) x[,1] <- as.factor(x[,1]) x[,2] <- as.factor(x[,2]) lev1 <- levels(x[,1]) lev2 <- levels(x[,2]) k1 <- length(lev1) k2 <- length(lev2) M <- table(x) n <- sum(M) } else { lev1 <- rownames(x) lev2 <- colnames(x) k1 <- length(lev1) k2 <- length(lev2) M <- x n <- sum(x) } Pos <- P <- M/n if (iid) { iid <- matrix(0,n,k1*k2) for (j in seq(k2)) { for (i in seq(k1)) { pos <- (j-1)*k1+i iid[,pos] <- (x[,1]==lev1[i])*(x[,2]==lev2[j])-P[i,j] Pos[i,j] <- pos } }; iid <- iid/n } else { iid <- varcov <- NULL } coefs <- as.vector(P); names(coefs) <- as.vector(outer(seq(k1),seq(k2),function(...) 
paste0("p",...))) position1 <- position2 <- NULL if (marginal) { p1 <- rowSums(P) p2 <- colSums(P) names(p1) <- paste0("p",seq(k1),".") names(p2) <- paste0("p",".",seq(k2)) coefs <- c(coefs,p1,p2) position1 <- length(P)+seq(k1) position2 <- length(P)+k1+seq(k2) if (!is.null(iid)) { iid1 <- apply(Pos,1,function(x) rowSums(iid[,x])) iid2 <- apply(Pos,2,function(x) rowSums(iid[,x])) iid <- cbind(iid,iid1,iid2) colnames(iid) <- names(coefs) } } if (!missing(transform) && !is.null(iid)) { f <- function(p) do.call(transform,list(p)) D <- diag(numDeriv::grad(f,coefs),ncol=length(coefs)) coefs <- f(coefs) iid <- iid%*%t(D) } if (vcov && !is.null(iid)) varcov <- crossprod(iid) res <- list(call=match.call(), formula=formula, coef=coefs,P=P,vcov=varcov,iid=iid, position=Pos, call=match.call(), levels=list(lev1,lev2), data=x, position1=position1,position2=position2, ## Position of marginals) terms=trm ) class(res) <- "multinomial" if (length(list(...))>0) { res <- structure(estimate(res,...),class=c("multinomial","estimate")) } res } ##' @export model.frame.multinomial <- function(formula,...) { formula$data } ##' @export iid.multinomial <- function(x,...) { x$iid } ##' @export coef.multinomial <- function(object,...) { object$coef } ##' @export vcov.multinomial <- function(object,...) { object$vcov } ##' @export predict.multinomial <- function(object,newdata,type=c("prob","map"),...) { if (missing(newdata) || is.null(newdata)) newdata <- object$data if (!is.null(object$formula) && is.data.frame(newdata)) { trm <- terms(object$formula) newdata <- model.frame(trm,newdata)[,-1] } px <- rowSums(object$P) idx <- match(trim(as.character(newdata)),trim(rownames(object$P))) pcond <- object$P for (i in seq(nrow(pcond))) pcond[i,] <- pcond[i,]/px[i] pr <- pcond[idx,,drop=FALSE] if (tolower(type[1])%in%c("map","class")) { pr <- colnames(pr)[apply(pr,1,which.max)] } return(pr) } ## logLik.multinomial <- function(object,...) { ## } ##' @export print.multinomial <- function(x,...) { cat("Call: "); print(x$call) cat("\nJoint probabilities:\n") print(x$P,quote=FALSE) if (length(dim(x$P))>1) { cat("\nConditional probabilities:\n") print(predict(x,newdata=rownames(x$P)),quote=FALSE) } cat("\n") print(estimate(NULL,coef=coef(x),vcov=vcov(x))) ## stderr <- diag(vcov(x))^.5 ## StdErr <- x$position ## StdErr[] <- stderr[StdErr] ## cat("\nStd.Err:\n") ## print(StdErr,quote=FALSE) ## cat("\nPosition:\n") ## print(x$position,quote=FALSE) } lava/R/glmest.R0000644000176200001440000002560113162174023013017 0ustar liggesusers glm.estimate.hook <- function(x,estimator,...) { yy <- c() if (length(estimator)>0 && estimator=="glm") { for (y in endogenous(x)) { fam <- attributes(distribution(x)[[y]])$family if (is.null(fam)) fam <- stats::gaussian() if (!(tolower(fam$family)%in% c("gaussian","gamma","inverse.gaussian","weibull"))) { yy <- c(yy,y) } } if (length(yy)>0) covariance(x,yy) <- 1 } return(c(list(x=x,estimator=estimator,...))) } GLMest <- function(m,data,control=list(),...) 
{ v <- vars(m) yvar <- endogenous(m) res <- c() count <- 0 V <- NULL mymsg <- c() iids <- c() breads <- c() et <- eventTime(m) yvar.et <- rep(NA,length(yvar)) names(yvar.et) <- yvar if (!is.null(et)) { for (i in seq_along(et)) { ## if (!survival::is.Surv(data[,et[[i]]$names[1]])) ## data[,et[[i]]$names[1]] <- with(et[[i]], ## survival::Surv(data[,names[1]],data[,names[2]])) yvar <- setdiff(yvar,c(et[[i]]$latentTimes[-1],et[[i]]$names)) yvar.et[et[[i]]$latentTimes[1]] <- et[[i]]$names[1] } } ## newpar <- c() for (y in yvar) { count <- count+1 xx <- parents(m,y) fam <- attributes(distribution(m)[[y]])$family if (is.null(fam)) fam <- stats::gaussian() if (!is.null(fam$link)) { mymsg <- c(mymsg, with(fam, paste0(family,"(",link,")"))) } else { mymsg <- c(mymsg, with(fam, paste0(family))) } if (length(xx)==0) xx <- 1 nn0 <- paste(y,xx,sep=lava.options()$symbol[1]) y0 <- y ## isEventTime <- !is.na(yvar.et[y]) ## if (isEventTime) { ## y <- yvar.et[y] ## } #nn0 <- paste(y,xx,sep=lava.options()$symbol[1]) f <- as.formula(paste0(y,"~",paste(xx,collapse="+"))) isSurv <- inherits(data[1,y],"Surv") if (isSurv) { g <- survival::survreg(f,data=data,dist=fam$family) } else { g <- glm(f,family=fam,data=data) } p <- pars(g) ii <- iid(g) V0 <- attr(ii,"bread") iids <- cbind(iids,ii) y <- y0 names(p)[1] <- y if (length(p)>1) { nn <- paste(y,xx,sep=lava.options()$symbol[1]) names(p)[seq_along(nn)+1] <- nn0 if (length(p)>length(nn)+1) names(p)[length(p)] <- paste(y,y,sep=lava.options()$symbol[2]) } ## if (isEventTime) { ## newpar <- c(newpar,names(p)) ## } if (tolower(fam$family)%in%c("gaussian","gamma","inverse.gaussian") && !isSurv) { iids <- cbind(iids,0) null <- matrix(0); dimnames(null) <- list("scale","scale") V0 <- blockdiag(V0,null,pad=0) } breads <- c(breads,list(V0)) res <- c(res, list(p)); } coefs <- unlist(res) idx <- na.omit(match(coef(m),names(coefs))) coefs <- coefs[idx] ##V <- Reduce(blockdiag,breads)[idx,idx] V <- crossprod(iids[,idx]) ##V <- crossprod(iids[,idx]) mymsg <- noquote(cbind(mymsg)) colnames(mymsg) <- "Family(Link)"; rownames(mymsg) <- paste(yvar,":") list(estimate=coefs,vcov=V,breads=breads,iid=iids[,idx],summary.message=function(...) { mymsg }, dispname="Dispersion:") ##, new.parameters=newpar) } GLMscore <- function(x,p,data,indiv=TRUE,logLik=FALSE,...) { v <- vars(x) yvar <- endogenous(x) S <- pnames <- c() count <- 0 pos <- 0 breads <- c() L <- 0 for (y in yvar) { count <- count+1 xx <- parents(x,y) pname <- c(y,paste0(y,sep=lava.options()$symbol[1],xx),paste(y,y,sep=lava.options()$symbol[2])) pidx <- na.omit(match(pname,coef(x))) ##pidx <- na.omit(match(coef(x),pname)) fam <- attributes(distribution(x)[[y]])$family if (is.null(fam)) fam <- stats::gaussian() if (length(xx)==0) xx <- 1 f <- as.formula(paste0(y,"~",paste(xx,collapse="+"))) isSurv <- inherits(data[1,y],"Surv") if (inherits(data[,y],"Surv")) { g <- survival::survreg(f,data=data,dist=fam$family) } else { g <- glm(f,family=fam,data=data) } pdispersion <- NULL npar <- length(xx)+2 p0 <- p[pidx] if (!isSurv) L0 <- logL.glm(g,p=p0,indiv=TRUE,...) if (tolower(fam$family)%in%c("gaussian","gamma","inverse.gaussian") && !isSurv) { p0 <- p0[-length(p0)] S0 <- score(g,p=p0,indiv=TRUE,pearson=TRUE,...) V0 <- attr(S0,"bread") r <- attr(S0,"pearson") dispersion <- mean(r^2) S0 <- cbind(S0,scale=0) null <- matrix(0); dimnames(null) <- list("scale","scale") V0 <- blockdiag(V0,null,pad=0) } else { S0 <- score(g,p=p0,indiv=TRUE,...) 
if (isSurv) L0 <- attr(S0,"logLik") V0 <- attr(S0,"bread") } L <- L+sum(L0) breads <- c(breads,list(V0)) S <- c(S,list(S0)) pnames <- c(pnames, list(pname)); } coefs <- unlist(pnames) idx <- na.omit(match(coefs,coef(x))) idx <- order(idx) V <- Reduce(blockdiag,breads)[idx,idx] S1 <- Reduce(cbind,S)[,idx,drop=FALSE] colnames(S1) <- coef(x) attributes(S1)$bread <- V attributes(S1)$logLik <- structure(L,nobs=nrow(data),nall=nrow(data),df=length(p),class="logLik") if (!indiv) S1 <- colSums(S1) return(S1) } ##' @export score.lm <- function(x,p=coef(x),data,indiv=FALSE, y,X,offset=NULL,weights=NULL,...) { response <- all.vars(formula(x))[1] sigma2 <- summary(x)$sigma^2 if (missing(data)) { X <- model.matrix(x) y <- model.frame(x)[,1] } else { X <- model.matrix(formula(x),data=data) y <- model.frame(formula(x),data=data)[,1] } n <- nrow(X) if(any(is.na(p))) warning("Over-parameterized model") Xbeta <- X%*%p if (is.null(offset)) offset <- x$offset if (!is.null(offset)) Xbeta <- Xbeta+offset r <- y-Xbeta if (is.null(weights)) weights <- x$weights if (!is.null(weights)) r <- r*weights A <- as.vector(r)/sigma2 S <- apply(X,2,function(x) x*A) if (!indiv) return(colSums(S)) attributes(S)$bread <- vcov(x) return(S) } ##' @export score.glm <- function(x,p=coef(x),data,indiv=FALSE,pearson=FALSE, y,X,link,dispersion,offset=NULL,weights=NULL,...) { response <- all.vars(formula(x))[1] if (inherits(x,"glm")) { link <- family(x) if (missing(data)) { X <- model.matrix(x) y <- model.frame(x)[,1] } else { X <- model.matrix(formula(x),data=data) y <- model.frame(formula(x),data=data)[,1] } offset <- x$offset } else { if (missing(link)) stop("Family needed") if (missing(data)) stop("data needed") X <- model.matrix(formula(x),data=data) y <- model.frame(formula(x),data=data)[,1] } if (is.character(y) || is.factor(y)) { y <- as.numeric(as.factor(y))-1 } n <- nrow(X) g <- link$linkfun ginv <- link$linkinv dginv <- link$mu.eta ## D[linkinv] ##dg <- function(x) 1/dginv(g(x)) ## Dh^-1 = 1/(h'(h^-1(x))) canonf <- do.call(link$family,list()) caninvlink <- canonf$linkinv canlink <- canonf$linkfun Dcaninvlink <- canonf$mu.eta Dcanlink <- function(x) 1/Dcaninvlink(canlink(x)) ##gmu <- function(x) g(caninvlink(x)) ##invgmu <- function(z) canlink(ginv(z)) h <- function(z) Dcanlink(ginv(z))*dginv(z) if(any(is.na(p))) stop("Over-parameterized model") Xbeta <- X%*%p if (!is.null(offset)) Xbeta <- Xbeta+offset if (missing(data) && !is.null(x$offset) && is.null(offset) ) Xbeta <- Xbeta+x$offset pi <- ginv(Xbeta) ##res <- as.vector(y/pi*dginv(Xbeta)-(1-y)/(1-pi)*dginv(Xbeta))*X ##return(res) r <- y-pi if (!is.null(x$prior.weights) || !is.null(weights)) { if (is.null(weights)) weights <- x$prior.weights } else { weights <- !is.na(r) } r <- r*weights a.phi <- 1 rpearson <- as.vector(r)/link$variance(pi)^.5 if (length(p)>length(coef(x))) { a.phi <- p[length(coef(x))+1] } else if (tolower(family(x)$family)%in%c("gaussian","gamma","inverse.gaussian")) { ##a.phi <- summary(x)$dispersion*g0$df.residual/sum(weights) a.phi <- sum(rpearson^2)*x$df.residual/x$df.residual^2 } A <- as.vector(h(Xbeta)*r)/a.phi S <- apply(X,2,function(x) x*A) if (!indiv) return(colSums(S)) if (pearson) attr(S,"pearson") <- rpearson attributes(S)$bread <- vcov(x) if (x$family$family=="quasi" && x$family$link=="identity" && x$family$varfun=="constant") attributes(S)$bread <- -Inverse(information.glm(x)) return(S) } ##' @export pars.glm <- function(x,...) 
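## (Reference note for score.glm above) For a GLM with linear predictor
## eta = X %*% beta, inverse link mu = g^{-1}(eta) and dispersion a(phi),
## the individual score contributions computed above take the form
##   U_i(beta) = h(eta_i) * (y_i - mu_i) / a(phi) * x_i,
## where h(eta) = (d mu / d eta) / V(mu), which reduces to 1 for the canonical
## link.  pars.glm below simply appends the estimated dispersion as an extra
## parameter for the gaussian/Gamma/inverse-gaussian families.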
{ if (tolower(family(x)$family)%in%c("gaussian","gamma","inverse.gaussian")) { res <- c(coef(x),summary(x)$dispersion) names(res)[length(res)] <- "Dispersion" return(res) } return(coef(x)) } logL.glm <- function(x,p=pars.glm(x),data,indiv=FALSE,...) { if (!missing(data)) { x <- update(x,data=data,...) } f <- family(x) ginv <- f$linkinv X <- model.matrix(x) n <- nrow(X) disp <- 1; p0 <- p if (tolower(family(x)$family)%in%c("gaussian","gamma","inverse.gaussian")) { if (length(p)==ncol(X)) { disp <- suppressWarnings((summary(x)$dispersion)) } else { disp <- tail(p,1) p0 <- p[-length(p)] } } if(any(is.na(p))) { warning("Over-parametrized model") } Xbeta <- X%*%p0 if (!is.null(x$offset)) Xbeta <- Xbeta+x$offset y <- model.frame(x)[,1] mu <- ginv(Xbeta) w <- x$prior.weights dev <- f$dev.resids(y,mu,w) if (indiv) { } loglik <- length(p)-(f$aic(y,n,mu,w,sum(dev))/2+x$rank) structure(loglik,nobs=n,df=length(p),class="logLik") } ##' @export iid.glm <- function(x,...) { ## if (x$family$family=="quasi" && x$family$link=="identity" && x$family$varfun=="constant") { ## return(iid.default(x,information.glm,...)) ## } iid.default(x,...) } hessian.glm <- function(x,p=coef(x),...) { numDeriv::jacobian(function(theta) score.glm(x,p=theta,indiv=FALSE,...),p) } ##' @export information.glm <- function(x,...) hessian.glm(x,...) robustvar <- function(x,id=NULL,...) { U <- score(x,indiv=TRUE) II <- unique(id) K <- length(II) J <- 0 if (is.null(id)) { J <- crossprod(U) } else { for (ii in II) { J <- J+tcrossprod(colSums(U[which(id==ii),,drop=FALSE])) } J <- K/(K-1)*J } iI <- vcov(x) V <- iI%*%J%*%iI return(V) } glm_logLik.lvm <- function(object,...) { attr(GLMscore(object,...),"logLik") } glm_method.lvm <- NULL glm_objective.lvm <- function(x,p,data,...) { GLMest(x,data,...) } glm_gradient.lvm <- function(x,p,data,...) { -GLMscore(x,p,data,...) } glm_variance.lvm <- function(x,p,data,opt,...) { opt$vcov } lava/R/revdiag.R0000644000176200001440000000410013162174023013134 0ustar liggesusers##' Create/extract 'reverse'-diagonal matrix or off-diagonal elements ##' @title Create/extract 'reverse'-diagonal matrix or off-diagonal elements ##' @aliases revdiag revdiag<- offdiag offdiag<- ##' @usage ##' revdiag(x,...) ##' offdiag(x,type=0,...) ##' ##' revdiag(x,...) <- value ##' offdiag(x,type=0,...) <- value ##' @param x vector ##' @param value For the assignment function the values to put in the diagonal ##' @param type 0: upper and lower triangular, 1: upper triangular, 2: lower triangular, 3: upper triangular + diagonal, 4: lower triangular + diagonal ##' @param \dots additional arguments to lower level functions ##' @author Klaus K. Holst ##' @export revdiag <- function(x,...) { if (NCOL(x)==1) { res <- matrix(0,length(x),length(x)) revdiag(res) <- x return(res) } n <- max(ncol(x),nrow(x)) x[cbind(rev(seq(n)),seq(n))] } ##' @export "revdiag<-" <- function(x,...,value) { n <- max(ncol(x),nrow(x)) x[cbind(rev(seq(n)),seq(n))] <- value x } ##' @export offdiag <- function(x,type=0,...) 
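## (Sketch) The 'type' argument documented above selects which part of the
## matrix is extracted or assigned: 0 both triangles (excluding the diagonal),
## 1 upper, 2 lower, 3 upper + diagonal, 4 lower + diagonal.  For example,
## something like
##   offdiag(matrix(1:9,3), type=1)   # upper-triangular elements
##   offdiag(M, type=2) <- 0          # zero the lower triangle ('M' assumed)
## should extract or overwrite the corresponding elements.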
{ ##if (NCOL(x)==1) return(NULL) if (type%in%c(1,3)) { ii <- which(upper.tri(x,diag=(type==3))) } else if (type%in%c(2,4)) { ii <- which(lower.tri(x,diag=(type==4))) } else { ii <- c(which(lower.tri(x,diag=FALSE)),which(upper.tri(x,diag=FALSE))) } res <- x[ii] class(res) <- c("offdiag",class(res)) attributes(res) <- c(attributes(res),list(type=type,dimension=dim(x),index=ii,nam=dimnames(x))) return(res) } ##' @export "offdiag<-" <- function(x,type=0,...,value) { if (type%in%c(1,3)) { ii <- which(upper.tri(x,diag=(type==3))) } else if (type%in%c(2,4)) { ii <- which(lower.tri(x,diag=(type==4))) } else { ii <- c(which(lower.tri(x,diag=FALSE)),which(upper.tri(x,diag=FALSE))) } x[ii] <- value return(x) } ##' @export print.offdiag <- function(x,...) { type <- attr(x,"type") nn <- attr(x,"dimension") M <- matrix(NA,nn[1],nn[2]) M[attr(x,"index")] <- x dimnames(M) <- attr(x,"nam") print(M,na.print="",...) } lava/R/estimate.default.R0000644000176200001440000006556313162174023014775 0ustar liggesusers##' @export estimate <- function(x,...) UseMethod("estimate") ##' @export estimate.list <- function(x,...) { if (inherits(x[[1]],"lvm")) return(estimate.lvmlist(x,...)) lapply(x,function(x) estimate(x,...)) } ##' Estimation of functional of parameters ##' ##' Estimation of functional of parameters. ##' Wald tests, robust standard errors, cluster robust standard errors, ##' LRT (when \code{f} is not a function)... ##' @param x model object (\code{glm}, \code{lvmfit}, ...) ##' @param f transformation of model parameters and (optionally) data, or contrast matrix (or vector) ##' @param ... additional arguments to lower level functions ##' @param data \code{data.frame} ##' @param id (optional) id-variable corresponding to iid decomposition of model parameters. ##' @param iddata (optional) id-variable for 'data' ##' @param stack if TRUE (default) the i.i.d. decomposition is automatically stacked according to 'id' ##' @param average if TRUE averages are calculated ##' @param subset (optional) subset of data.frame on which to condition (logical expression or variable name) ##' @param score.deriv (optional) derivative of mean score function ##' @param level level of confidence limits ##' @param iid if TRUE (default) the iid decompositions are also returned (extract with \code{iid} method) ##' @param type type of small-sample correction ##' @param keep (optional) index of parameters to keep from final result ##' @param use (optional) index of parameters to use in calculations ##' @param contrast (optional) Contrast matrix for final Wald test ##' @param null (optional) null hypothesis to test ##' @param vcov (optional) covariance matrix of parameter estimates (e.g. Wald-test) ##' @param coef (optional) parameter coefficient ##' @param robust if TRUE robust standard errors are calculated. If ##' FALSE p-values for linear models are calculated from t-distribution ##' @param df degrees of freedom (default obtained from 'df.residual') ##' @param print (optional) print function ##' @param labels (optional) names of coefficients ##' @param label.width (optional) max width of labels ##' @param only.coef if TRUE only the coefficient matrix is return ##' @param back.transform (optional) transform of parameters and confidence intervals ##' @param folds (optional) aggregate influence functions (divide and conquer) ##' @param cluster (obsolete) alias for 'id'. 
##' @param R Number of simulations (simulated p-values) ##' @param null.sim Mean under the null for simulations ##' @details ##' ##' iid decomposition ##' \deqn{\sqrt{n}(\widehat{\theta}-\theta) = \sum_{i=1}^n\epsilon_i + o_p(1)} ##' can be extracted with the \code{iid} method. ##' ##' @export ##' @examples ##' ##' ## Simulation from logistic regression model ##' m <- lvm(y~x+z); ##' distribution(m,y~x) <- binomial.lvm("logit") ##' d <- sim(m,1000) ##' g <- glm(y~z+x,data=d,family=binomial()) ##' g0 <- glm(y~1,data=d,family=binomial()) ##' ##' ## LRT ##' estimate(g,g0) ##' ##' ## Plain estimates (robust standard errors) ##' estimate(g) ##' ##' ## Testing contrasts ##' estimate(g,null=0) ##' estimate(g,rbind(c(1,1,0),c(1,0,2))) ##' estimate(g,rbind(c(1,1,0),c(1,0,2)),null=c(1,2)) ##' estimate(g,2:3) ## same as cbind(0,1,-1) ##' estimate(g,as.list(2:3)) ## same as rbind(c(0,1,0),c(0,0,1)) ##' ## Alternative syntax ##' estimate(g,"z","z"-"x",2*"z"-3*"x") ##' estimate(g,z,z-x,2*z-3*x) ##' estimate(g,"?") ## Wilcards ##' estimate(g,"*Int*","z") ##' estimate(g,"1","2"-"3",null=c(0,1)) ##' estimate(g,2,3) ##' ##' ## Usual (non-robust) confidence intervals ##' estimate(g,robust=FALSE) ##' ##' ## Transformations ##' estimate(g,function(p) p[1]+p[2]) ##' ##' ## Multiple parameters ##' e <- estimate(g,function(p) c(p[1]+p[2],p[1]*p[2])) ##' e ##' vcov(e) ##' ##' ## Label new parameters ##' estimate(g,function(p) list("a1"=p[1]+p[2],"b1"=p[1]*p[2])) ##' ##' ##' ## Multiple group ##' m <- lvm(y~x) ##' m <- baptize(m) ##' d2 <- d1 <- sim(m,50) ##' e <- estimate(list(m,m),list(d1,d2)) ##' estimate(e) ## Wrong ##' estimate(e,id=rep(seq(nrow(d1)),2)) ##' estimate(lm(y~x,d1)) ##' ##' ## Marginalize ##' f <- function(p,data) ##' list(p0=lava:::expit(p["(Intercept)"] + p["z"]*data[,"z"]), ##' p1=lava:::expit(p["(Intercept)"] + p["x"] + p["z"]*data[,"z"])) ##' e <- estimate(g, f, average=TRUE) ##' e ##' estimate(e,diff) ##' estimate(e,cbind(1,1)) ##' ##' ## Clusters and subset (conditional marginal effects) ##' d$id <- rep(seq(nrow(d)/4),each=4) ##' estimate(g,function(p,data) ##' list(p0=lava:::expit(p[1] + p["z"]*data[,"z"])), ##' subset=d$z>0, id=d$id, average=TRUE) ##' ##' ## More examples with clusters: ##' m <- lvm(c(y1,y2,y3)~u+x) ##' d <- sim(m,10) ##' l1 <- glm(y1~x,data=d) ##' l2 <- glm(y2~x,data=d) ##' l3 <- glm(y3~x,data=d) ##' ##' ## Some random id-numbers ##' id1 <- c(1,1,4,1,3,1,2,3,4,5) ##' id2 <- c(1,2,3,4,5,6,7,8,1,1) ##' id3 <- seq(10) ##' ##' ## Un-stacked and stacked i.i.d. decomposition ##' iid(estimate(l1,id=id1,stack=FALSE)) ##' iid(estimate(l1,id=id1)) ##' ##' ## Combined i.i.d. 
decomposition ##' e1 <- estimate(l1,id=id1) ##' e2 <- estimate(l2,id=id2) ##' e3 <- estimate(l3,id=id3) ##' (a2 <- merge(e1,e2,e3)) ##' ##' ## If all models were estimated on the same data we could use the ##' ## syntax: ##' ## Reduce(merge,estimate(list(l1,l2,l3))) ##' ##' ## Same: ##' iid(a1 <- merge(l1,l2,l3,id=list(id1,id2,id3))) ##' ##' iid(merge(l1,l2,l3,id=TRUE)) # one-to-one (same clusters) ##' iid(merge(l1,l2,l3,id=FALSE)) # independence ##' ##' ##' ## Monte Carlo approach, simple trend test example ##' ##' m <- categorical(lvm(),~x,K=5) ##' regression(m,additive=TRUE) <- y~x ##' d <- simulate(m,100,seed=1,'y~x'=0.1) ##' l <- lm(y~-1+factor(x),data=d) ##' ##' f <- function(x) coef(lm(x~seq_along(x)))[2] ##' null <- rep(mean(coef(l)),length(coef(l))) ## just need to make sure we simulate under H0: slope=0 ##' estimate(l,f,R=1e2,null.sim=null) ##' ##' estimate(l,f) ##' @aliases estimate estimate.default estimate.estimate merge.estimate ##' @method estimate default ##' @export estimate.default <- function(x=NULL,f=NULL,...,data,id, iddata,stack=TRUE,average=FALSE,subset, score.deriv,level=0.95,iid=TRUE, type=c("robust","df","mbn"), keep,use, contrast,null,vcov,coef, robust=TRUE,df=NULL, print=NULL,labels,label.width, only.coef=FALSE,back.transform=NULL, folds=0, cluster, R=0, null.sim) { cl <- match.call(expand.dots=TRUE) if (!missing(use)) { p0 <- c("f","contrast","only.coef","subset","average","keep","labels") cl0 <- cl cl0[c("use",p0)] <- NULL cl0$keep <- use cl$x <- eval(cl0,parent.frame()) cl[c("vcov","use")] <- NULL return(eval(cl,parent.frame())) } expr <- suppressWarnings(inherits(try(f,silent=TRUE),"try-error")) if (!missing(coef)) { pp <- coef } else { pp <- suppressWarnings(try(stats::coef(x),"try-error")) if (inherits(x,"survreg") && length(pp)0 } idstack <- NULL ## Preserve id from 'estimate' object if (missing(id) && inherits(x,"estimate") && !is.null(x$id)) id <- x$id if (!missing(id) && iid) { if (is.null(iidtheta)) stop("'iid' method needed") nprev <- nrow(iidtheta) if (inherits(id,"formula")) { id <- interaction(get_all_vars(id,data)) } ## e <- substitute(id) ## expr <- suppressWarnings(inherits(try(id,silent=TRUE),"try-error")) ## if (expr) id <- eval(e,envir=data) ##if (!is.null(data)) id <- eval(e, data) if (is.logical(id) && length(id)==1) { id <- if(is.null(iidtheta)) seq(nrow(data)) else seq(nprev) stack <- FALSE } if (is.character(id) && length(id)==1) id <- data[,id,drop=TRUE] if (!is.null(iidtheta)) { if (length(id)!=nprev) { if (!is.null(x$na.action) && (length(id)==length(x$na.action)+nprev)) { warning("Applying na.action") id <- id[-x$na.action] } else stop("Dimensions of i.i.d decomposition and 'id' does not agree") } } else { if (length(id)!=nrow(data)) { if (!is.null(x$na.action) && (length(id)==length(x$na.action)+nrow(data))) { warning("Applying na.action") id <- id[-x$na.action] } else stop("Dimensions of i.i.d decomposition and 'id' does not agree") } } if (stack) { N <- nrow(iidtheta) clidx <- NULL atr <- attributes(iidtheta) atr$dimnames <- NULL atr$dim <- NULL if (!lava.options()$cluster.index) { iidtheta <- matrix(unlist(by(iidtheta,id,colSums)),byrow=TRUE,ncol=ncol(iidtheta)) attributes(iidtheta)[names(atr)] <- atr idstack <- sort(unique(id)) } else { clidx <- mets::cluster.index(id,mat=iidtheta,return.all=TRUE) iidtheta <- clidx$X attributes(iidtheta)[names(atr)] <- atr idstack <- id[as.vector(clidx$firstclustid)+1] } if (is.null(attributes(iidtheta)$N)) { attributes(iidtheta)$N <- N } } else idstack <- id } else { if (!is.null(data)) 
idstack <- rownames(data) } if (!is.null(iidtheta) && (length(idstack)==nrow(iidtheta))) rownames(iidtheta) <- idstack if (!robust) { if (inherits(x,"lm") && family(x)$family=="gaussian" && is.null(df)) df <- x$df.residual if (missing(vcov)) vcov <- stats::vcov(x) } if (!is.null(iidtheta) && (missing(vcov) || is.null(vcov))) { ## if (is.null(f)) V <- crossprod(iidtheta) ### Small-sample corrections for clustered data K <- NROW(iidtheta) N <- attributes(iidtheta)$N if (is.null(N)) N <- K p <- NCOL(iidtheta) adj0 <- K/(K-p) ## Mancl & DeRouen, 2001 adj1 <- K/(K-1) ## Mancl & DeRouen, 2001 adj2 <- (N-1)/(N-p)*(K/(K-1)) ## Morel,Bokossa & Neerchal, 2003 if (tolower(type[1])=="mbn" && !is.null(attributes(iidtheta)$bread)) { V0 <- V iI0 <- attributes(iidtheta)$bread I0 <- Inverse(iI0) I1 <- crossprod(iidtheta%*%I0) delta <- min(0.5,p/(K-p)) phi <- max(1,tr(I0%*%V0)*adj2/p) V <- adj2*V0 + delta*phi*iI0 } if (tolower(type[1])=="df") { V <- adj0*V } if (tolower(type[1])=="df1") { V <- adj1*V } if (tolower(type[1])=="df2") { V <- adj2*V } } else { if (!missing(vcov)) { if (length(vcov)==1 && is.na(vcov)) vcov <- matrix(NA,length(pp),length(pp)) V <- vcov } else { V <- stats::vcov(x) } } ## Simulate p-value if (R>0) { if (is.null(f)) stop("Supply function 'f'") if (missing(null.sim)) null.sim <- rep(0,length(pp)) est <- f(pp) if (is.list(est)) { nn <- names(est) est <- unlist(est) names(est) <- nn } if (missing(labels)) { labels <- colnames(rbind(est)) } res <- simnull(R,f,mu=null.sim,sigma=V,labels=labels) return(structure(res, class=c("estimate.sim","sim"), coef=pp, vcov=V, f=f, estimate=est)) } if (!is.null(f)) { form <- names(formals(f)) dots <- ("..."%in%names(form)) form0 <- setdiff(form,"...") parname <- "p" if (!is.null(form)) parname <- form[1] # unless .Primitive if (length(form0)==1 && !(form0%in%c("object","data"))) { ##names(formals(f))[1] <- "p" parname <- form0 } if (!is.null(iidtheta)) { arglist <- c(list(object=x,data=data,p=vec(pp)),list(...)) names(arglist)[3] <- parname } else { arglist <- c(list(object=x,p=vec(pp)),list(...)) names(arglist)[2] <- parname } if (!dots) { arglist <- arglist[intersect(form0,names(arglist))] } newf <- NULL if (length(form)==0) { arglist <- list(vec(pp)) ##newf <- function(p,...) do.call("f",list(p,...)) newf <- function(...) do.call("f",list(...)) val <- do.call("f",arglist) } else { val <- do.call("f",arglist) if (is.list(val)) { nn <- names(val) val <- do.call("cbind",val) ##newf <- function(p,...) do.call("cbind",f(p,...)) newf <- function(...) do.call("cbind",f(...)) } } k <- NCOL(val) N <- NROW(val) D <- attributes(val)$grad if (is.null(D)) { D <- numDeriv::jacobian(function(p,...) 
{ if (length(form)==0) arglist[[1]] <- p else arglist[[parname]] <- p if (is.null(newf)) return(do.call("f",arglist)) return(do.call("newf",arglist)) }, pp) } if (is.null(iidtheta)) { pp <- structure(as.vector(val),names=names(val)) V <- D%*%V%*%t(D) } else { if (!average || (N1) { ## More than one parameter (and depends on data) if (!missing(subset)) { ## Conditional estimate val <- apply(val,2,function(x) x*subset) } D0 <- matrix(nrow=k,ncol=length(pp)) for (i in seq_len(k)) { D1 <- D[seq(N)+(i-1)*N,,drop=FALSE] if (!missing(subset)) ## Conditional estimate D1 <- apply(D1,2,function(x) x*subset) D0[i,] <- colMeans(D1) } D <- D0 iid2 <- iidtheta%*%t(D) } else { ## Single parameter if (!missing(subset)) { ## Conditional estimate val <- val*subset D <- apply(rbind(D),2,function(x) x*subset) } D <- colMeans(rbind(D)) iid2 <- iidtheta%*%D } pp <- vec(colMeans(cbind(val))) iid1 <- (cbind(val)-rbind(pp)%x%cbind(rep(1,N)))/N if (!missing(id)) { if (!lava.options()$cluster.index) iid1 <- matrix(unlist(by(iid1,id,colSums)),byrow=TRUE,ncol=ncol(iid1)) else { iid1 <- mets::cluster.index(id,mat=iid1,return.all=FALSE) } } if (!missing(subset)) { ## Conditional estimate phat <- mean(subset) iid3 <- cbind(-1/phat^2 * (subset-phat)/N) ## check if (!missing(id)) { if (!lava.options()$cluster.index) { iid3 <- matrix(unlist(by(iid3,id,colSums)),byrow=TRUE,ncol=ncol(iid3)) } else { iid3 <- mets::cluster.index(id,mat=iid3,return.all=FALSE) } } iidtheta <- (iid1+iid2)/phat + rbind(pp)%x%iid3 pp <- pp/phat V <- crossprod(iidtheta) } else { if (nrow(iid1)!=nrow(iid2)) { message("Assuming independence between model iid decomposition and new data frame") V <- crossprod(iid1) + crossprod(iid2) } else { iidtheta <- iid1+iid2 V <- crossprod(iidtheta) } } } } } if (is.null(V)) { res <- cbind(pp,NA,NA,NA,NA) } else { if (length(pp)==1) res <- rbind(c(pp,diag(V)^0.5)) else res <- cbind(pp,diag(V)^0.5) beta0 <- res[,1] if (!missing(null) && missing(contrast)) beta0 <- beta0-null if (!is.null(df)) { za <- qt(1-alpha/2,df=df) pval <- 2*pt(abs(res[,1]/res[,2]),df=df,lower.tail=FALSE) } else { za <- qnorm(1-alpha/2) pval <- 2*pnorm(abs(res[,1]/res[,2]),lower.tail=FALSE) } res <- cbind(res,res[,1]-za*res[,2],res[,1]+za*res[,2],pval) } colnames(res) <- c("Estimate","Std.Err",alpha.str,"P-value") if (!is.null(nn)) { rownames(res) <- nn } else { nn <- attributes(res)$varnames if (!is.null(nn)) rownames(res) <- nn if (is.null(rownames(res))) rownames(res) <- paste0("p",seq(nrow(res))) } coefs <- res[,1,drop=TRUE]; names(coefs) <- rownames(res) res <- structure(list(coef=coefs,coefmat=res,vcov=V, iid=NULL, print=print, id=idstack),class="estimate") if (iid) ## && is.null(back.transform)) res$iid <- iidtheta if (!missing(contrast) | !missing(null)) { p <- length(res$coef) if (missing(contrast)) contrast <- diag(nrow=p) if (missing(null)) null <- 0 if (is.vector(contrast) || is.list(contrast)) { contrast <- contr(contrast, names(res$coef), ...) 
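## Illustrative note, not package code: contr() expands index and list shorthands
## into a full contrast matrix, e.g. (with 'g' a fitted glm as in the examples above)
##   estimate(g, 2:3)               ## same contrast as cbind(0,1,-1)
##   estimate(g, rbind(c(0,1,-1)))  ## Wald test of beta_2 - beta_3 = 0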
## if (length(contrast)==p) contrast <- rbind(contrast) ## else { ## cont <- contrast ## contrast <- diag(nrow=p)[cont,,drop=FALSE] ## } } cc <- compare(res,contrast=contrast,null=null,vcov=V,level=level,df=df) res <- structure(c(res, list(compare=cc)),class="estimate") if (!is.null(df)) { pval <- with(cc,pt(abs(estimate[,1]-null)/estimate[,2],df=df,lower.tail=FALSE)*2) } else { pval <- with(cc,pnorm(abs(estimate[,1]-null)/estimate[,2],lower.tail=FALSE)*2) } res$coefmat <- with(cc, cbind(estimate,pval)) colnames(res$coefmat)[5] <- "P-value" rownames(res$coefmat) <- cc$cnames if (!is.null(res$iid)) { res$iid <- res$iid%*%t(contrast) colnames(res$iid) <- cc$cnames } res$compare$estimate <- NULL res$coef <- res$compare$coef res$vcov <- res$compare$vcov } if (!is.null(back.transform)) { res$coefmat[,c(1,3,4)] <- do.call(back.transform,list(res$coefmat[,c(1,3,4)])) res$coefmat[,2] <- NA } if (!missing(keep) && !is.null(keep)) { if (is.character(keep)) { keep <- match(keep,rownames(res$coefmat)) } res$coef <- res$coef[keep] res$coefmat <- res$coefmat[keep,,drop=FALSE] if (!is.null(res$iid)) res$iid <- res$iid[,keep,drop=FALSE] res$vcov <- res$vcov[keep,keep,drop=FALSE] } if (!missing(labels)) { names(res$coef) <- labels if (!is.null(res$iid)) colnames(res$iid) <- labels colnames(res$vcov) <- rownames(res$vcov) <- labels rownames(res$coefmat) <- labels } if (!missing(label.width)) { rownames(res$coefmat) <- make.unique(unlist(lapply(rownames(res$coefmat), function(x) toString(x,width=label.width)))) } if (only.coef) return(res$coefmat) res$call <- cl res$back.transform <- back.transform res$n <- nrow(data) res$ncluster <- nrow(res$iid) return(res) } simnull <- function(R,f,mu,sigma,labels=NULL) { X <- rmvn(R,mu=mu,sigma=sigma) est <- f(mu) res <- apply(X,1,f) if (is.list(est)) { nn <- names(est) est <- unlist(est) names(est) <- nn res <- matrix(unlist(res),byrow=TRUE,ncol=length(est)) } else { res <- t(rbind(res)) } if (is.null(labels)) { labels <- colnames(rbind(est)) if (is.null(labels)) labels <- paste0("p",seq_along(est)) } colnames(res) <- labels return(res) } ##' @export estimate.estimate.sim <- function(x,f,R=0,labels,...) { atr <- attributes(x) if (R>0) { if (missing(f)) { val <- simnull(R,f=atr[["f"]],mu=atr[["coef"]],sigma=atr[["vcov"]]) res <- rbind(x,val) for (a in setdiff(names(atr),c("dim","dimnames"))) attr(res,a) <- atr[[a]] } else { res <- simnull(R,f=f,mu=atr[["coef"]],sigma=atr[["vcov"]]) for (a in setdiff(names(atr),c("dim","dimnames","f"))) attr(res,a) <- atr[[a]] attr(f,"f") <- f est <- unlist(f(atr[["coef"]])) if (missing(labels)) labels <- colnames(rbind(est)) attr(res,"estimate") <- est } if (!missing(labels)) colnames(res) <- labels return(res) } if (missing(f)) { if (!missing(labels)) colnames(res) <- labels return(x) } est <- f(atr[["coef"]]) res <- apply(x,1,f) if (is.list(est)) { res <- matrix(unlist(res),byrow=TRUE,ncol=length(est)) } else { res <- t(rbind(res)) } if (missing(labels)) { labels <- colnames(rbind(est)) if (is.null(labels)) labels <- paste0("p",seq_along(est)) } colnames(res) <- labels for (a in setdiff(names(atr),c("dim","dimnames","f","estimate"))) attr(res,a) <- atr[[a]] attr(f,"f") <- f attr(res,"estimate") <- unlist(est) return(res) } ##' @export print.estimate.sim <- function(x,level=.05,...) 
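## Illustrative sketch, comments only: the simulation-based interface (R>0), whose
## result is printed by the print.estimate.sim method defined here. With 'g' a fitted glm,
##   e <- estimate(g, function(p) p[2]-p[3], R=1000)  ## draws under the null (null.sim defaults to 0)
##   e   ## mean, SD, quantiles and simulated p-value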
{ quantiles <- c(level/2,1-level/2) est <- attr(x,"estimate") mysummary <- function(x,i) { x <- as.vector(x) res <- c(mean(x,na.rm=TRUE), sd(x,na.rm=TRUE), quantile(x,quantiles,na.rm=TRUE), est[i], mean(abs(x)>abs(est[i]),na.rm=TRUE)) names(res) <- c("Mean","SD",paste0(quantiles*100,"%"), "Estimate","P-value") res } env <- new.env() assign("est",attr(x,"estimate"),env) environment(mysummary) <- env print(summary(x,fun=mysummary,...)) } estimate.glm <- function(x,...) { estimate.default(x,...) } ##' @export print.estimate <- function(x,level=0,digits=4,width=25,std.error=TRUE,p.value=TRUE,...) { if (!is.null(x$print)) { x$print(x,digits=digits,width=width,...) return(invisible(x)) } if (level>0 && !is.null(x$call)) { cat("Call: "); print(x$call) printline(50) } if (level>0) { if (!is.null(x[["n"]]) && !is.null(x[["k"]])) { cat("n = ",x[["n"]],", clusters = ",x[["k"]],"\n\n",sep="") } else { if (!is.null(x[["n"]])) { cat("n = ",x[["n"]],"\n\n",sep="") } if (!is.null(x[["k"]])) { cat("n = ",x[["k"]],"\n\n",sep="") } } } cc <- x$coefmat rownames(cc) <- make.unique(unlist(lapply(rownames(cc), function(x) toString(x,width=width)))) if (!std.error) cc <- cc[,-2,drop=FALSE] if (!p.value) cc[,-ncol(cc),drop=FALSE] print(cc,digits=digits,...) if (!is.null(x$compare)) { cat("\n",x$compare$method[3],"\n") cat(paste(" ",x$compare$method[-(1:3)],collapse="\n"),"\n") if (length(x$compare$method)>4) { out <- character() out <- with(x$compare, c(out, paste(names(statistic), "=", format(round(statistic, 4))))) out <- with(x$compare, c(out, paste(names(parameter), "=", format(round(parameter,3))))) fp <- with(x$compare, format.pval(p.value, digits = digits)) out <- c(out, paste("p-value", if (substr(fp, 1L, 1L) == "<") fp else paste("=", fp))) cat(" ",strwrap(paste(out, collapse = ", ")), sep = "\n") } } } ##' @export vcov.estimate <- function(object,...) { res <- object$vcov nn <- names(coef(object)) dimnames(res) <- list(nn,nn) res } ##' @export coef.estimate <- function(object,mat=FALSE,...) { if (mat) return(object$coefmat) if (lava.options()$messages>0 && !is.null(object$back.transform)) message("Note: estimates on original scale (before 'back.transform')") object$coef } ##' @export summary.estimate <- function(object,...) { ##object[c("iid","id","print")] <- NULL object <- object[c("coef","coefmat","vcov","call","ncluster")] class(object) <- "summary.estimate" object } ##' @export coef.summary.estimate <- function(object,...) { object$coefmat } ##' @export print.summary.estimate <- function(x,...) { print.estimate(x,level=2,...) } ##' @export iid.estimate <- function(x,...) { if (is.null(x$iid)) return(NULL) dimn <- dimnames(x$iid) if (!is.null(dimn)) { dimn[[2]] <- names(coef(x)) } else { dimn <- list(NULL,names(coef(x))) } structure(x$iid,dimnames=dimn) } ##' @export model.frame.estimate <- function(formula,...) { NULL } lava/R/latent.R0000644000176200001440000000313613162174023013012 0ustar liggesusers##' @export "latent<-" <- function(x,...,value) UseMethod("latent<-") ##' @export "latent<-.lvm" <- function(x, clear=FALSE,..., value) { if (inherits(value,"formula")) { return(latent(x,all.vars(value),clear=clear,...)) } latent(x, var=value, clear=clear,...) } ##' @export `latent` <- function(x,...) UseMethod("latent") ##' @export `latent.lvm` <- function(x,var,clear=FALSE,silent=lava.options()$silent,...) 
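## Illustrative sketch, comments only: declaring latent variables with the
## latent()/latent<- methods in this file.
##   m <- lvm(c(y1,y2,y3) ~ eta)
##   latent(m) <- ~eta    ## mark 'eta' as latent (unobserved)
##   latent(m)            ## "eta"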
{ if (missing(var)) { latentidx <- unlist(x$latent) if (length(latentidx)>0) return(names(latentidx)) else return(NULL) } if (inherits(var,"formula")) var <- all.vars(var) if (clear) { x$noderender$shape[var] <- "rectangle" x$latent[var] <- NULL ## intfix(x,var) <- NA } else { if (!all(var%in%vars(x))) { addvar(x,silent=silent,reindex=FALSE,) <- setdiff(var,vars(x)) } x$noderender$shape[var] <- "ellipse" x$latent[var] <- TRUE ord <- intersect(var,ordinal(x)) if (length(ord)>0) ordinal(x,K=NULL) <- ord } xorg <- exogenous(x) exoset <- setdiff(xorg,var) if (length(exoset)0) { Optim[names(control)] <- control } Debug("Start values...") if (!is.null(Optim$start) & length(Optim$start)==(x$npar+x$npar.mean)) { mystart <- Optim$start } else { if (!silent) cat("Obtaining starting value...") if (is.null(control$starterfun) && lava.options()$param!="relative") Optim$starterfun <- startvalues0 mystart <- with(Optim, starter.multigroup(x,meanstructure=meanstructure,starterfun=starterfun,silent=FALSE,fix=FALSE)) if (!is.null(Optim$start)) { pname <- names(Optim$start) ppos <- parpos.multigroup(x,p=pname,mean=TRUE) if (any(!is.na(ppos))) mystart[ppos] <- Optim$start[na.omit(match(attributes(ppos)$name,pname))] } if (!silent) cat("\n") } Debug(mystart) Debug("Constraints...") ## Setup optimization constraints lower <- rep(-Inf, x$npar); for (i in seq_len(x$ngroup)) { vpos <- sapply(x$parlist[[i]][variances(x$lvm[[i]],mean=FALSE)], function(y) char2num(substr(y,2,nchar(y)))) if (length(vpos)>0) lower[vpos] <- Optim$lbound } if (Optim$meanstructure) lower <- c(rep(-Inf,x$npar.mean), lower) if (any(Optim$constrain)) { if (length(Optim$constrain)!=length(lower)) constrained <- is.finite(lower) else constrained <- Optim$constrain constrained <- which(constrained) lower[] <- -Inf Optim$constrain <- TRUE mystart[constrained] <- log(mystart[constrained]) } if (!missing(weights)) { if (is.character(weights)) { stweights <- weights weights <- list() for (i in seq_along(x$data)) { newweights <- as.matrix(x$data[[i]][,stweights]) colnames(newweights) <- index(x$lvm[[i]])$endogenous[seq_len(ncol(newweights))] weights <- c(weights, list(newweights)) } } } else { weights <- NULL } if (!missing(data2)) { if (is.character(data2)) { stdata2 <- data2 data2 <- list() for (i in seq_along(x$data)) { newdata <- as.matrix(x$data[[i]][,stdata2,drop=FALSE]) dropcol <- apply(newdata,2,function(x) any(is.na(x))) newdata <- newdata[,!dropcol,drop=FALSE] colnames(newdata) <- index(x$lvm[[i]])$endogenous[seq_len(ncol(newdata))] data2 <- c(data2, list(newdata)) } } } else { data2 <- NULL } ### Run hooks (additional lava plugins) myhooks <- gethook() newweights <- list() newdata2 <- list() newoptim <- newestimator <- NULL for (f in myhooks) { for ( i in seq_len(x$ngroup)) { res <- do.call(f, list(x=x$lvm[[i]],data=x$data[[i]],weights=weights[[i]],data2=data2[[i]],estimator=estimator,optim=Optim)) if (!is.null(res$x)) x$lvm[[i]] <- res$x if (!is.null(res$data)) x$data[[i]] <- res$data if (!is.null(res$weights)) newweights <- c(newweights,list(res$weights)) if (!is.null(res$data2)) newdata2 <- c(newdata2,list(res$data2)) if (!is.null(res$optim)) newoptim <- res$optim if (!is.null(res$estimator)) newestimator <- res$estimator } if (!is.null(newestimator)) estimator <- newestimator if (!is.null(newoptim)) Optim <- newoptim if (!is.null(res$weights)) if (!any(unlist(lapply(newweights,is.null)))) { weights <- newweights } if (!is.null(res$data2)) if (!any(unlist(lapply(newdata2,is.null)))) { data2 <- newdata2 } } if (is.null(estimator)) { if 
(!missing(weights) && !is.null(weights)) { estimator <- "normal" } else estimator <- "gaussian" } checkestimator <- function(x,...) { ffname <- paste0(x,c("_objective","_gradient"),".lvm") exists(ffname[1])||exists(ffname[2]) } if (!checkestimator(estimator)) { ## Try down/up-case version estimator <- tolower(estimator) if (!checkestimator(estimator)) { estimator <- toupper(estimator) } } Method <- paste0(estimator, "_method", ".lvm") if (!exists(Method)) Method <- "nlminb1" else Method <- get(Method) if (is.null(Optim$method)) { Optim$method <- Method } ## Check for random slopes xXx <- exogenous(x) Xfix <- FALSE Xconstrain <- FALSE xfix <- list() for (i in seq_len(x$ngroup)) { x0 <- x$lvm[[i]] data0 <- x$data[[i]] xfix0 <- colnames(data0)[(colnames(data0)%in%parlabels(x0,exo=TRUE))] xconstrain0 <- intersect(unlist(lapply(constrain(x0),function(z) attributes(z)$args)),manifest(x0)) xfix <- c(xfix, list(xfix0)) if (length(xfix0)>0) Xfix<-TRUE ## Yes, random slopes if (length(xconstrain0)>0) Xconstrain <- TRUE ## Yes, nonlinear regression } ## Non-linear parameter constraints involving observed variables? (e.g. nonlinear regression) constr <- c() XconstrStdOpt <- TRUE xconstrainM <- TRUE xconstrain <- c() if (Xconstrain) for (i in seq_len(x$ngroup)) { x0 <- x$lvm[[i]] data0 <- x$data[[i]] constr0 <- lapply(constrain(x0), function(z)(attributes(z)$args)) xconstrain0 <- intersect(unlist(constr0), manifest(x0)) xconstrain <- c(xconstrain, list(xconstrain0)) if (length(xconstrain0)>0) { constrainM0 <- names(constr0)%in%unlist(x0$mean) for (i in seq_len(length(constr0))) { if (!constrainM0[i]) { if (xconstrain0%in%constr0[[i]]) { xconstrainM <- FALSE } } } if (xconstrainM & ((is.null(control$method) || Optim$method=="nlminb0") & (lava.options()$test & estimator=="gaussian")) ) { XconstrStdOpt <- FALSE Optim$method <- "nlminb0" if (is.null(control$constrain)) control$constrain <- TRUE } } } ## Define objective function and first and second derivatives ObjectiveFun <- paste0(estimator, "_objective", ".lvm") GradFun <- paste0(estimator, "_gradient", ".lvm") if (!exists(ObjectiveFun) & !exists(GradFun)) stop("Unknown estimator.") InformationFun <- paste0(estimator, "_hessian", ".lvm") parord <- modelPar(x,seq_len(with(x,npar+npar.mean)))$p mymodel <- x parkeep <- c() myclass <- c("multigroupfit","lvmfit") myfix <- list() if (Xfix | (Xconstrain & XconstrStdOpt | !lava.options()$test)) { ## Model with random slopes: ############################################################# if (Xfix) { myclass <- c(myclass,"lvmfit.randomslope") for (k in seq_len(x$ngroup)) { x1 <- x0 <- x$lvm[[k]] data0 <- x$data[[k]] nrow <- length(vars(x0)) xpos <- lapply(xfix[[k]],function(y) which(regfix(x0)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix0 <- list(var=xfix[[k]], col=colpos, row=rowpos) myfix <- c(myfix, list(myfix0)) for (i in seq_along(myfix0$var)) for (j in seq_along(myfix0$col[[i]])) regfix(x0, from=vars(x0)[myfix0$row[[i]][j]],to=vars(x0)[myfix0$col[[i]][j]]) <- colMeans(data0[,myfix0$var[[i]],drop=FALSE],na.rm=TRUE) index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) x$lvm[[k]] <- x0 yvars <- endogenous(x0) parkeep <- c(parkeep, parord[[k]][coef(x1,mean=TRUE,fix=FALSE)%in%coef(x0,mean=TRUE,fix=FALSE)]) } parkeep <- sort(unique(parkeep)) ## Alter start-values: if (length(mystart)!=length(parkeep)) mystart <- mystart[parkeep] lower <- lower[parkeep] x <- multigroup(x$lvm,x$data,fix=FALSE,exo.fix=FALSE) } parord <- 
modelPar(x,seq_along(mystart))$p mydata <- list() for (i in seq_len(x$ngroup)) { mydata <- c(mydata, list(as.matrix(x$data[[i]][,manifest(x$lvm[[i]])]))) } myObj <- function(theta) { if (Optim$constrain) theta[constrained] <- exp(theta[constrained]) pp <- modelPar(x,theta)$p res <- 0 for (k in seq_len(x$ngroup)) { x0 <- x$lvm[[k]] data0 <- x$data[[k]] if (Xfix) { xfix0 <- xfix[[k]] myfix0 <- myfix[[k]] } p0 <- pp[[k]] myfun <- function(ii) { if (Xfix) for (i in seq_along(myfix0$var)) { x0$fix[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- index(x0)$A[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- data0[ii,xfix0[i]] } if (is.list(data2[[k]][ii,])) { res <- do.call(ObjectiveFun, list(x=x0, p=p0, data=data0[ii,manifest(x0),drop=FALSE], n=1, S=NULL, weights=weights[[k]][ii,], data2=data2[[k]])) } else { res <- do.call(ObjectiveFun, list(x=x0, p=p0, data=data0[ii,manifest(x0),drop=FALSE], n=1, S=NULL, weights=weights[[k]][ii,], data2=data2[[k]][ii,])) } return(res) } res <- res + sum(sapply(seq_len(nrow(mydata[[k]])),myfun)) } res } myGrad <- function(theta) { if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p D0 <- res <- rbind(numeric(length(mystart))) for (k in seq_len(x$ngroup)) { if (Xfix) { myfix0 <- myfix[[k]] } x0 <- x$lvm[[k]] myfun <- function(ii) { if (Xfix) for (i in seq_along(myfix0$var)) { x0$fix[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- index(x0)$A[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- x$data[[k]][ii,xfix[[k]][i]] } if (is.list(data2[[k]][ii,])) { } else { val <- do.call(GradFun, list(x=x0, p=pp[[k]], data=mydata[[k]][ii,,drop=FALSE], n=1, S=NULL, weights=weights[[k]][ii,], data2=data2[[k]][ii,])) } return(val) } D <- D0; D[parord[[k]]] <- rowSums(sapply(seq_len(nrow(mydata[[k]])),myfun)) res <- res+D } if (Optim$constrain) { res[constrained] <- res[constrained]*theta[constrained] } return(as.vector(res)) } myInformation <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p I0 <- res <- matrix(0,length(theta),length(theta)) grad <- grad0 <- numeric(length(theta)) for (k in seq_len(x$ngroup)) { x0 <- x$lvm[[k]] if (Xfix) { myfix0 <- myfix[[k]] } myfun <- function(ii) { if (Xfix) for (i in seq_along(myfix0$var)) { x0$fix[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- index(x0)$A[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- x$data[[k]][ii,xfix[[k]][i]] } I <- I0 J <- do.call(InformationFun, list(x=x0, p=pp[[k]], data=mydata[[k]][ii,], n=1, S=NULL, weights=weights[[k]][ii,], data2=data2[[k]][ii,], type=Optim$information ) ) D <- grad0 if (!is.null(attributes(J)$grad)) { D[ parord[[k]] ] <- attributes(J)$grad attributes(I)$grad <- D } I[ parord[[k]], parord[[k]] ] <- J return(I) } L <- lapply(seq_len(nrow(x$data[[k]])),function(x) myfun(x)) if (!is.null(attributes(L[[1]])$grad)) grad <- grad + rowSums(matrix((unlist(lapply(L,function(x) attributes(x)$grad))),ncol=length(L))) res <- res + apply(array(unlist(L),dim=c(length(theta),length(theta),nrow(x$data[[k]]))),c(1,2),sum) } if (!is.null(attributes(L[[1]])$grad)) attributes(res)$grad <- grad return(res) } } else { ## Model without random slopes: ########################################################### ## Non-linear parameter constraints involving observed variables? (e.g. 
nonlinear regression) yconstrain <- c() iconstrain <- c() xconstrain <- c() for (j in seq_len(x$ngroup)) { x0 <- x$lvm[[j]] data0 <- x$data[[j]] xconstrain0 <- c() for (i in seq_len(length(constrain(x0)))) { z <- constrain(x0)[[i]] xx <- intersect(attributes(z)$args,manifest(x0)) if (length(xx)>0) { warg <- setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(x0))[i] y <- names(which(unlist(lapply(intercept(x0),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain0 <- c(xconstrain0,list(el)) } } yconstrain0 <- unlist(lapply(xconstrain0,function(x) x$endo)) iconstrain0 <- unlist(lapply(xconstrain0,function(x) x$idx)) xconstrain <- c(xconstrain, list(xconstrain0)) yconstrain <- c(yconstrain, list(yconstrain0)) iconstrain <- c(iconstrain, list(iconstrain0)) } MkOffset <- function(pp,x,data,xconstrain,grad=FALSE) { if (length(xconstrain)>0) { Mu <- matrix(0,nrow(data),length(vars(x))); colnames(Mu) <- vars(x) M <- modelVar(x,p=pp,data=data) M$parval <- c(M$parval, x$mean[unlist(lapply(x$mean,is.numeric))]) for (i in seq_len(length(xconstrain))) { pp <- unlist(M$parval[xconstrain[[i]]$warg]); myidx <- with(xconstrain[[i]],order(c(wargidx,exoidx))) mu <- with(xconstrain[[i]], apply(data[,exo,drop=FALSE],1, function(x) func( unlist(c(pp,x))[myidx]))) Mu[,xconstrain[[i]]$endo] <- mu } offsets <- Mu%*%t(M$IAi)[,endogenous(x)] return(offsets) } return(NULL) } myObj <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p res <- c() for (i in seq_len(x$ngroup)) { offset <- MkOffset(pp[[i]],x$lvm[[i]],x$data[[i]],xconstrain[[i]]) x0 <- x$lvm[[i]] data0 <- x$data[[i]][,index(x$lvm[[i]])$manifest,drop=FALSE] S <- x$samplestat[[i]]$S mu <- x$samplestat[[i]]$mu n <- x$samplestat[[i]]$n if (!is.null(offset)) { x0$constrain[iconstrain[[i]]] <- NULL pd <- procdata.lvm(x0,data0[,endogenous(x0),drop=FALSE]-offset) S[endogenous(x0),endogenous(x0)] <- pd$S mu[endogenous(x0)] <- pd$mu n <- pd$n x0$mean[yconstrain[[i]]] <- 0 } res <- c(res, do.call(ObjectiveFun, list(x=x0, p=pp[[i]], data=data0, S=S, mu=mu, n=n, weights=weights[[i]], data2=data2[[i]], offset=offset))) } sum(res) } if (!exists(GradFun)) { myGrad <- NULL } else { myGrad <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p D0 <- res <- rbind(numeric(length(theta))) for (i in seq_len(x$ngroup)) { repval <- with(x$samplestat[[i]], do.call(GradFun, list(x=x$lvm[[i]],p=pp[[i]], data=x$data[[i]][,index(x$lvm[[i]])$manifest,drop=FALSE], S=S,mu=mu,n=n, weights=weights[[i]], data2=data2[[i]]))) D <- D0; D[ parord[[i]] ] <- repval res <- res + D } if (Optim$constrain) { res[constrained] <- res[constrained]*theta[constrained] } return(as.vector(res)) } } myInformation <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p I0 <- res <- matrix(0,length(theta),length(theta)) for (i in seq_len(x$ngroup)) { I <- I0; I[ parord[[i]], parord[[i]] ] <- with(x$samplestat[[i]], do.call(InformationFun, list(p=pp[[i]], x=x$lvm[[i]], data=x$data[[i]], S=S, mu=mu, n=n, weights=weights[[i]], data2=data2[[i]], type=Optim$information))) res <- res + I } D <- myGrad(theta0) if (Optim$constrain) { res[constrained,-constrained] <- 
apply(res[constrained,-constrained,drop=FALSE],2,function(x) x*theta[constrained]); res[-constrained,constrained] <- t(res[constrained,-constrained]) if (sum(constrained)==1) { res[constrained,constrained] <- res[constrained,constrained]*outer(theta[constrained],theta[constrained]) - (D[constrained]) } else { res[constrained,constrained] <- res[constrained,constrained]*outer(theta[constrained],theta[constrained]) - diag(D[constrained],nrow=length(constrained)) } } attributes(res)$grad <- D return(res) } } ############################################################## if (!exists(InformationFun)) myInformation <- NULL else if (is.null(get(InformationFun))) myInformation <- NULL if (is.null(get(GradFun))) myGrad <- NULL if (!silent) cat("Optimizing objective function...\n") if (lava.options()$debug) { print(lower) print(Optim$constrain) print(Optim$method) } opt <- do.call(Optim$method, list(start=mystart, objective=myObj, gradient=myGrad, hessian=myInformation, lower=lower, control=Optim)) ## if (!silent) cat("\n") opt$estimate <- opt$par if (Optim$constrain) { opt$estimate[constrained] <- exp(opt$estimate[constrained]) } if (quick) return(list(opt=opt,vcov=NA)) if (is.null(myGrad) | !XconstrStdOpt ) { ## if (!requireNamespace("numDeriv")) { ## opt$gradient <- naiveGrad(myObj, opt$estimate) ## } else { opt$gradient <- numDeriv::grad(myObj, opt$par, method=lava.options()$Dmethod) } else { opt$gradient <- myGrad(opt$estimate) } if (!is.null(opt$convergence)) { if (opt$convergence!=0) warning("Lack of convergence. Increase number of iteration or change starting values.") } else if (!is.null(opt$gradient) && mean(opt$gradient)^2>1e-3) warning("Lack of convergence. Increase number of iteration or change starting values.") if (!XconstrStdOpt) { myInformation <- function(theta) information(x,p=theta) } else { if (is.null(myInformation)) { ## if (!requireNamespace("numDeriv")) stop("I do not know how to calculate the asymptotic variance of this estimator. ## For numerical approximation please install the library 'numDeriv'.") if (!is.null(myGrad) & XconstrStdOpt) myInformation <- function(theta) numDeriv::jacobian(myGrad, theta, method=lava.options()$Dmethod) else { myInformation <- function(theta) numDeriv::hessian(myObj, theta) } } } I <- myInformation(opt$estimate) asVar <- tryCatch(Inverse(I), error=function(e) matrix(NA, length(mystart), length(mystart))) res <- list(model=x, model0=mymodel, call=cl, opt=opt, meanstructure=Optim$meanstructure, vcov=asVar, estimator=estimator, weights=weights, data2=data2, cluster=id) class(res) <- myclass myhooks <- gethook("post.hooks") for (f in myhooks) { res0 <- do.call(f,list(x=res)) if (!is.null(res0)) res <- res0 } return(res) } ###}}} ###{{{ estimate.list estimate.lvmlist <- function(x, data, silent=lava.options()$silent, fix, missing=FALSE, ...) 
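## Illustrative sketch, comments only: multiple-group estimation with shared
## (labelled) parameters, mirroring the documented examples.
##   m <- baptize(lvm(y ~ x))
##   d1 <- sim(m, 100); d2 <- sim(m, 100)
##   e <- estimate(list(m, m), list(d1, d2))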
{ if (base::missing(data)) { return(estimate(x[[1]],x[[2]],missing=missing,...)) } nm <- length(x) if (nm==1) { return(estimate(x[[1]],data,missing=missing,...)) } if (!all(unlist(lapply(x, function(y) inherits(y,"lvm"))))) stop ("Expected a list of 'lvm' objects.") if (is.data.frame(data)) { warning("Only one dataset - going for standard analysis on each submodel.") res <- c() for (i in seq_len(nm)) { res <- c(res, list(estimate(x[[i]],data=data,silent=TRUE,missing=missing, ...))) } return(res) } if (nm!=length(data)) stop("Supply dataset for each model") Xfix <- FALSE xfix <- list() for (i in seq_along(x)) { data0 <- data[[i]] xfix0 <- colnames(data0)[(colnames(data0)%in%parlabels(x[[i]],exo=TRUE))] xfix <- c(xfix, list(xfix0)) if (length(xfix0)>0) { ## Yes, random slopes Xfix<-TRUE } } if (base::missing(fix)) { fix <- ifelse(Xfix,FALSE,TRUE) } mg <- multigroup(x,data,fix=fix,missing=missing,...) res <- estimate(mg,...) return(res) } ###}}} lava/R/effects.R0000644000176200001440000001544413162174023013147 0ustar liggesusers ##' @export totaleffects <- function(object,...,value) UseMethod("totaleffects") ##' @export totaleffects.lvmfit <- function(object,to,...,level=0.95) { p <- (1-level)/2 q <- qnorm(p) res <- c() if (inherits(to,"formula")) { if (substr(deparse(to[3]),1,1)==".") { trim <- function(x) sapply(x,function(z) gsub(" ","",z,fixed=TRUE)) to <- trim(strsplit(deparse(to),"~")[[1]][1]) } else { to <- list(to) } } if (is.null(list(...)$from) & is.character(to)[1]) { to <- lapply(paste(to,setdiff(vars(object),to),sep="~"),FUN=as.formula) } ef <- function(tt) { f <- effects(object,tt,...) rbind(with(f$totalef,c(est,sd,est/sd,2*(pnorm(abs(est/sd),lower.tail=FALSE)),est+q*sd,est-q*sd))) } if (is.list(to)) { for (tt in to) { res <- rbind(res,ef(tt)) } } else res <- ef(to) colnames(res) <- c("Estimate","Std.Err","z value","Pr(>|z|)", paste0(c(1-p,p)*100,"%")) rownames(res) <- to res } ##' @export effects.lvmfit <- function(object,to,from,silent=FALSE,...) { if (missing(to)) { return(summary(object)) } P <- path(object,to=to,from=from,...) 
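## Illustrative sketch, comments only: effect decomposition in a simple
## mediation model (made-up variable names).
##   m <- lvm(y ~ x + z, z ~ x)
##   e <- estimate(m, sim(m, 200))
##   effects(e, y ~ x)        ## total, direct and indirect (x -> z -> y) effects
##   totaleffects(e, y ~ x)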
if (is.null(P$path)) { if (inherits(to,"formula")) { f <- extractvar(to) to <- f$y; from <- f$x } } else { from <- P$path[[1]][1] to <- tail(P$path[[1]],1) } cc <- coef(object,level=9,labels=FALSE) ## All parameters (fixed and variable) cc0 <- cbind(coef(object)) ## Estimated parameters i1 <- na.omit(match(rownames(cc),rownames(cc0))) idx.cc0 <- which(rownames(cc)%in%rownames(cc0)); ## Position of estimated parameters among all parameters S <- matrix(0,nrow(cc),nrow(cc)); rownames(S) <- colnames(S) <- rownames(cc) V <- object$vcov npar.mean <- index(object)$npar.mean ## if (object$control$meanstructure & npar.mean>0) ## V <- V[-seq_len(npar.mean),-seq_len(npar.mean)] S[idx.cc0,idx.cc0] <- V[i1,i1] ## "Covariance matrix" of all parameters cclab <- rownames(coef(object,level=9,labels=TRUE)) ## Identify equivalence constraints cctab <- table(cclab) equiv <- which(cctab>1) for (i in seq_len(length(equiv))) { orgpos <- which(cclab==(names(equiv)[i])) pos <- orgpos[-1] for (p in pos) S[p,-orgpos[1]] <- S[-orgpos[1],p] <- S[orgpos[1],-p] } idx.orig <- unique(unlist(P$idx)) coefs.all <- cc[idx.orig] S.all <- S[idx.orig,idx.orig] idx.all <- numberdup(unlist(P$idx)) pos <- 1; idx.list <- P$idx; for (i in seq_len(length(idx.list))) { K <- length(idx.list[[i]]) idx.list[[i]] <- idx.all[pos:(pos+K-1)]; pos <- pos+K } margef <- list() if (length(coefs.all)==1 && is.na(coefs.all)) { totalef <- list(est=0,sd=0) margef <- c(margef,list(est=0,sd=NA)) } else { totalef <- prodsumdelta(coefs.all, idx.list, S.all,...) for (i in seq_len(length(idx.list))) { margef <- c(margef, list(prodsumdelta(coefs.all, idx.list[i], S.all,...))) } paths <- list() } directidx <- which(lapply(P$path,length)==2) inef.list <- idx.list if (length(directidx)==0) { directef <- list(est=0, sd=NA) } else { inef.list <- inef.list[-directidx] directef <- margef[[directidx]] } if (length(inef.list)==0) { totalinef <- list(est=0,sd=NA,grad=NA,hess=NA) } else { totalinef <- prodsumdelta(coefs.all, inef.list, S.all,...) } nn <- c("total","direct","indirect") for (i in seq_len(length(margef))) { if (length(P$path[[i]])>2) { nn <- c(nn,paste(rev(P$path[[i]]),collapse=lava.options()$symbol[1])) } } b <- c(totalef$est,directef$est,totalinef$est,totalinef$b) names(b) <- nn D <- t(cbind(totalef$grad,directef$grad,totalinef$grad,totalinef$D)) V <- D%*%S.all%*%t(D) val <- list(coef=b, vcov=V, grad=D, paths=P$path, totalef=totalef, directef=directef, totalinef=totalinef, margef=margef, from=from, to=to) class(val) <- "effects" val } ##' @export print.effects <- function(x,digits=4,...) { s <- summary(x,...) print(s$coef,digits=digits,...) cat("\n") print(s$medprop$coefmat[,c(1,3,4),drop=FALSE],digits=digits,...) return(invisible(x)) } ##' @export coef.effects <- function(object,...) { object$coef } ##' @export vcov.effects <- function(object,...) { object$vcov } ##' @export summary.effects <- function(object,...) 
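## Descriptive note: the summary method defined here also reports the mediation
## proportion (indirect/total effect); its confidence limits are computed on the
## logit scale and back-transformed via expit (see the estimate(..., back.transform=expit) call).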
{ totalef <- with(object$totalef, cbind(est,sd[1])) directef <- with(object$directef, cbind(est,sd[1])) totindirectef <- with(object$totalinef, cbind(est,sd[1])) rownames(totalef) <- "Total" rownames(directef) <- "Direct" rownames(totindirectef) <- "Indirect" nn <- indirectef <- c() K <- seq_len(length(object$margef)) for (i in K) { if (length(object$paths[[i]])>2) { nn <- c(nn,paste(rev(object$paths[[i]]),collapse=lava.options()$symbol[1])) indirectef <- rbind(indirectef, with(object$margef[[i]], c(est,sd))) } }; rownames(indirectef) <- nn mycoef <- rbind(totalef,directef,totindirectef,indirectef) mycoef <- cbind(mycoef,mycoef[,1]/mycoef[,2]) mycoef <- cbind(mycoef,2*(pnorm(abs(mycoef[,3]),lower.tail=FALSE))) colnames(mycoef) <- c("Estimate","Std.Err","z value","Pr(>|z|)") medprop <- NULL if (totindirectef[1]!=0) medprop <- estimate(object, function(x) list("Mediation proportion"=logit(x[3]/x[1])),back.transform=expit) list(coef=mycoef,medprop=medprop) } ##' @export confint.effects <- function(object,parm,level=0.95,...) { mycoef <- summary(object)$coef p <- 1-(1-level)/2 res <- mycoef[,1] + + qnorm(p)*cbind(-1,1)%x%mycoef[,2] colnames(res) <- paste0(c(1-p,p)*100,"%") rownames(res) <- rownames(mycoef) res } prodtrans <- function(betas) { k <- length(betas) res <- prod(betas) ## if (all(betas>0)) { ## attr(res,"gradient") <- res/betas ## return(res) ## } nabla <- numeric(k) for (i in seq_len(k)) nabla[i] <- prod(betas[-i]) H <- matrix(0,k,k) if (k>1) for (i in seq_len(k-1)) for (j in (i+1):k) H[j,i] <- H[i,j] <- prod(c(1,betas[-c(i,j)])) attr(res,"gradient") <- nabla attr(res,"hessian") <- H return(res) } prodsumdelta <- function(betas,prodidx,S,order=1) { ## Delta-method k <- length(prodidx) p <- length(betas) if (p==1) { return(list(est=betas, sd=sqrt(S), grad=0, beta=betas, D=0, hess=0)) } val <- 0; grad <- numeric(p) D <- matrix(0,nrow=p,ncol=k) beta <- numeric(k) H <- matrix(0,p,p) for (i in seq_len(k)) { ii <- prodidx[[i]] myterm <- prodtrans(betas[ii]); if (order>1) { H0 <- attributes(myterm)$hessian Sigma <- S[ii,ii] print(sum(diag(Sigma%*%H0))/2) val <- val + (myterm + sum(diag(Sigma%*%H0))/2) } else { val <- val + myterm beta[i] <- myterm } D[ii,i] <- attributes(myterm)$gradient grad[ii] <- grad[ii] + attributes(myterm)$gradient }; grad <- matrix(grad,ncol=1) return(list(est=val, sd=sqrt(t(grad)%*%S%*%grad), grad=grad, b=beta, D=D, hess=H)) } lava/R/plotConf.R0000644000176200001440000003242513162174023013312 0ustar liggesusers##' Plot regression line (with interactions) and partial residuals. ##' ##' @title Plot regression lines ##' @param model Model object (e.g. 
\code{lm}) ##' @param var1 predictor (Continuous or factor) ##' @param var2 Factor that interacts with \code{var1} ##' @param data data.frame to use for prediction (model.frame is used as default) ##' @param ci.lty Line type for confidence limits ##' @param ci Boolean indicating wether to draw pointwise 95\% confidence limits ##' @param level Level of confidence limits (default 95\%) ##' @param pch Point type for partial residuals ##' @param lty Line type for estimated regression lines ##' @param lwd Line width for regression lines ##' @param npoints Number of points used to plot curves ##' @param xlim Range of x axis ##' @param col Color (for each level in \code{var2}) ##' @param colpt Color of partial residual points ##' @param alpha Alpha level ##' @param cex Point size ##' @param delta For categorical \code{var1} ##' @param centermark For categorical \code{var1} ##' @param jitter For categorical \code{var1} ##' @param cidiff For categorical \code{var1} ##' @param mean For categorical \code{var1} ##' @param legend Boolean (add legend) ##' @param trans Transform estimates (e.g. exponential) ##' @param partres Boolean indicating whether to plot partial residuals ##' @param partse . ##' @param labels Optional labels of \code{var2} ##' @param vcov Optional variance estimates ##' @param predictfun Optional predict-function used to calculate confidence limits and predictions ##' @param plot If FALSE return only predictions and confidence bands ##' @param new If FALSE add to current plot ##' @param \dots additional arguments to lower level functions ##' @return list with following members: ##' \item{x}{Variable on the x-axis (\code{var1})} ##' \item{y}{Variable on the y-axis (partial residuals)} ##' \item{predict}{Matrix with confidence limits and predicted values} ##' @author Klaus K. Holst ##' @seealso \code{termplot} ##' @aliases plotConf ##' @export ##' @examples ##' n <- 100 ##' x0 <- rnorm(n) ##' x1 <- seq(-3,3, length.out=n) ##' x2 <- factor(rep(c(1,2),each=n/2), labels=c("A","B")) ##' y <- 5 + 2*x0 + 0.5*x1 + -1*(x2=="B")*x1 + 0.5*(x2=="B") + rnorm(n, sd=0.25) ##' dd <- data.frame(y=y, x1=x1, x2=x2) ##' lm0 <- lm(y ~ x0 + x1*x2, dd) ##' plotConf(lm0, var1="x1", var2="x2") ##' abline(a=5,b=0.5,col="red") ##' abline(a=5.5,b=-0.5,col="red") ##' ### points(5+0.5*x1 -1*(x2=="B")*x1 + 0.5*(x2=="B") ~ x1, cex=2) ##' ##' data(iris) ##' l <- lm(Sepal.Length ~ Sepal.Width*Species,iris) ##' plotConf(l,var2="Species") ##' plotConf(l,var1="Sepal.Width",var2="Species") ##' ##' \dontrun{ ##' ## lme4 model ##' dd$Id <- rbinom(n, size = 3, prob = 0.3) ##' lmer0 <- lme4::lmer(y ~ x0 + x1*x2 + (1|Id), dd) ##' plotConf(lmer0, var1="x1", var2="x2") ##' } ##' @keywords hplot, regression plotConf <- function(model, var1=NULL, var2=NULL, data=NULL, ci.lty=0, ci=TRUE, level=0.95, pch=16, lty=1, lwd=2, npoints=100, xlim, col=NULL, colpt, alpha=0.5, cex=1, delta=0.07, centermark=0.03, jitter=0.2, cidiff=FALSE, mean=TRUE, legend=ifelse(is.null(var1),FALSE,"topright"), trans=function(x) {x}, partres=inherits(model,"lm"), partse=FALSE, labels, vcov, predictfun, plot=TRUE, new=TRUE, ...) { if (inherits(model,"formula")) model <- lm(model,data=data,...) 
if (inherits(model,"lmerMod")) { intercept <- lme4::fixef(model)["(Intercept)"] } else { intercept <- coef(model)["(Intercept)"] } if (is.na(intercept)) intercept <- 0 if (is.null(data)) { curdata <- get_all_vars(model,data=model.frame(model)) } else { curdata <- get_all_vars(formula(model), data=data) } if (inherits(model,"lmerMod")) { curdata0 <- model.frame(model, data = data, fixed.only = FALSE) } else { curdata0 <- model.frame(model,data) ## Checking for factors } if (is.null(var1) && is.null(var2)) { var1 <- colnames(curdata)[2] var10 <- colnames(curdata0)[2] } else var10 <- var1 responseorig <- colnames(curdata)[1] if (inherits(curdata0[,var10],c("character","factor"))) { curdata <- curdata0 var2 <- var10; var1 <- NULL } dots <- list(...) response <- all.vars(formula(model))[1] cname <- colnames(curdata)[-1] if (!is.factor(curdata[,var2]) & !is.null(var2)) { curdata[,var2] <- as.factor(curdata[,var2]) colnames(curdata)[1] <- response model <- update(model,as.formula(paste(response,"~.")),data=curdata) } thelevels <- levels(curdata[,var2]) if (missing(labels)) labels <- thelevels k <- ifelse(is.null(var2),1,length(thelevels)) if (is.null(col)) { col <- c("black","darkblue","darkred","goldenrod","mediumpurple", "seagreen","aquamarine3","violetred1","salmon1", "lightgoldenrod1","darkorange2","firebrick1","violetred1", "gold") } if (missing(xlim)) { if (!is.null(var1)) xlim <- range(curdata[,var1]) else xlim <- c(0,length(thelevels))+0.5 } dots$xlim <- xlim if (is.null(var1) & !is.null(var2)) { ##npoints <- 1 x <- unique(curdata[,var2]) npoints <- 1#length(x) } else { x <- seq(xlim[1], xlim[2], length.out=npoints) } xx <- c() newdata <- data.frame(id=seq(npoints)) partdata <- curdata var1.idx <- var2.idx <- 0 ii <- 1 for (nn in cname) { ii <- ii+1 v <- curdata[,nn] if (!is.null(var1) && nn==var1) { var1.idx <- ii newdata <- cbind(newdata, rep(x, k)) partdata[,nn] <- 0 } else { if (is.factor(v)) { if (nn%in%var2) { var2.idx <- ii newdata <- cbind(newdata, factor(rep(levels(v), each=npoints),levels=thelevels)) partdata[,nn] <- factor(rep(levels(v)[1], nrow(partdata)),levels=levels(v)) } else { newdata <- cbind(newdata, factor(rep(levels(v)[1], k*npoints), levels=levels(v))) } } else { if (is.logical(v)) newdata <- cbind(newdata,FALSE) else newdata <- cbind(newdata,0) } } }; colnames(newdata) <- c("_id", cname) partdata[,response] <- newdata[,response] <- 0 var1.newdata <- newdata[,var1.idx] ##newdata <- model.frame(model,data=newdata) ## if (is.factor(newdata[,var1.idx])) { ## partdata[,var1.idx] <- min(var1.newdata) ## } #o#partdata <- model.frame(model,data=partdata) atr <- c("terms") attributes(newdata)[atr] <- attributes(curdata)[atr] attributes(partdata)[atr] <- attributes(partdata)[atr] if (is.factor(newdata[,var1.idx])) { ##partdata[,var1.idx] <- levels(newdata[,var1.idx])[1] } Y <- model.frame(model)[,1] if(inherits(Y,"Surv")) Y <- Y[,1] XX <- model.matrix(formula(terms(model)),data=newdata) if (inherits(model,"lmerMod")) { bb <- lme4::fixef(model) } else { bb <- coef(model) } if (!missing(vcov)) SS <- vcov else { if (inherits(model,"geeglm")) { SS <- (summary(model)$cov.unscaled) } else { SS <- as.matrix(stats::vcov(model)) } } ## coefnames <- c("(Intercept)",var1) ## if (!is.null(var2)) { ## var2n <- paste0(var2,thelevels[-1]) ## coefnames <- c(coefnames,var2n,paste(var1,var2n,sep=":"), ## paste(var2n,var1,sep=":")) ## } ##bidx <- which(names(bb)%in%coefnames) bidx <- which(apply(XX,2,function(x) !all(x==0))) notbidx <- setdiff(seq(length(bb)),bidx) bb0 <- bb; bb0[notbidx] <- 0 
myse <- apply(XX[,bidx,drop=FALSE],1,function(x) rbind(x)%*%SS[bidx,bidx,drop=FALSE]%*%cbind(x))^.5 ci.all <- list(fit=XX%*%bb0,se.fit=myse) z <- qnorm(1-(1-level)/2) ci.all$fit <- cbind(ci.all$fit,ci.all$fit-z*ci.all$se.fit,ci.all$fit+z*ci.all$se.fit) ##residuals(model,type="response") ## ## zero <- bb; zero[-1] <- 0 ## intercept <- (model.matrix(model,curdata[1,,])%*%zero)[1] if (!missing(predictfun)) { R <- Y-predict(model, newdata=partdata) ci.all <- predict(model, newdata=newdata, se.fit=TRUE, interval = "confidence", level=level,...) } else { XX0 <- model.matrix(formula(terms(model)),data=partdata) R <- Y-XX0%*%bb } if (inherits(model,"lmerMod")) { uz <- as.matrix(unlist(lme4::ranef(model))%*%do.call(Matrix::rBind,lme4::getME(model,"Ztlist")))[1,] R <- R-uz } pr <- trans(intercept + R) intercept0 <- 0 if (is.na(intercept)) { intercept <- 0 if (!is.null(var2)) { intercept <- coef(model)[paste0(var2,thelevels)][as.numeric(curdata[,var2])] intercept0 <- coef(model)[paste0(var2,thelevels[1])] } } if (is.null(dots$ylim)) { if (partres) { if (cidiff) dots$ylim <- range(pr) else dots$ylim <- range(trans(c(ci.all$fit)),pr) } else dots$ylim <- trans(range(ci.all$fit)) } if (is.null(dots$ylab)) dots$ylab <- responseorig if (is.null(var1)) { dots$axes=FALSE if (is.null(dots$xlab)) dots$xlab <- "" } else { if (is.null(dots$xlab)) dots$xlab <- var1 } if (!plot) return(list(x=x, y=pr, predict=ci.all, predict.newdata=newdata)) plot.list <- c(x=0,y=0,type="n",dots) if (new) { do.call(graphics::plot, plot.list) if (is.null(var1)) { box() axis(2) axis(1,at=seq(length(thelevels)),labels) } } col.trans <- Col(col,alpha) Wrap <- function(k,n) { (seq_len(k)-1)%%n +1 } col.i <- Wrap(k,length(col)); col.k <- col[col.i]; lty.k <- lty[Wrap(k,length(lty))] pch.k <- pch[Wrap(k,length(pch))] if (!is.null(var1)) { for (i in seq_len(k)) { ci0 <- trans(ci.all$fit[(npoints*(i-1)+1):(i*npoints),]) y <- ci0[,1]; yu <- ci0[,3]; yl <- ci0[,2] lines(y ~ x, col=col.k[i], lwd=lwd, lty=lty.k[i]) if (ci) { lines(yl ~ x, lwd=1, col=col.k[i], lty=ci.lty) lines(yu ~ x, lwd=1, col=col.k[i], lty=ci.lty) xx <- c(x, rev(x)) yy <- c(yl, rev(yu)) polygon(xx,yy, col=col.trans[col.i[i]], lty=0) } } } ii <- as.numeric(curdata[,var2]) if (is.null(var1)) { xx <- curdata[,var2] x <- jitter(as.numeric(xx),jitter) if (missing(colpt)) colpt <- Col(col[1],alpha) if (partres>0) points(pr ~ x,pch=pch[1], col=colpt[1], cex=cex, ...) positions <- seq(k) mycoef <- bb[paste0(var2,thelevels)][-1] if (inherits(model,c("lm","glm"))) myconf <- confint(model)[paste0(var2,thelevels)[-1],,drop=FALSE] else { myconf <- matrix(mycoef,ncol=2,nrow=length(mycoef)) myconf <- myconf + qnorm(0.975)*cbind((diag(as.matrix(SS))[-1])^0.5)%x%cbind(-1,1) } for (pos in seq(k)) { if (cidiff) { if (pos>1) { ci0 <- trans(intercept+myconf[pos-1,]) yl <- ci0[1]; yu <- ci0[2]; y <- trans(intercept+mycoef[pos-1]) } else { yu <- yl <- NULL; y <- trans(intercept) } } else if (partse) { y0 <- pr[xx==levels(xx)[pos]] ci0 <- confint(lm(y0~1)) yl <- ci0[1]; yu <- ci0[2]; y <- trans(mean(y0)) } else { ci0 <- rbind(trans(ci.all$fit[(npoints*(pos-1)+1):(pos*npoints),])) y <- ci0[,1]; yu <- ci0[,3]; yl <- ci0[,2] } if (!mean) y <- NULL confband(pos,yl,yu,delta=delta,center=y,centermark=centermark,col=col[1],lwd=lwd[1],lty=lty[1],cex=cex) } } else { if (partres) { xx <- curdata[,var1] if (!missing(colpt)) { points(pr ~ xx, col=colpt, cex=cex, pch=pch[1],...) } else { if (!is.null(var2)) points(pr ~ xx, col=col.k[ii], pch=pch.k[ii], cex=cex, ...) 
else points(pr ~ xx, col=col[1], pch=pch[1], cex=cex,...) } } } if (k>1 && legend!=FALSE) { if (length(lty)>1) legend(legend, legend=thelevels, col=col.k, pch=pch.k, bg="white", lty=lty.k,cex=cex) else legend(legend, legend=thelevels, col=col.k, pch=pch.k, bg="white",cex=cex) } ## palette(curpal) invisible(list(x=xx, y=pr, predict=ci.all, predict.newdata=newdata)) } lava/R/wrapvec.R0000644000176200001440000000054513162174023013173 0ustar liggesusers##' Wrap vector ##' ##' Wrap vector ##' @param x Vector or integer ##' @param delta Shift ##' @param ... Additional parameters ##' @export ##' @examples ##' wrapvec(5,2) wrapvec <- function(x,delta=0L,...) { if (length(x)==1 && floor(x)==x && x>0) { x <- seq(x) } if (delta==0L) return(x) x[(seq_along(x)+delta-1L)%%length(x)+1L] } lava/R/Missing.R0000644000176200001440000000537313162174023013141 0ustar liggesusers##' Missing value generator ##' ##' This function adds a binary variable to a given \code{lvm} model ##' and also a variable which is equal to the original variable where ##' the binary variable is equal to zero ##' ##' @title Missing value generator ##' @param object \code{lvm}-object. ##' @param formula The right hand side specifies the name of a latent ##' variable which is not always observed. The left hand side ##' specifies the name of a new variable which is equal to the latent ##' variable but has missing values. If given as a string then this ##' is used as the name of the latent (full-data) name, and the ##' observed data name is 'missing.data' ##' @param Rformula Missing data mechanism with left hand side ##' specifying the name of the observed data indicator (may also just ##' be given as a character instead of a formula) ##' @param missing.name Name of observed data variable (only used if ##' 'formula' was given as a character specifying the name of the ##' full-data variable) ##' @param suffix If missing.name is missing, then the name of the ##' oberved data variable will be the name of the full-data variable + ##' the suffix ##' @param ... Passed to binomial.lvm. ##' @return lvm object ##' @aliases Missing, Missing<- ##' @examples ##' library(lava) ##' set.seed(17) ##' m <- lvm(y0~x01+x02+x03) ##' m <- Missing(m,formula=x1~x01,Rformula=R1~0.3*x02+-0.7*x01,p=0.4) ##' sim(m,10) ##' ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r") ##' ## same as ##' ## m <- Missing(m,y~1,r~1) ##' sim(m,10) ##' ##' ## same as ##' m <- lvm(y~1) ##' Missing(m,"y") <- r~x ##' sim(m,10) ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r",suffix=".") ##' ## same as ##' ## m <- Missing(m,"y","r",missing.name="y.") ##' ## same as ##' ## m <- Missing(m,y.~y,"r") ##' sim(m,10) ##' ##' @export ##' @author Thomas A. Gerds Missing <- function(object,formula,Rformula,missing.name,suffix="0",...){ if (is.character(Rformula)) { indicatorname <- Rformula Rformula <- toformula(Rformula,1) } else { indicatorname <- all.vars(Rformula)[1] } if (length(all.vars(formula))==1) formula <- all.vars(formula) if (is.character(formula)) { if (missing(missing.name)) missing.name <- paste0(formula,suffix) formula <- toformula(missing.name,formula) } newf <- update(formula,paste(".~.+",indicatorname)) if (is.null(distribution(object,indicatorname)[[1]]) || length(list(...))>0) { distribution(object,indicatorname) <- binomial.lvm(...) } transform(object,newf) <- function(u){ out <- u[,1] out[u[,2]==0] <- NA out } regression(object) <- Rformula object } ##' @export "Missing<-" <- function(object,formula,...,value) { Missing(object,formula,value,...) 
} lava/R/coef.R0000644000176200001440000006373413162174023012451 0ustar liggesusers###{{{ coef.lvm ##' @export `coef.lvm` <- function(object, mean=TRUE, fix=TRUE, symbol=lava.options()$symbol, silent=TRUE, p, data, vcov, level=9, labels=lava.options()$coef.names, ...) { if (fix) object <- fixsome(object,measurement.fix=FALSE) if (!missing(p)) { coefs <- matrix(NA,nrow=length(p),ncol=4); coefs[,1] <- p rownames(coefs) <- c(coef(object,mean=TRUE,fix=FALSE)[c(seq_len(index(object)$npar.mean))], {if (index(object)$npar>0) paste0("p",seq_len(index(object)$npar)) }, {if (index(object)$npar.ex>0) paste0("e",seq_len(index(object)$npar.ex))} ) if (missing(vcov)) { if (!missing(data) && !is.null(data)) { I <- information(object,p=p,data=data,type="E") myvcov <- solve(I) } else { myvcov <- matrix(NA,length(p),length(p)) } object$vcov <- myvcov } else object$vcov <- vcov coefs[,2] <- sqrt(diag(object$vcov)) coefs[,3] <- coefs[,1]/coefs[,2] coefs[,4] <- 2*(pnorm(abs(coefs[,3]),lower.tail=FALSE)) colnames(coefs) <- c("Estimate","Std. Error", "Z value", "Pr(>|z|)") object$coefficients <- coefs; return(coef.lvmfit(object,level=level,labels=labels,symbol=symbol,...)) } ## Free regression/covariance parameters AP <- matrices(object, paste0("p",seq_len(index(object)$npar))) A <- AP$A; A[index(object)$M1==0] <- "0" ## Only free parameters P <- AP$P; P[index(object)$P1==0] <- "0"; P[upper.tri(P)] <- "0" nn <- vars(object) counter <- 0 res <- c() resname <- c() ## if (DEBUG) { ii <- which(t(A)!="0",arr.ind=TRUE) rname <- paste(nn[ii[,1]],nn[ii[,2]],sep=symbol[1]) if (labels) { rname2 <- t(regfix(Model(object))$labels)[ii] rname[which(!is.na(rname2))] <- rname2[which(!is.na(rname2))] } res <- rname resname <- c(resname,t(A)[ii]) ## } else ## for (i in seq_len(ncol(A))) ## for (j in seq_len(nrow(A))) { ## val <- A[j,i] ## if (val!="0") { ## if (labels & !is.na(regfix(Model(object))$labels[j,i])) ## res <- c(res, regfix(Model(object))$labels[j,i]) ## else ## res <- c(res, paste0(nn[i],symbol[1],nn[j])) ## counter <- counter+1 ## resname <- c(resname, val) ## } ## } ## if (DEBUG) { ii <- which(P!="0",arr.ind=TRUE) if (length(symbol)<2) rname <- paste(nn[ii[,2]],nn[ii[,1]],sep=lava.options()$symbol[2]) else rname <- paste(nn[ii[,2]],nn[ii[,1]],sep=symbol[2]) if (labels) { rname2 <- (covfix(Model(object))$labels)[ii] rname[which(!is.na(rname2))] <- rname2[which(!is.na(rname2))] } res <- c(res,rname) resname <- c(resname,P[ii]) ## } else ## for (i in seq_len(ncol(P))) ## for (j in seq(i,nrow(P))) ## { ## val <- P[j,i] ## if (val!="0") { ## counter <- counter+1 ## if (length(symbol)<2) { ## if (nn[i]!=nn[j]) { ## part2 <- paste(nn[i],nn[j],sep=",") ## } else part2 <- nn[i] ## } else { ## part2 <- paste0(nn[i],symbol[2],nn[j]) ## } ## if (labels & !is.na(covfix(Model(object))$labels[j,i])) ## res <- c(res, covfix(Model(object))$labels[j,i]) ## else ## res <- c(res, part2) ## resname <- c(resname, val) ## } ## } names(res) <- resname resnum <- sapply(resname, function(s) char2num(substr(s,2,nchar(s)))) res <- res[order(resnum)] if (mean) { nmean <- sum(index(object)$v1==1) if (nmean>0) { if (!labels) res <- c(vars(object)[index(object)$v1==1], res) else { mres <- c() for (i in seq_len(length(index(object)$v1))) { val <- index(object)$v1[i] if (val==1) { if (!is.na(intfix(Model(object))[[i]])) { mres <- c(mres, intfix(Model(object))[[i]]) } else mres <- c(mres, vars(object)[i]) } } res <- c(mres,res) } names(res)[seq_len(nmean)] <- paste0("m",seq_len(nmean)) } } if (!is.null(object$expar) && sum(index(object)$e1==1)>0) { 
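        ## Append the names of any additional ('extra') parameters (those flagged by index 'e1') to the coefficient vector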
n2 <- names(object$expar)[index(object)$e1==1] if (labels) { count <- 0 for (i in seq_len(length(index(object)$e1))) { if (index(object)$e1[i]==1) { val <- object$exfix[[i]] count <- count+1 if(!is.na(val)) n2[count] <- val } } } names(n2) <- paste0("e",seq_len(length(n2))) res <- c(res,n2) } if (!silent) { cat(paste(res, collapse="\n")); cat("\n") } if (!is.null(object$order)) res <- res[object$order] res } ###}}} ###{{{ coef.lvmfit ##' @export `coef.lvmfit` <- function(object, level=ifelse(missing(type),-1,2), symbol=lava.options()$symbol, data, std=NULL, labels=lava.options()$coef.names, ##labels=TRUE, vcov, type, reliability=FALSE, second=FALSE, ...) { res <- (pars.default(object,...)) if (level<0 && !is.null(names(res))) return(res) if (is.null(object$control$meanstructure)) meanstructure <- TRUE else meanstructure <- object$control$meanstructure npar <- index(object)$npar; npar.mean <- index(object)$npar.mean*meanstructure npar.ex <- index(object)$npar.ex para <- parameter(Model(object)) para.idx <- which(vars(object)%in%para) if (inherits(object,"lvm.missing")) { if (length(object$cc)==0) {## No complete cases coefs <- coef(object$estimate) c1 <- coef(Model(object),mean=TRUE,fix=FALSE) c1. <- coef(Model(object),mean=FALSE,fix=FALSE) nn <- gsub("^[0-9]*@","",names(coefs)) myorder <- match(c1,nn) myorder.reg <- order(na.omit(match(nn,c1.))) myorder.extra <- c() ##mp <-effect modelPar(object,seq_len(npar+npar.mean+npar.ex)) ## mp <- modelPar(object,seq_len(npar+npar.mean+npar.ex)) ## myorder <- c(mp$meanpar,mp$p) ## myorder.reg <- seq_len(length(mp$p)) ## myorder.extra <- mp$p2 } else { myorder <- na.omit(modelPar(object$multigroup,seq_len(npar+npar.mean))$p[[object$cc]]) myorder.reg <- na.omit(modelPar(object$multigroup,seq_len(npar))$p[[object$cc]]) myorder.extra <- seq_len(index(object)$npar.ex)+length(myorder) myorder <- c(myorder,myorder.extra) } } else { myorder <- seq_len(npar+npar.mean) myorder.reg <- seq_len(npar) myorder.extra <- seq_len(index(object)$npar.ex)+length(myorder) myorder <- c(myorder,myorder.extra) } ## myorder <- seq_len(npar+npar.mean) ## myorder.reg <- seq_len(npar) ## myorder.extra <- seq_len(index(object)$npar.ex)+length(myorder) ## myorder <- c(myorder,myorder.extra) if (level<0) { names(res)[seq_len(length(myorder))] <- coef(Model(object),fix=FALSE, mean=meanstructure, symbol=symbol)[order(myorder)] return(res) } latent.var <- latent(object) latent.idx <- which(vars(object)%in%latent.var) Type <- Var <- From <- VarType <- FromType <- c() Astd <- Pstd <- vstd <- mytype <- NULL if (!is.null(std)) { stdCoef <- stdcoef(object) { switch(tolower(std), latent = {Astd=stdCoef$Astar; Pstd=stdCoef$Pstar; vstd=stdCoef$vstar}, y = {Astd=stdCoef$AstarY; Pstd=stdCoef$PstarY; vstd=stdCoef$vstarY}, xy = {Astd=stdCoef$AstarXY; Pstd=stdCoef$PstarXY; vstd=stdCoef$vstarXY}, yx = {Astd=stdCoef$AstarXY; Pstd=stdCoef$PstarXY; vstd=stdCoef$vstarXY} ) } } myparnames <- paste0("p",seq_len(npar+npar.ex))[myorder.reg] p <- matrices(Model(object), myparnames) A <- p$A P <- p$P mycoef <- object$coef if (!missing(type) | !missing(vcov)) { if (!missing(vcov)) { mycoef[,2] <- sqrt(diag(vcov))[myorder] } else { if (!missing(data)) myvcov <- information(object,type=type,data=data,inverse=TRUE) else myvcov <- information(object,type=type,inverse=TRUE) mycoef[,2] <- sqrt(diag(myvcov))[myorder] } mycoef[,3] <- mycoef[,1]/mycoef[,2] mycoef[,4] <- 2*(pnorm(abs(mycoef[,3]),lower.tail=FALSE)) } coefs <- mycoef[myorder,,drop=FALSE] nn <- colnames(A) free <- A!="0" free[index(object)$M1!=1] <- FALSE 
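    ## Flag regression parameters that are subject to non-linear constraints (defined via 'constrain')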
nlincon <- matrix(Model(object)$par%in%names(constrain(Model(object))),nrow(A)) if (missing(data)) { data <- matrix(0,ncol=length(index(Model(object))$manifest)); colnames(data) <- index(Model(object))$manifest } nlincon.estimates.full<- constraints(object,second=second,data=data) nlincon.estimates <- nlincon.estimates.full[,-(5:6),drop=FALSE] matched <- c() res <- c() for (i in seq_len(ncol(A))) for (j in seq_len(nrow(A))) { val <- A[j,i] if (val!="0") { matching <- match(val,rownames(coefs)) matched <- c(matched,matching) if (!is.na(matching)) { if (free[j,i]) newrow <- matrix(coefs[matching,],nrow=1) else { newrow <- matrix(c(coefs[matching,1],NA,NA,NA), nrow=1) } } else { Debug(list("(i,j)", i, ",", j)) if (nlincon[j,i]) { newrow <- matrix(nlincon.estimates[Model(object)$par[j,i],],nrow=1) } else { newrow <- matrix(c(Model(object)$fix[j,i], NA, NA, NA), nrow=1) } } if (!is.null(std)) { newrow <- cbind(newrow,Astd[j,i]) } if (labels & !is.na(regfix(Model(object))$labels[j,i])) { rownames(newrow) <- regfix(Model(object))$labels[j,i] if (labels>1) { newst <- paste0(nn[i],symbol[1],nn[j]) if (rownames(newrow)!=newst) rownames(newrow) <- paste(rownames(newrow),newst,sep=":") } } else { rownames(newrow) <- paste0(nn[i],symbol[1],nn[j]) } if (free[j,i] | level>2) { res <- rbind(res, newrow) Type <- c(Type,"regression") Var <- c(Var, nn[i]) From <- c(From, nn[j]) } } } free.var <- P!="0" free.var[index(object)$P1!=1] <- FALSE nlincon.var <- matrix(Model(object)$covpar%in%names(constrain(Model(object))),nrow(P)) if (level>0) ## Variance estimates: for (i in seq_len(ncol(p$P))) for (j in seq(i,nrow(p$P))) { val <- p$P[j,i] if (!(i%in%para.idx)) if (val!="0" & !any(vars(object)[c(i,j)]%in%index(Model(object))$exogenous)) if (level>1 | !all(vars(object)[c(i,j)]%in%index(Model(object))$manifest)) { matching <- match(val,rownames(coefs)) matched <- c(matched,matching) if (!is.na(matching)) { if (free.var[j,i]) newrow <- matrix(coefs[matching,],nrow=1) else newrow <- matrix(c(coefs[matching,1],NA,NA,NA), nrow=1) ## We don't want to report p-values of tests on the boundary of the parameter space if (i==j) newrow[,4] <- NA } else { Debug(list("(i,j)", i, ",", j)) if (nlincon.var[j,i]) { newrow <- matrix(nlincon.estimates[Model(object)$covpar[j,i],],nrow=1) } else { newrow <- matrix(c(Model(object)$covfix[j,i], NA, NA, NA), nrow=1) } } if (!missing(std)) { newrow <- cbind(newrow,Pstd[i,j]) } if (length(symbol)<2) { if (nn[i]!=nn[j]) { part2 <- paste(nn[i],nn[j],sep=lava.options()$symbol[2]) } else part2 <- nn[i] } else { part2 <- paste0(nn[i],symbol[2],nn[j]) } if (labels & !is.na(covfix(Model(object))$labels[j,i])) { rownames(newrow) <- covfix(Model(object))$labels[j,i] if (labels>1) { if (rownames(newrow)!=part2) rownames(newrow) <- paste(rownames(newrow),part2,sep=":") } } else { rownames(newrow) <- part2 } if ((free.var[j,i]) | level>2) { res <- rbind(res, newrow) Type <- c(Type,"variance") Var <- c(Var, nn[i]) From <- c(From, nn[j]) } } } res0 <- res ## Mean parameter: nlincon.mean <- lapply(Model(object)$mean, function(x) x%in%names(constrain(Model(object))) ) if (level>0 & npar.mean>0) { midx <- seq_len(npar.mean) rownames(coefs)[midx] <- paste0("m",myorder[midx]) munames <- rownames(coefs)[seq_len(npar.mean)] meanpar <- matrices(Model(object), myparnames, munames)$v for (i in seq_len(length(meanpar))) { if (!index(Model(object))$vars[i]%in%index(Model(object))$exogenous) { val <- meanpar[i] matching <- match(val,rownames(coefs)) if (!is.na(matching)) { if (index(object)$v1[i]==1) ## if 
free-parameter newrow <- matrix(coefs[matching,],nrow=1) else newrow <- matrix(c(coefs[matching,1],NA,NA,NA), nrow=1) } else { if (nlincon.mean[[i]]) { newrow <- matrix(nlincon.estimates[Model(object)$mean[[i]],],nrow=1) } else { newrow <- matrix(c(as.numeric(meanpar[i]), NA, NA, NA), nrow=1) } } if (!missing(std)) { newrow <- cbind(newrow,vstd[i]) } if (labels & !(is.na(intfix(Model(object))[[i]]) | is.numeric(intfix(Model(object))[[i]]))) { rownames(newrow) <- intfix(Model(object))[[i]] if (labels>1) { if (rownames(newrow)!=index(Model(object))$vars[i]) rownames(newrow) <- paste(rownames(newrow),index(Model(object))$vars[i],sep=":") } } else { rownames(newrow) <- index(Model(object))$vars[i] } if ((index(object)$v1[i]==1) | level>2) { res <- rbind(res, newrow) Type <- c(Type,ifelse(!(i%in%para.idx),"intercept","parameter")) Var <- c(Var, index(Model(object))$vars[i]) From <- c(From, NA) } } } } if (level>0 && length(myorder.extra>0)) { cc <- coefs[myorder.extra,,drop=FALSE] rownames(cc) <- rownames(index(object)$epar)[which(index(object)$e1==1)] cc <- cbind(cc,rep(NA,ncol(res)-ncol(cc))) res <- rbind(res,cc) Type <- c(Type,rep("extra",length(myorder.extra))) Var <- c(Var,rep(NA,length(myorder.extra))) From <- c(From,rep(NA,length(myorder.extra))) } mycolnames <- colnames(coefs) if (!is.null(std)) mycolnames <- c(mycolnames, paste("std",std,sep=".")) colnames(res) <- mycolnames attributes(res)$type <- Type attributes(res)$var <- Var attributes(res)$from <- From attributes(res)$latent <- latent.var attributes(res)$nlincon <- nlincon.estimates.full return(res) } ###}}} coef.lvmfit ###{{{ coef.multigroup ##' @export coef.multigroup <- function(object,...) { return(object$parpos) } ###}}} coef.multigroup ###{{{ coef.multigroupfit ##' @export coef.multigroupfit <- function(object, level=0,vcov, ext=FALSE, labels=lava.options()$coef.names, symbol=lava.options()$symbol, covsymb=NULL,groups=NULL,...) { if (level==0) { res <- pars(object); if (is.null(names(res))) names(res) <- object$model$name return(res) } if (level==1) { theta <- pars(object) if (missing(vcov)) theta.sd <- sqrt(diag(object$vcov)) else theta.sd <- sqrt(diag(vcov)) res <- cbind(theta,theta.sd,(Z <- theta/theta.sd),2*(pnorm(abs(Z),lower.tail=FALSE))) if (is.null(rownames(res))) rownames(res) <- object$model$name colnames(res) <- c("Estimate","Std. Error", "Z value", "Pr(>|z|)") return(res) } cc <- coef(object, level=1, symbol=symbol, ...) model <- Model(object) parpos <- modelPar(model, seq_len(nrow(cc)))$p npar.mean <- object$model$npar.mean npar <- object$model$npar mynames <- c() if (npar.mean>0) { mynames <- unlist(object$model$meanlist) mynames <- names(mynames)[!duplicated(mynames)] } if (npar>0) { mynames <- c(mynames,object$model$par) } res <- list() misrow <- list() parpos2 <- list() if (is.null(groups)) groups <- seq(model$ngroup) if (length(groups)==0) groups <- seq(model$ngroup) for (i in groups) { orignames <- coef(object$model0$lvm[[i]],fix=FALSE,mean=object$meanstructure, silent=TRUE, symbol=lava.options()$symbol) if (ext) { newnames. <- coef(Model(model)[[i]],fix=FALSE, mean=object$meanstructure, silent=TRUE, labels=labels, symbol=symbol) newnames <- coef(Model(model)[[i]],fix=FALSE, mean=object$meanstructure, silent=TRUE, labels=labels,symbol=lava.options()$symbol) newcoef <- matrix(NA,ncol=4,nrow=length(newnames)) rownames(newcoef) <- newnames. 
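            ## Map the group-specific parameters onto the extended (all-groups) coefficient table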
idx <- match(orignames,newnames) newcoef[idx,] <- cc[parpos[[i]],,drop=FALSE] newparpos <- rep(NA,length(newnames)) newparpos[idx] <- parpos[[i]] parpos2 <- c(parpos2, list(newparpos)) misrow <- c(misrow, list(setdiff(seq_len(length(newnames)),idx))) } else { newcoef <- cc[parpos[[i]],,drop=FALSE] rownames(newcoef) <- orignames } colnames(newcoef) <- colnames(cc) ## Position of variance parameters: varpos <- variances(Model(model)[[i]],mean=FALSE) ## Number of parameters resp mean-parameters p <- nrow(newcoef); p0 <- length(coef(Model(model)[[i]],fix=FALSE, mean=FALSE, silent=TRUE)) newcoef[(p-p0) + varpos,4] <- NA res <- c(res, list(newcoef)) } if (ext) { for (i in seq(length(groups))) { if (length(misrow[[i]])>0) { nn <- rownames(res[[i]])[misrow[[i]]] for (j in setdiff(seq_len(length(groups)),i)) { nn2 <- rownames(res[[j]]) matching <- na.omit(match(nn,nn2)) matching <- setdiff(matching,misrow[[j]]) if (length(matching)>0) { idxj <- match(nn2[matching],nn2) idxi <- match(nn2[matching],rownames(res[[i]])) res[[i]][nn2[matching],] <- res[[j]][nn2[matching],] parpos2[[i]][idxi] <- parpos2[[j]][idxj] nn <- setdiff(nn,nn2[matching]) } if (length(nn)<1) break; } } } attributes(res)$parpos <- parpos2 } return(res) } ###}}} ###{{{ CoefMat ##' @export CoefMat.multigroupfit <- function(x,level=9, labels=lava.options()$coef.names, symbol=lava.options()$symbol[1], data=NULL,groups=seq(Model(x)$ngroup),...) { cc <- coef(x,level=level,ext=TRUE,symbol=symbol,data=data,groups=groups) parpos <- attributes(cc)$parpos res <- c() nlincon.estimates <- c() nlincon.names <- c() count <- k <- 0 for (i in groups) { k <- k+1 m0 <- Model(Model(x))[[i]] mycoef <- cc[[k]] npar <- index(m0)$npar npar.mean <- index(m0)$npar.mean if (npar>0) rownames(mycoef)[(seq(npar))+npar.mean] <- paste0("p",seq(npar)) m0$coefficients <- mycoef m0$opt$estimate <- mycoef[,1] Vcov <- vcov(x)[parpos[[k]],parpos[[k]],drop=FALSE]; colnames(Vcov) <- rownames(Vcov) <- rownames(mycoef) m0$vcov <- Vcov cc0 <- coef.lvmfit(m0,level=level,labels=labels,symbol=symbol) attributes(cc0)$dispname <- x$opt$dispname res <- c(res, list(CoefMat(cc0))) newnlin <- attributes(cc0)$nlincon if (length(newnlin)>0) if (count==0) { count <- count+1 nlincon.estimates <- newnlin nlincon.names <- rownames(newnlin) } else { for (j in seq_len(NROW(newnlin))) { if (!(rownames(newnlin)[j]%in%nlincon.names)) { nlincon.estimates <- rbind(nlincon.estimates,newnlin[j,,drop=FALSE]) nlincon.names <- c(nlincon.names,rownames(newnlin)[j]) } } } } rownames(nlincon.estimates) <- nlincon.names attributes(res)$nlincon <- nlincon.estimates return(res) } ##' @export CoefMat <- function(x, digits = max(3, getOption("digits") - 2), level=9, symbol=lava.options()$symbol[1],...) { cc <- x if (!is.matrix(x)) { cc <- coef(x,level=level,symbol=symbol,...) 
} res <- c() mycoef <- format(round(cc,max(1,digits)),digits=digits) mycoef[,4] <- formatC(cc[,4],digits=digits-1,format="g", preserve.width="common",flag="") mycoef[is.na(cc)] <- "" mycoef[cc[,4]<1e-12,4] <- " <1e-12" M <- ncol(cc) N <- nrow(cc) Nreg <- sum(attributes(cc)$type=="regression") Nvar <- sum(attributes(cc)$type=="variance") Nint <- sum(attributes(cc)$type=="intercept") Nextra <- sum(attributes(cc)$type=="extra") latent.var <- attributes(cc)$latent if (Nreg>0) { reg.idx <- which(attributes(cc)$type=="regression") latent.from <- which(attributes(cc)$from[reg.idx]%in%latent.var) latent.from <- latent.from[which(is.na(match(attributes(cc)$var[latent.from],latent.var)))] reg.idx <- setdiff(reg.idx,latent.from) Nmeas <- length(latent.from) if (Nmeas>0) { first.entry <- c() for (i in latent.var) { pos <- match(i,attributes(cc)$from[latent.from]) if (!is.na(pos)) first.entry <- c(first.entry, pos) } res <- rbind(res, c("Measurements:",rep("",M))) count <- 0 Delta <- FALSE for (i in latent.var) { count <- count+1 Delta <- !Delta Myidx <- which(attributes(cc)$from==i & attributes(cc)$type=="regression" & !(attributes(cc)$var%in%latent.var)) prefix <- ifelse(Delta," "," ") for (j in Myidx) { newrow <- mycoef[j,] newname <- rownames(cc)[j] res <- rbind(res,c(paste(prefix,newname),newrow)) } } } if ((Nreg-Nmeas)>0) { responses <- unique(attributes(cc)$var[reg.idx]) first.entry <- c() for (i in responses) { pos <- match(i,attributes(cc)$var[reg.idx]) first.entry <- c(first.entry, pos) } res <- rbind(res, c("Regressions:",rep("",M))) count <- 0 Delta <- FALSE for (i in reg.idx) { count <- count+1 newrow <- mycoef[i,] newname <- rownames(cc)[i] if (count%in%first.entry) Delta <- !Delta prefix <- ifelse(Delta," "," ") res <- rbind(res,c(paste(prefix,newname),newrow)) } } } if (Nint>0) { int.idx <- which(attributes(cc)$type=="intercept") res <- rbind(res, c("Intercepts:",rep("",M))) for (i in int.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] res <- rbind(res,c(paste(" ",newname),newrow)) } } par.idx <- which(attributes(cc)$type=="parameter") parres <- rbind(c("Additional Parameters:",rep("",M))) for (i in par.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] parres <- rbind(parres,c(paste(" ",newname),newrow)) } extra.idx <- which(attributes(cc)$type=="extra") for (i in extra.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] parres <- rbind(parres,c(paste(" ",newname),newrow)) } if (nrow(parres)>1) res <- rbind(res,parres) if (Nvar>0) { var.idx <- which(attributes(cc)$type=="variance") vname <- "Residual Variances:" if (!is.list(x)) { if (!is.null(attributes(x)$dispname)) vname <- attributes(x)$dispname } else if (!is.null(x$opt$dispname)) vname <- x$opt$dispname res <- rbind(res, c(vname,rep("",M))) for (i in var.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] res <- rbind(res,c(paste(" ",newname),newrow)) } } res0 <- res[,-1] rownames(res0) <- format(res[,1],justify="left") res0 } ###}}} CoefMat ###{{{ standardized coefficients stdcoef <- function(x,p=coef(x),...) { M0 <- moments(x,p=p,...) A <- t(M0$A) P <- M0$P v <- M0$v C <- M0$Cfull N <- diag(sqrt(diag(C)),ncol=nrow(C)); colnames(N) <- rownames(N) <- vars(x) iN <- N; diag(iN)[diag(N)>0] <- 1/diag(iN)[diag(N)>0] diag(iN)[diag(N)==0] <- NA Nn <- N; Nn[] <- 0; diag(Nn) <- 1 Nn[latent(x),latent(x)] <- N[latent(x),latent(x)] iNn <- Nn; diag(iNn) <- 1/diag(Nn) Ny <- Nn; Ny[endogenous(x),endogenous(x)] <- N[endogenous(x),endogenous(x)] iNy <- Ny; diag(iNy) <- 1/diag(Ny) ## Standardized w.r.t. 
latent,y and x: AstarXY <- t(iN%*%A%*%N) PstarXY <- iN%*%P%*%iN if (!is.null(v)) vstarXY <- iN%*%v else vstarXY <- NULL pstdXY <- pars(Model(x),A=AstarXY,P=PstarXY,v=vstarXY) ## Standardized w.r.t. latent, y: AstarY <- t(iNy%*%A%*%Ny) PstarY <- iNy%*%P%*%iNy if (!is.null(v)) vstarY <- iNy%*%v else vstarY <- NULL pstdY <- pars(Model(x),A=AstarY,P=PstarY,v=vstarY) ## Standardized w.r.t. latent only: Astar <- t(iNn%*%A%*%Nn) Pstar <- iNn%*%P%*%iNn if (!is.null(v)) vstar <- iNn%*%v else vstar <- NULL pstd <- pars(Model(x),A=Astar,Pstar,v=vstar) k <- length(p)-length(pstd) res <- list(par=cbind(p,c(pstd,rep(NA,k)),c(pstdXY,rep(NA,k))), AstarXY=AstarXY, PstarXY=PstarXY, vstarXY=vstarXY, AstarY=AstarY, PstarY=PstarY, vstarY=vstarY, Astar=Astar, Pstar=Pstar, vstar=vstar) return(res) } ###}}} standardized coefficients lava/R/procformula.R0000644000176200001440000001450313162174023014054 0ustar liggesusers##' @export procformula <- function(object=NULL,value,exo=lava.options()$exogenous,...) { ## Split into reponse and covariates by ~ disregarding expressions in parantheses ## '(?!...)' Negative lookahead assertion regex <- "~(?![^\\(].*\\))" yx <- lapply(strsplit(as.character(value),regex,perl=TRUE),function(x) gsub(" ","",x))[-1] yx <- lapply(yx,function(x) gsub("\n","",x)) iscovar <- FALSE if (length(yx)==1) { lhs <- NULL; xidx <- 1 } else { lhs <- yx[1]; xidx <- 2 if (yx[[xidx]][1]=="") { yx[[xidx]] <- yx[[xidx]][-1] iscovar <- TRUE } } ##Check for link function invlink <- NULL if (xidx==2) { if (length(grep("[a-zA-Z0-9_]*\\(.*\\)$",yx[[xidx]]))>0) { ## rhs of the form F(x+y) invlink <- strsplit(yx[[xidx]],"\\(.*\\)")[[1]][1] if (invlink%in%c("f","v","I","") || grepl("\\+",invlink)) { ## Reserved for setting linear constraints invlink <- NULL } else { yx[[xidx]] <- gsub(paste0(invlink,"\\(|\\)$"),"",yx[[xidx]]) } } } ## Handling constraints with negative coefficients ## while not tampering with formulas like y~f(x,-2) st <- yx[[xidx]] st <- gsub("\\-","\\+\\-",gsub("\\+\\-","\\-",st)) ## Convert - to +- (to allow for splitting on '+') ##gsub("[^,]\\-","\\+\\-",st) ## Convert back any - not starting with ',' st <- gsub(",\\+",",",st) ## Remove + inside 'f' and 'v' constraints st <- gsub("^\\+","",st) ## Remove leading plus yx[[xidx]] <- st ## Match '+' but not when preceeded by ( ... ) X <- strsplit(yx[[xidx]],"\\+(?![^\\(]*\\))", perl=TRUE)[[1]] ##regex <- "(?!(\\(*))[\\(\\)]" regex <- "[\\(\\)]" ## Keep squares brackets and |(...) statements ## Extract variables from expressions like ## f(x,b) -> x,b and 2*x -> 2,cx ## but avoid to tamper with transformation expressions: ## a~(x*b) res <- lapply(X,decomp.specials,regex,pattern2="\\*",pattern.ignore="~",reverse=TRUE,perl=TRUE) ##OLD: ##res <- lapply(X,decomp.specials,pattern2="[*]",reverse=TRUE) xx <- unlist(lapply(res, function(x) x[1])) xxf <- lapply(as.list(xx),function(x) decomp.specials(x,NULL,pattern2="\\[|~",perl=TRUE)) xs <- unlist(lapply(xxf,function(x) x[1])) ## Alter intercepts? intpos <- which(vapply(xs,function(x) grepl("^[\\+\\-]*[\\.|0-9]+$",x), 0)==1) ## Match '(int)' intpos0 <- which(vapply(X,function(x) grepl("^\\([\\+\\-]*[\\.|0-9]+\\)$",x),0)==1) yy <- ys <- NULL if (length(lhs)>0) { yy <- decomp.specials(lhs) yyf <- lapply(yy,function(y) decomp.specials(y,NULL,pattern2="[",fixed=TRUE,perl=FALSE)) ys <- unlist(lapply(yyf,function(x) x[1])) } notexo <- c() if (!is.null(object)) { if (length(lhs)>0) { object <- addvar(object,ys,reindex=FALSE,...) 
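            ## Record the responses so they are not later classified as exogenous variables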
notexo <- ys ## Add link-transformation if (!is.null(invlink)) { if (invlink=="") { object <- transform(object,ys,NULL,post=FALSE) covariance(object,ys) <- NA } else { ff <- function(x) {}; body(ff) <- parse(text=paste0(invlink,"(x)")) object <- transform(object,ys,ff,post=FALSE) covariance(object,ys) <- 0 } } } if (length(intpos>0)) { xs[intpos[1]] <- gsub("\\+","",xs[intpos[1]]) if (xs[intpos[1]]==1 && (!length(intpos0)>0) ) { xs[intpos[1]] <- NA } intercept(object,ys) <- char2num(xs[intpos[1]]) xs <- xs[-intpos] res[intpos] <- NULL } object <- addvar(object,xs,reindex=FALSE ,...) exolist <- c() for (i in seq_len(length(xs))) { ## Extract transformation statements: var~(expr) xf0 <- strsplit(xx[[i]],"~")[[1]] if (length(xf0)>1) { myexpr <- xf0[2] ftr <- toformula(y="",x=paste0("-1+I(",myexpr,")")) xtr <- all.vars(ftr) xf0 <- xf0[1] transform(object, y=xf0, x=xtr) <- function(x) { structure(model.matrix(ftr,as.data.frame(x)),dimnames=list(NULL,xf0)) } } xf <- unlist(strsplit(xf0,"[\\[\\]]",perl=TRUE)) if (length(xf)>1) { xpar <- strsplit(xf[2],":")[[1]] if (length(xpar)>1) { val <- ifelse(xpar[2]=="NA",NA,xpar[2]) valn <- char2num(val) covariance(object,xs[i]) <- ifelse(is.na(valn),val,valn) } val <- ifelse(xpar[1]=="NA",NA,xpar[1]) valn <- char2num(val) if (is.na(val) || val!=".") { intercept(object,xs[i]) <- ifelse(is.na(valn),val,valn) notexo <- c(notexo,xs[i]) } } else { exolist <- c(exolist,xs[i]) } } for (i in seq_len(length(ys))) { y <- ys[i] yf <- unlist(strsplit(yy[i],"[\\[\\]]",perl=TRUE)) if (length(yf)>1) { ypar <- strsplit(yf[2],":")[[1]] if (length(ypar)>1) { val <- ifelse(ypar[2]=="NA",NA,ypar[2]) valn <- char2num(val) covariance(object,y) <- ifelse(is.na(valn),val,valn) } val <- ifelse(ypar[1]=="NA",NA,ypar[1]) valn <- char2num(val) if (is.na(val) || val!=".") { intercept(object,y) <- ifelse(is.na(valn),val,valn) } } } curvar <- index(object)$var if (exo) { oldexo <- exogenous(object) newexo <- setdiff(exolist,c(notexo,curvar,ys)) exogenous(object) <- union(newexo,setdiff(oldexo,notexo)) } } return(list(object=object, yx=yx, X=X, ys=ys, xx=xx, xs=xs, yy=yy, ys=ys, res=res, notexo=notexo, intpos=intpos, invlink=invlink, lhs=lhs, iscovar=iscovar)) } lava/R/weights.R0000644000176200001440000000017613162174023013176 0ustar liggesusers##' @export `Weights` <- function(x,...) UseMethod("Weights") ##' @export Weights.default <- function(x,...) eval(x$weights) lava/R/By.R0000644000176200001440000000262213162174023012074 0ustar liggesusers##' Apply a Function to a Data Frame Split by Factors ##' ##' Simple wrapper of the 'by' function ##' @title Apply a Function to a Data Frame Split by Factors ##' @param x Data frame ##' @param INDICES Indices (vector or list of indices, vector of column names, or formula of column names) ##' @param FUN A function to be applied to data frame subsets of 'data'. ##' @param COLUMNS (Optional) subset of columns of x to work on ##' @param array if TRUE an array/matrix is always returned ##' @param ... Additional arguments to lower-level functions ##' @author Klaus K. Holst ##' @export ##' @examples ##' By(datasets::CO2,~Treatment+Type,colMeans,~conc) ##' By(datasets::CO2,~Treatment+Type,colMeans,~conc+uptake) By <- function(x,INDICES,FUN,COLUMNS,array=FALSE,...) 
{ if (inherits(INDICES,"formula")) { INDICES <- as.list(model.frame(INDICES,x)) } else { if (is.character(INDICES) && length(INDICES)!=nrow(x)) { INDICES <- as.list(x[,INDICES,drop=FALSE]) } } if (!missing(COLUMNS)) { if (inherits(COLUMNS,"formula")) { x <- model.frame(COLUMNS,x) } else { x <- x[,COLUMNS,drop=FALSE] } } a <- by(x, INDICES, FUN=FUN, ...) if (NCOL(x)==1 && !array) { ##DimElem <- length(a[rep(1,length(dim(a)))][[1]]) a <- a[] attr(a,"call") <- NULL ## a <- array(a,) } return(a) } lava/R/graph.R0000644000176200001440000000176713162174023012634 0ustar liggesusers##' Extract graph ##' ##' Extract or replace graph object ##' ##' ##' @aliases Graph Graph<- ##' @usage ##' ##' Graph(x, ...) ##' ##' Graph(x, ...) <- value ##' ##' @param x Model object ##' @param value New \code{graphNEL} object ##' @param \dots Additional arguments to be passed to the low level functions ##' @author Klaus K. Holst ##' @seealso \code{\link{Model}} ##' @keywords graphs models ##' @export ##' @examples ##' ##' m <- lvm(y~x) ##' Graph(m) ##' ##' @export `Graph` <- function(x,...) UseMethod("Graph") ##' @export `Graph.lvm` <- function(x,add=FALSE,...) { if ((is.null(x$graph) || length(x$graph)==0) & add) { m <- Model(x) return(plot(m,noplot=TRUE)) } else return(x$graph) } ##' @export `Graph.lvmfit` <- function(x,...) Graph.lvm(x,...) ##' @export "Graph<-" <- function(x,...,value) UseMethod("Graph<-") ##' @export "Graph<-.lvmfit" <- function(x,...,value) { x$graph <- value; return(x) } ##' @export "Graph<-.lvm" <- function(x,...,value) { x$graph <- value; return(x) } lava/R/iv.R0000644000176200001440000002316413162174023012144 0ustar liggesusers###{{{ Objective IV_method.lvm <- NULL IV_objective.lvm <- function(x,p,data,...) { IV2(x,data,...) } IV_variance.lvm <- function(x,p,data,opt,...) { opt$vcov } IV0_method.lvm <- NULL IV0_objective.lvm <- function(x,p,data,...) { IV2(x,data,type="non-robust",...) } IV0_variance.lvm <- function(x,p,data,opt,...) { opt$vcov } IV1_method.lvm <- NULL IV1_objective.lvm <- function(x,p,data,...) { IV(x,data) } IV1_variance.lvm <- function(x,p,data,opt,...) 
{ opt$vcov } ###}}} Objective CondVar <- function(S,idx) { idx2 <- setdiff(seq_len(ncol(S)),idx) S11 <- S[idx2,idx2]; S22 <- S[idx,idx] S12 <- S[idx2,idx] S11-S12%*%solve(S22)%*%t(S12) } varest <- function(x,data) { p <- IV(x,data)$estimate idx <- match(names(p),coef(x,mean=TRUE)) x0 <- parfix(Model(x),idx,p) index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) A <- t(index(x)$A) Afix <- A; Afix[t(index(x)$M0)==1] <- 0 A[A!=0] <- 1 k <- nrow(A) I <- diag(nrow=k) Ap <- modelVar(x)$A ## Estimated parameter matrix indicators <- setdiff(vars(x)[rowSums(A)==1],exogenous(x)) responses <- endogenous(x,top=TRUE) y.indicators <- responses[rowSums(A[responses,])==1] Sigma <- var(data[,manifest(x)]) var.eta <- c() for (eta in latent(x)) { m.sub <- subset(Model(x),c(eta,indicators)) reachable <- acc(x$M,eta) ys <- intersect(names(reachable),y.indicators) lambdas <- c() for (y in ys) { pp <- path(Model(x), from=eta, to=y) lambda1 <- 0 for (i in seq_along(pp)) { lambda <- 1 for (j in seq_len(length(pp[[i]])-1)) lambda <- lambda*Ap[pp[[i]][j],pp[[i]][j+1]] lambda1 <- lambda1+lambda } lambdas <- c(lambdas,lambda1) } val <- outer(1/lambdas,1/lambdas)*Sigma[ys,ys] var.eta <- c(var.eta, mean(val[upper.tri(val)])) } S <- rep(0,k); S[match(manifest(x),vars(x))] <- diag(Sigma); S[match(latent(x),vars(x))] <- var.eta; names(S) <- vars(x) I <- diag(nrow=k) IA <- (I-t(Ap)) IA%*%cbind(S)%*%t(IA) } ## Instrumental Variable Estimator / 2SLS ##' @export IV <- function(m,data,R2thres=0,type="robust", ...) { if (length(constrain(m))>0) stop("Nonlinear constrains not supported!") if (inherits(m,"lvmfit")) { m <- Model(m) } R2 <- cor(data[,manifest(m)])^2 A <- t(index(m)$A) Afix <- A; Afix[t(index(m)$M0)==1] <- 0 A[A!=0] <- 1 P <- index(m)$P k <- nrow(A) I <- diag(nrow=k) B <- rbind(I,solve(I-A)) VV <- B%*%P%*%t(B) u.var <- index(m)$vars all.idx <- seq_along(u.var) lat.idx <- with(index(m), which(vars%in%latent)) if (length(lat.idx)==0) stop("Estimator only defined for models with latent variable") y.var <- endogenous(m) y.idx <- which(index(m)$vars%in%y.var) x.idx <- which(vars(m)%in%exogenous(m)) ## Set of Indicator variables: indicators <- c() for (i in seq_len(nrow(A))) { ifix <- (Afix[i,]==1) if ((sum(ifix)==1) & all(A[i,-which(ifix)]==0)) indicators <- c(indicators, i) } y.indicators <- intersect(indicators,y.idx) y.scale <- list() for (eta in lat.idx) { pred.eta <- intersect(y.idx, which(Afix[,eta]==1)) ## Candidates for ## eta = y-epsilon if (length(pred.eta)<1) pred.eta <- intersect(lat.idx, which(Afix[,eta]==1)) myidx <- c() for (y in pred.eta) { y.pred <- setdiff(eta,which(A[y,]==1)) ## No other variables predicting y? if (length(y.pred)==0) myidx <- c(myidx,y) } y.scale <- c(y.scale, list(myidx)) } if (any(unlist(lapply(y.scale, function(x) length(x)))<1)) stop("At least one scale-measurement pr. latent variable") vv <- setdiff(seq_len(k),c(unlist(y.scale),x.idx)) Ecor <- list() eta.surrogate <- c() latno <- 0 for (e in lat.idx) { latno <- latno+1 y0 <- y.scale[[latno]][1] if (!(y0%in%lat.idx)) { eta.surrogate <- c(eta.surrogate,vars(m)[y0]) Ecor <- c(Ecor,list(y0)) } else { v0 <- vars(m)[-c(e,indicators)] ##m.sub <- subset(m,vars(m)[c(e,indicators)]) m.sub <- rmvar(m,v0) i <- 0 while (i0) { Debug(vars(m)[v]) pred.lat <- intersect(pred,lat.idx) # Any latent predictors? 
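                ## Latent variables (response or predictors) are represented by their surrogate
                ## (scale) indicators; candidate instruments are variables structurally uncorrelated
                ## with the relevant residual terms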
lpos <- match(v,lat.idx) lppos <- match(pred.lat,lat.idx) ecor <- c(v,unlist(Ecor[lppos])) if (!is.na(lpos)) { v0 <- match(eta.surrogate[lpos],vars(m)) ecor <- c(ecor,Ecor[[lpos]]) } else { v0 <- v } ecor <- unique(c(v0,ecor)) XX <- vars(m)[A[v,]==1] intpred <- exogenous(m) newf <- c() if (length(pred.lat)>0) { intpred <- vars(m) for (i in seq_along(pred.lat)) { uncor <- which(colSums(VV[ecor,k+seq_len(k),drop=FALSE])==0) uncor <- setdiff(uncor,c(lat.idx)) mypred <- vars(m)[uncor] XX[XX==vars(m)[pred.lat[i]]] <- eta.surrogate[lppos[i]] ## allpred <- c(allpred, mypred) intpred <- intersect(intpred,mypred) f <- toformula(eta.surrogate[lppos[i]],mypred) ff <- c(ff, f) f2 <- list(f) names(f2) <- vars(m)[i] newf <- c(newf,f2) } } intpred <- intersect(intpred,manifest(m)) R2max <- apply(R2[XX,intpred,drop=FALSE],2,max) if (any(R2max=R2thres] newf <- list(intpred); names(newf) <- vars(m)[v] instruments <- c(instruments, newf) covariates <- unique(c(setdiff(colnames(A)[A[v,]==1],latent(m)),intpred))##allpred) if (length(covariates)==0) stop("No instruments") Z <- model.matrix(toformula("",c("1",XX)),data) Y <- as.matrix(data[,vars(m)[v0]]) V <- model.matrix(toformula("",c("1",unique(covariates))),data) count <- count+1 V. <- c(V.,list(V)) Z. <- c(Z.,list(Z)) Y. <- c(Y.,list(Y)) XX <- vars(m)[A[v,]==1 & Afix[v,]!=1] parname <- c(parname, c(vars(m)[v0],paste(vars(m)[v],XX,sep=lava.options()$symbol[1]))) } else { if (vars(m)[v]%in%latent(m)) { lpos <- match(v,lat.idx) v0 <- match(eta.surrogate[lpos],vars(m)) Y <- matrix(data[,vars(m)[v0]],ncol=1) Y. <- c(Y.,list(Y)) V. <- c(V.,list(cbind(rep(1,nrow(Y))))) Z. <- c(Z.,list(cbind(rep(1,nrow(Y))))) parname <- c(parname, names(eta.surrogate)[lpos]) } } } LS <- function(X) { with(svd(X), v%*%diag(1/d,nrow=length(d))%*%t(u)) } projection <- function(X) X%*%LS(X) P0 <- lapply(V.,LS) Zhat <- list(); for (i in seq_along(Z.)) Zhat <- c(Zhat, list(V.[[i]]%*%(P0[[i]]%*%Z.[[i]]))) ZhatLS <- lapply(Zhat,LS) theta <- list(); for (i in seq_along(Y.)) theta <- c(theta, list(ZhatLS[[i]]%*%Y.[[i]])) u <- c() for (i in seq_along(Y.)) u <- cbind(u, Y.[[i]]-Z.[[i]]%*%theta[[i]]) covu <- crossprod(u)/nrow(u) theta.npar <- unlist(lapply(theta,length)) theta.ncum <- c(0,cumsum(theta.npar)) vartheta <- matrix(0,ncol=sum(theta.npar),nrow=sum(theta.npar)) for (i in seq_along(theta)) { for (j in seq(i,length(theta))) { idx1 <- seq_len(theta.npar[i]) + theta.ncum[i] idx2 <- seq_len(theta.npar[j]) + theta.ncum[j] if (type=="robust") { zi <- ZhatLS[[i]] for (k in seq(nrow(zi))) zi[k,] <- zi[k,]*u[,i] zj <- ZhatLS[[j]] for (k in seq(nrow(zj))) zj[k,] <- zj[k,]*u[,j] uZZ <- zi%*%t(zj) } else { uZZ <- covu[i,j]* (ZhatLS[[i]]%*%t(ZhatLS[[j]])) } vartheta[idx1,idx2] <- uZZ if (i!=j) { vartheta[idx2,idx1] <- t(uZZ) } } } parname[which(parname%in%eta.surrogate)] <- names(eta.surrogate)[which(eta.surrogate%in%parname)] coef <- cbind(unlist(theta),diag(vartheta)^0.5); rownames(coef) <- parname; colnames(coef) <- c("Estimate","Std.Err") res <- list(estimate=coef[,1], vcov=vartheta) attributes(res)$surrogates <- eta.surrogate attributes(res)$instruments <- instruments return(res) } IV2 <- function(m,data,control=list(),...) { if (is.null(control$R2thres)) control$R2thres <- 0 res <- IV(m,data,R2thres=control$R2thres,...) 
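    ## Fix the IV point estimates in the model; unless disabled via control$variance,
    ## the remaining (variance) parameters are then estimated with these held fixed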
p <- res$estimate idx <- match(names(p),coef(m,mean=TRUE)) x0 <- parfix(m,idx,p) index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) idx0 <- order(idx) p0 <- p[idx0] V0 <- res$vcov[idx0,idx0] if (is.null(control$variance) || control$variance) { suppressWarnings(e0 <- estimate(x0,data,...,silent=TRUE,quick=TRUE)) p0 <- c(p0,e0) V0 <- V0%++%matrix(0,ncol=length(e0),nrow=length(e0)) } R2 <- noquote(formatC(cor(data[,manifest(m)])^2)) colnames(R2) <- rownames(R2) <- manifest(m) l1 <- noquote(rbind(paste(latent(m),collapse=","), paste(attributes(res)$surrogates,collapse=","), "")) rownames(l1) <- c("Latent variables","Surrogate variables:","") colnames(l1) <- "" ii <- attributes(res)$instruments I <- noquote(matrix(NA,ncol=2,nrow=length(ii))) rownames(I) <- rep("",nrow(I)) colnames(I) <- c("Response","Instruments") for (i in seq_along(ii)) { I[i,] <- c(names(ii)[i],paste(ii[[i]],collapse=",")) } mymsg <- list(l1,I); list(estimate=p0,vcov=V0,summary.message=function(...) { mymsg }) } lava/R/functional.R0000644000176200001440000000261313162174023013664 0ustar liggesusers##' @export "functional<-" <- function(x,...,value) UseMethod("functional<-") ##' @export "functional<-.lvm" <- function(x,to,from,...,value) { if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) ##xx <- attributes(terms(to))$term.labels myvars <- all.vars(to) xx <- setdiff(myvars,yy) if (length(yy)*length(xx)>length(value) & length(value)!=1) stop("Wrong number of values") count <- 0 for (y in yy) { count <- count+1 for (i in seq_along(xx)) { suppressWarnings(x <- regression(x,to=y,from=xx[i],silent=TRUE)) count <- count+1 if (length(value)==1) { functional(x, to=y, from=xx[i],...) <- value } else functional(x, to=y, from=xx[i],...) <- value[[count]] } } return(x) } if (missing(from) | missing(to)) return(x) edges <- paste(from,to,sep="~") x$attributes$functional[[edges]] <- value return(x) } ##' @export "functional" <- function(x,...) UseMethod("functional") ##' @export functional.lvm <- function(x,to,from,f,...) { if (!missing(f)) { functional(x,to,from,...) <- f return(x) } if (missing(from)) return(x$attributes$functional) edges <- paste(from,to,sep="~") x$attributes$functional[edges] } lava/R/kappa.R0000644000176200001440000000216013162174023012613 0ustar liggesusers################################################## ## Cohen's kappa ################################################## ##' @export kappa.multinomial <- function(z,all=FALSE,...) { pp <- length(coef(z)) if ((length(z$levels)!=2) || !(identical(z$levels[[1]],z$levels[[2]]))) stop("Expected square table and same factor levels in rows and columns") k <- length(z$levels[[1]]) zeros <- rbind(rep(0,pp)) A0 <- zeros; A0[diag(z$position)] <- 1 A <- matrix(0,ncol=pp,nrow=2*k) for (i in seq(k)) A[i,z$position[i,]] <- 1 for (i in seq(k)) A[i+k,z$position[,i]] <- 1 b <- estimate(z,function(p) as.vector(rbind(A0,A)%*%p),iid=TRUE) b2 <- estimate(b,function(p) c(p[1],sum(p[seq(k)+1]*p[seq(k)+k+1])),iid=TRUE) if (!all) { return(estimate(b2,function(p) list(kappa=(p[1]-p[2])/(1-p[2])),iid=TRUE,...)) } estimate(b2,function(p) list(kappa=(p[1]-p[2])/(1-p[2]),agree=p[1], independence=p[2]),iid=TRUE,...) } ##' @export kappa.table <- function(z,...) { kappa(multinomial(Expand(z)),...) } ##' @export kappa.data.frame <- function(z,...) { kappa(multinomial(z),...) } lava/R/contr.R0000644000176200001440000000235513162174023012652 0ustar liggesusers##' Create contrast matrix ##' ##' Create contrast matrix typically for use with 'estimate' (Wald tests). 
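##'
##' When \code{p} is a vector of parameter indices and \code{diff=TRUE} (the default),
##' row \code{i} of the returned matrix has +1 in column \code{p[1]} and -1 in column
##' \code{p[i+1]}, so each row encodes the difference between the first parameter and
##' one of the remaining parameters; e.g. \code{contr(c(2,3),n=4)} gives the single row
##' (0,1,-1,0). With \code{diff=FALSE} all non-zero entries are +1.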
##' @export ##' @param p index of non-zero entries (see example) ##' @param n Total number of parameters (if omitted the max number in p will be used) ##' @param diff If FALSE all non-zero entries are +1, otherwise the second non-zero element in each row will be -1. ##' @param ... Additional arguments to lower level functions ##' @aliases contr parsedesign ##' @examples ##' contr(2,n=5) ##' contr(as.list(2:4),n=5) ##' contr(list(1,2,4),n=5) ##' contr(c(2,3,4),n=5) ##' contr(list(c(1,3),c(2,4)),n=5) ##' contr(list(c(1,3),c(2,4),5)) ##' ##' parsedesign(c("aa","b","c"),"?","?",diff=c(FALSE,TRUE)) contr <- function(p,n,diff=TRUE,...) { if (missing(n)) n <- max(unlist(p)) if (is.character(p)) { return(parsedesign(n,p,...)) } if (is.list(p)) { return(Reduce(rbind,lapply(p, function(x) do.call(contr, list(x,n,diff[1L]))))) } if (is.character(n)) n <- length(n) if (!is.numeric(n)) { try(n <- length(coef(n)),silent=TRUE) } B <- matrix(0,ncol=n,nrow=max(1L,length(p)-1L)) B[,p[1]] <- 1L if (length(p)>1L) B[cbind(seq(nrow(B)),p[-1])] <- ifelse(diff[1L],-1,1) return(B) } lava/R/normal.R0000644000176200001440000001420213162174023013007 0ustar liggesusersintrootpn <- function(p) { ## Find integer root of x^2-x-2*p=0 n <- 0.5*(1+sqrt(1+8*p)) if (floor(n)!=n) n <- NA return(n) } rho2sigma <- function(rho) { if (length(rho)==1) return(diag(2)*(1-rho)+rho) p <- introotpn(length(rho)) if (is.na(p)) stop("Unexpected length of correlation coefficients (p=n*(n-1)/2).") sigma <- diag(nrow=p) offdiag(sigma,type=2) <- rho offdiag(sigma,type=3) <- offdiag(t(sigma),type=3) return(sigma) } ##' @export rmvn <- function(n,mu,sigma,rho,...) { if (!missing(rho)) sigma <- rho2sigma(rho) if (!missing(mu) && missing(sigma)) sigma <- diag(nrow=length(mu)) if (missing(sigma)) sigma <- matrix(1) if (is.vector(sigma)) sigma <- diag(sigma,ncol=length(sigma)) if (missing(mu)) mu <- rep(0,ncol(sigma)) PP <- with(svd(sigma), v%*%diag(sqrt(d),ncol=length(d))%*%t(u)) res <- matrix(rnorm(ncol(sigma)*n),ncol=ncol(sigma))%*%PP if (NROW(mu)==nrow(res) && NCOL(mu)==ncol(res)) return(res+mu) return(res+cbind(rep(1,n))%*%mu) } ##' @export dmvn <- function(x,mu,sigma,rho,log=FALSE,nan.zero=TRUE,norm=TRUE,...) { if (!missing(rho)) sigma <- rho2sigma(rho) if (!missing(mu) && missing(sigma)) sigma <- diag(nrow=length(mu)) if (missing(sigma)) sigma <- matrix(1) if (is.vector(sigma)) sigma <- diag(sigma,ncol=length(sigma)) if (missing(mu)) mu <- rep(0,ncol(sigma)) if (length(sigma)==1) { k <- 1 isigma <- structure(cbind(1/sigma),det=as.vector(sigma)) } else { k <- ncol(sigma) isigma <- Inverse(sigma) } if (!missing(mu)) { if (NROW(mu)==NROW(x) && NCOL(mu)==NCOL(x)) { x <- x-mu } else { x <- t(t(x)-mu) } } logval <- -0.5*(base::log(2*base::pi)*k+ base::log(attributes(isigma)$det)+ rowSums((x%*%isigma)*x)) if (nan.zero) logval[is.nan(logval)] <- -Inf if (log) return(logval) return(exp(logval)) } normal_method.lvm <- "nlminb0" normal_objective.lvm <- function(x,p,data,weights=NULL,data2=NULL,indiv=FALSE,...) 
{ if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) save.seed <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", save.seed, envir = .GlobalEnv)) set.seed(1) ii <- lava::index(x) y.idx <- ii$endo.idx x.idx <- ii$exo.idx y <- ii$endogenous ord <- lava::ordinal(x) atr <- attributes(ord) ord <- intersect(y,ord) attributes(ord) <- atr status <- rep(0,length(y)) status[match(ord,y)] <- 2 Table <- (length(y)==length(ord)) && (length(x.idx)==0) if (Table) { pat <- mets::fast.pattern(data[,y,drop=FALSE],categories=max(data[,y,drop=FALSE])+1) data <- pat$pattern colnames(data) <- y } mu <- predict(x,data=data,p=p) S <- attributes(mu)$cond.var class(mu) <- "matrix" thres <- matrix(0,nrow=length(y),max(1,attributes(ord)$K-1)); rownames(thres) <- y for (i in seq_len(length(attributes(ord)$fix))) { nn <- names(attributes(ord)$idx)[i] ii <- attributes(ord)$idx[[nn]] val <- (attributes(mu)$e[ii]) thres[nn,seq_len(length(val))] <- cumsum(c(val[1],exp(val[-1]))) } yl <- yu <- as.matrix(data[,y,drop=FALSE]) if (!inherits(yl[1,1],c("numeric","integer","logical")) || !inherits(yu[1,1],c("numeric","integer","logical"))) stop("Unexpected data (normal_objective)") if (!is.null(data2)) { yu[,colnames(data2)] <- data2 status[match(colnames(data2),y)] <- 1 } l <- mets::loglikMVN(yl,yu,status,mu,S,thres) if (!is.null(weights)) { ##if (is.matrix(weights)) weights <- weights[,1] l <- l*weights } if (Table) { l <- l[pat$group+1] } if (indiv) return(-l) return(-sum(l)) } normal_logLik.lvm <- function(object,p,data,data2=NULL,...) { res <- -normal_objective.lvm(x=object,p=p,data=data,data2=data2,...) return(res) } normal_gradient.lvm <- function(x,p,data,weights=NULL,data2=NULL,indiv=FALSE,...) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) save.seed <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", save.seed, envir = .GlobalEnv)) if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (is.null(ordinal(x)) && is.null(data2) && is.null(weights)) { D <- deriv.lvm(x,p=p) M <- moments(x,p) Y <- as.matrix(data[,manifest(x)]) mu <- M$xi%x%rep(1,nrow(Y)) ss <- -mets::scoreMVN(Y,mu,M$C,D$dxi,D$dS) if (!indiv) return(colSums(ss)) return(ss) } if (indiv) { return(numDeriv::jacobian(function(p0) normal_objective.lvm(x,p=p0,data=data,weights=weights,data2=data2,indiv=TRUE,...),p,method=lava.options()$Dmethod)) } numDeriv::grad(function(p0) normal_objective.lvm(x,p=p0,data=data,weights=weights,data2=data2,...),p,method=lava.options()$Dmethod) } normal_hessian.lvm <- function(x,p,outer=FALSE,data2=NULL,...) { if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") dots <- list(...); dots$weights <- NULL if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) save.seed <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", save.seed, envir = .GlobalEnv)) if (!outer) { f <- function(p) { set.seed(1) do.call("normal_objective.lvm", c(list(x,p=p,indiv=FALSE,data2=data2),dots)) } g <- function(p) { set.seed(1) do.call("normal_gradient.lvm", c(list(x,p=p,indiv=FALSE,data2=data2),dots)) } if (is.null(ordinal(x)) && is.null(data2)) return(numDeriv::jacobian(g,p)) else { return(numDeriv::hessian(f,p)) } } ## Else calculate outer product of the score (empirical variance of score) S <- normal_gradient.lvm(x,p=p,indiv=TRUE,...) 
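    ## J = sum_i S_i' S_i, where S_i (row i of S) is the score contribution of observation i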
J <- t(S)%*%S attributes(J)$grad <- colSums(S) return(J) } ##normal_gradient.lvm <- normal_hessian.lvm <- NULL lava/R/path.R0000644000176200001440000001754013162174023012463 0ustar liggesusers##' Extract all possible paths from one variable to another connected component ##' in a latent variable model. In an estimated model the effect size is ##' decomposed into direct, indirect and total effects including approximate ##' standard errors. ##' ##' @title Extract pathways in model graph ##' @export ##' @aliases path effects path.lvm effects.lvmfit ##' totaleffects ##' @seealso \code{children}, \code{parents} ##' @return If \code{object} is of class \code{lvmfit} a list with the following ##' elements is returned \item{idx}{ A list where each element defines a ##' possible pathway via a integer vector indicating the index of the visited ##' nodes. } \item{V }{ A List of covariance matrices for each path. } ##' \item{coef }{A list of parameters estimates for each path} \item{path }{A ##' list where each element defines a possible pathway via a character vector ##' naming the visited nodes in order. } \item{edges }{Description of 'comp2'} ##' ##' If \code{object} is of class \code{lvm} only the \code{path} element will be ##' returned. ##' ##' The \code{effects} method returns an object of class \code{effects}. ##' @note For a \code{lvmfit}-object the parameters estimates and their ##' corresponding covariance matrix are also returned. The ##' \code{effects}-function additionally calculates the total and indirect ##' effects with approximate standard errors ##' @author Klaus K. Holst ##' @keywords methods models graphs ##' @examples ##' ##' m <- lvm(c(y1,y2,y3)~eta) ##' regression(m) <- y2~x1 ##' latent(m) <- ~eta ##' regression(m) <- eta~x1+x2 ##' d <- sim(m,500) ##' e <- estimate(m,d) ##' ##' path(Model(e),y2~x1) ##' parents(Model(e), ~y2) ##' children(Model(e), ~x2) ##' children(Model(e), ~x2+eta) ##' effects(e,y2~x1) ##' ## All simple paths (undirected) ##' path(m,y1~x1,all=TRUE) ##' ##' @usage ##' \method{path}{lvm} (object, to = NULL, from, all=FALSE, ...) ##' \method{effects}{lvmfit} (object, to, from, silent=FALSE, ...) ##' @param object Model object (\code{lvm}) ##' @param to Outcome variable (string). Alternatively a formula specifying ##' response and predictor in which case the argument \code{from} is ignored. ##' @param from Response variable (string), not necessarily directly affected by ##' \code{to}. ##' @param all If TRUE all simple paths (in undirected graph) is returned ##' @param silent Logical variable which indicates whether messages are turned ##' on/off. ##' @param \dots Additional arguments to be passed to the low level functions ##' @export path <- function(object,...) UseMethod("path") ##' @export path.lvmfit <- function(object,to=NULL,from,...) { mypath <- pathM(Model(object)$M,to,from,...) 
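    ## For each path, extract the parameter estimates and the corresponding block of the
    ## parameter covariance matrix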
cc <- coef(object,level=9,labels=FALSE) ## All parameters (fixed and variable) #cc0 <- coef(object,level=1) ## Estimated parameters cc0 <- coef(object,level=2) ## Estimated parameters i1 <- na.omit(match(rownames(cc),rownames(cc0))) idx.cc0 <- which(rownames(cc)%in%rownames(cc0)); ## Position of estimated parameters among all parameters S <- matrix(0,nrow(cc),nrow(cc)); rownames(S) <- colnames(S) <- rownames(cc) V <- object$vcov npar.mean <- index(object)$npar.mean # if (object$control$meanstructure & npar.mean>0) # V <- V[-c(seq_len(npar.mean)),-c(seq_len(npar.mean))] S[idx.cc0,idx.cc0] <- V[i1,i1] ## "Covariance matrix" of all parameters idx <- list() coefs <- list() V <- list() for (i in seq_along(mypath)) { xx <- mypath[[i]] ii <- c() for (j in seq_len(length(xx)-1)) { st <- paste0(xx[j+1], lava.options()$symbol[1], xx[j]) ii <- c(ii, match(st,rownames(cc))) } idx <- c(idx, list(ii)) V <- c(V, list(S[ii,ii])) coefs <- c(coefs, list(cc[ii])) } edges <- list() for (i in seq_along(mypath)) { p0 <- mypath[[i]] ee <- c() for (i in seq_len(length(p0)-1)) { ee <- c(ee, paste(p0[i],p0[i+1],sep="~")) } edges <- c(edges, list(ee)) } res <- list(idx=idx,V=V,coef=coefs, path=mypath, edges=edges) return(res) } ##' @export path.lvm <- function(object,to=NULL,from,all=FALSE,...) { pathM(object$M,to=to,from=from,all=all,...) } ##' @export path.graphNEL <- function(object,to,from,...) { if (inherits(to,"formula")) { fvar <- extractvar(to) if (length(fvar$x)==1 & length(fvar$y)==1) return(path(object,to=fvar$y,from=fvar$x)) res <- list() for (y in fvar$y) { for (x in fvar$x) { cat("x=",x, " y=",y, "\n") res <- c(res, list(path(object,to=y,from=x))) } } return(res) } ff <- function(g,from=1,to=NULL,res=list()) { M <- graph::edgeMatrix(g) i1 <- which(M[1,]==from) for (i in i1) { e <- M[,i]; newto <- e[2]; if (is.null(to) || M[2,i]==to) { res <- c(res, list(M[,i])) } newpath <- ff(g,from=newto,to=to,list()) if (length(newpath)>0) for (j in seq_along(newpath)) { if (is.null(to) || (tail(newpath[[j]],1)==to)) res <- c(res, list(c(M[,i],newpath[[j]][-1]))) } } return(res) } idxfrom <- ifelse(is.numeric(from),from,which(from==graph::nodes(object))) ##M <- as(object,"matrix") ##reachable <- acc(M,graph::nodes(object)[idxfrom]) reachable <- graph::acc(object,graph::nodes(object)[idxfrom])[[1]] if (is.null(to)) { idxto <- reachable } else { idxto <- ifelse(is.numeric(to),to,which(to==graph::nodes(object))) } if (!(graph::nodes(object)[idxto] %in% names(reachable))) ## return(structure(list(),to=to[1],from=from[1])) return(NULL) ## stop("No directional relationship between variables") mypaths <- ff(object,idxfrom,idxto) res <- list() for (i in seq_along(mypaths)) { res <- c(res, list(graph::nodes(object)[mypaths[[i]]])) } return(res) } pathM <- function(M,to,from,all=FALSE,...) 
{ nn <- colnames(M) if (inherits(to,"formula")) { fvar <- extractvar(to) if (length(fvar$x)==1 & length(fvar$y)==1) return(pathM(M,to=fvar$y,from=fvar$x,all=all)) res <- list() for (y in fvar$y) { for (x in fvar$x) { cat("x=",x, " y=",y, "\n") res <- c(res, list(pathM(M,to=y,from=x,all=all))) } } return(res) } if (all) { ## Get all simple paths res <- simplePaths(to,from,from,M,list()) return(res) } ff <- function(g,from=1,to=NULL,res=list()) { i1 <- which(M[from,]==1) for (i in i1) { ## e <- M[,i]; newto <- e[2]; if (is.null(to) || i==to) { res <- c(res, list(c(from,i))) } newpath <- ff(g,from=i,to=to,list()) if (length(newpath)>0) for (j in seq_along(newpath)) { if (is.null(to) || (tail(newpath[[j]],1)==to)) res <- c(res, list(c(c(from,i),newpath[[j]][-1]))) } } return(res) } idxfrom <- ifelse(is.numeric(from),from,which(from==nn)) reachable <- acc(M,nn[idxfrom]) if (is.null(to)) { idxto <- reachable } else { idxto <- ifelse(is.numeric(to),to,which(to==nn)) } if (!(nn[idxto] %in% reachable)) return(NULL) ## stop("No directional relationship between variables") mypaths <- ff(M,idxfrom,idxto) res <- list() for (i in seq_along(mypaths)) { res <- c(res, list(nn[mypaths[[i]]])) } return(res) } ## Find all simple paths (no cycles) in an undirected graph simplePaths <- function(target,currentpath,visited,adjmat,allpaths) { lastnode <- currentpath[length(currentpath)] A <- (adjmat+t(adjmat))>0 if (lastnode==target) { allpaths <- c(allpaths,list(currentpath)) } else { for (neighbour in rownames(adjmat)[which(A[,lastnode])]) { if (!(neighbour%in%visited)) { currentpath <- c(currentpath,neighbour) visited <- c(visited,neighbour) allpaths <- simplePaths(target,currentpath,visited,adjmat,allpaths) visited <- setdiff(visited,neighbour) currentpath <- currentpath[-length(currentpath)] } } } return(allpaths) } lava/R/twostage.R0000644000176200001440000003044213162174023013360 0ustar liggesuserstwostagelvm <- function(object, model2, formula=NULL, model.object=FALSE, predict.fun=NULL, type="quadratic",...) 
{ if (!inherits(model2,c("lvm"))) stop("Expected lava object ('lvm',...)") if (!is.null(formula)) { model2 <- nonlinear(model2, formula, type=type) } nonlin <- NULL val <- nonlinear(model2) if (is.null(formula) && length(val)==0 && length(nonlinear(object))>0) { val <- nonlinear(object) } xnam <- c() if (length(val)>0) { predict.fun <- NULL for (i in seq_along(val)) { if (!all(val[[i]]$newx%in%xnam)) { xnam <- union(xnam,val[[i]]$newx) predict.fun <- c(predict.fun, list(val[[i]]$pred)) } model2$attributes$nonlinear <- NULL if (inherits(object,"lvmfit")) { object$model$attributes$nonlinear <- NULL } model2 <- regression(model2, to=names(val)[i], from=val[[i]]$newx) } nonlin <- val } if (model.object) { model <- Model(object) %++% model2 cl <- match.call(expand.dots=TRUE) cl[[1]] <- twostage cl$object <- object cl$model2 <- model2 cl$predict.fun <- predict.fun cl["model.object"] <- NULL return(structure(list(model=model, nonlinear=nonlin, call=cl), class="twostage.lvm")) } res <- c(list(object=object, model2=model2), list(...)) res$predict.fun <- predict.fun res$nonlinear <- val return(res) } uhat <- function(p=coef(model1), model1, data=model.frame(model1), nlobj) { if (!is.function(nlobj)) { predict.fun <- lapply(nlobj, function(x) x[["pred"]]) } else { predict.fun <- nlobj } if (inherits(model1, "lvm.mixture")) { Pr <- predict(model1, p=p, data=data) P <- list(mean=Pr, var=attr(Pr,"cond.var")) } else { P <- predictlvm(model1, p=p, data=data) } if (is.list(predict.fun)) { unams <- lapply(nlobj,function(x) x$newx) unam <- unique(unlist(unams)) args <- list(P$mean, P$var, data) res <- matrix(0, NROW(data), ncol=length(unam)) colnames(res) <- unam for (i in seq_along(predict.fun)) { res[, unams[[i]]] <- do.call(predict.fun[[i]], args) } return(res) } return(cbind(predict.fun(P$mean, P$var, model.frame(model1)))) } ##' Two-stage estimator ##' ##' Generic function. ##' ##' @seealso twostage.lvm twostage.lvmfit twostage.lvm.mixture twostage.estimate ##' @export ##' @param object Model object ##' @param ... Additional arguments to lower level functions "twostage" <- function(object,...) UseMethod("twostage") ##' Two-stage estimator (non-linear SEM) ##' ##' Two-stage estimator for non-linear structural equation models ##' @export ##' @param object Stage 1 measurement model ##' @param model2 Stage 2 SEM ##' @param data data.frame ##' @param predict.fun Prediction of latent variable ##' @param id1 Optional id-variable (stage 1 model) ##' @param id2 Optional id-variable (stage 2 model) ##' @param all If TRUE return additional output (naive estimates) ##' @param formula optional formula specifying non-linear relation ##' @param std.err If FALSE calculations of standard errors will be skipped ##' @param ... 
Additional arguments to lower level functions ##' @aliases twostage.lvmfit twostage.lvm twostage.lvm.mixture twostage.estimate nonlinear nonlinear<- ##' @examples ##' m <- lvm(c(x1,x2,x3)~f1,f1~z, ##' c(y1,y2,y3)~f2,f2~f1+z) ##' latent(m) <- ~f1+f2 ##' d <- simulate(m,100,p=c("f2,f2"=2,"f1,f1"=0.5),seed=1) ##' ##' ## Full MLE ##' ee <- estimate(m,d) ##' ##' ## Manual two-stage ##' \dontrun{ ##' m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 ##' e1 <- estimate(m1,d) ##' pp1 <- predict(e1,f1~x1+x2+x3) ##' ##' d$u1 <- pp1[,] ##' d$u2 <- pp1[,]^2+attr(pp1,"cond.var")[1] ##' m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta ##' e2 <- estimate(m2,d) ##' } ##' ##' ## Two-stage ##' m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 ##' m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta ##' pred <- function(mu,var,data,...) ##' cbind("u1"=mu[,1],"u2"=mu[,1]^2+var[1]) ##' (mm <- twostage(m1,model2=m2,data=d,predict.fun=pred)) ##' ##' if (interactive()) { ##' pf <- function(p) p["eta"]+p["eta~u1"]*u + p["eta~u2"]*u^2 ##' plot(mm,f=pf,data=data.frame(u=seq(-2,2,length.out=100)),lwd=2) ##' } ##' ##' ## Splines ##' f <- function(x) cos(2*x)+x+-0.25*x^2 ##' m <- lvm(x1+x2+x3~eta1, y1+y2+y3~eta2, latent=~eta1+eta2) ##' functional(m, eta2~eta1) <- f ##' d <- sim(m,500,seed=1,latent=TRUE) ##' m1 <- lvm(x1+x2+x3~eta1,latent=~eta1) ##' m2 <- lvm(y1+y2+y3~eta2,latent=~eta2) ##' mm <- twostage(m1,m2,formula=eta2~eta1,type="spline") ##' if (interactive()) plot(mm) ##' ##' nonlinear(m2,type="quadratic") <- eta2~eta1 ##' a <- twostage(m1,m2,data=d) ##' if (interactive()) plot(a) ##' ##' kn <- c(-1,0,1) ##' nonlinear(m2,type="spline",knots=kn) <- eta2~eta1 ##' a <- twostage(m1,m2,data=d) ##' x <- seq(-3,3,by=0.1) ##' y <- predict(a, newdata=data.frame(eta1=x)) ##' ##' if (interactive()) { ##' plot(eta2~eta1, data=d) ##' lines(x,y, col="red", lwd=5) ##' ##' p <- estimate(a,f=function(p) predict(a,p=p,newdata=x))$coefmat ##' plot(eta2~eta1, data=d) ##' lines(x,p[,1], col="red", lwd=5) ##' confband(x,lower=p[,3],upper=p[,4],center=p[,1], polygon=TRUE, col=Col(2,0.2)) ##' ##' l1 <- lm(eta2~splines::ns(eta1,knots=kn),data=d) ##' p1 <- predict(l1,newdata=data.frame(eta1=x),interval="confidence") ##' lines(x,p1[,1],col="green",lwd=5) ##' confband(x,lower=p1[,2],upper=p1[,3],center=p1[,1], polygon=TRUE, col=Col(3,0.2)) ##' } ##' ##' \dontrun{ ## Reduce timing ##' ## Cross-validation example ##' ma <- lvm(c(x1,x2,x3)~u,latent=~u) ##' ms <- functional(ma, y~u, f=function(x) -.4*x^2) ##' d <- sim(ms,500)#,seed=1) ##' ea <- estimate(ma,d) ##' ##' mb <- lvm() ##' mb1 <- nonlinear(mb,type="linear",y~u) ##' mb2 <- nonlinear(mb,type="quadratic",y~u) ##' mb3 <- nonlinear(mb,type="spline",knots=c(-3,-1,0,1,3),y~u) ##' mb4 <- nonlinear(mb,type="spline",knots=c(-3,-2,-1,0,1,2,3),y~u) ##' ff <- lapply(list(mb1,mb2,mb3,mb4), ##' function(m) function(data,...) twostage(ma,m,data=data,st.derr=FALSE)) ##' a <- cv(ff,data=d,rep=1,mc.cores=1) ##' a ##'} twostage.lvmfit <- function(object, model2, data=NULL, predict.fun=function(mu,var,data,...) cbind("u1"=mu[,1],"u2"=mu[,1]^2+var[1]), id1=NULL, id2=NULL, all=FALSE, formula=NULL, std.err=TRUE, ...) { val <- twostagelvm(object=object,model2=model2,predict.fun=predict.fun, id1=id1, id2=id2, all=all, formula=formula, ...) 
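    ## Two-stage estimator: predictions of the latent variables from the stage 1 model
    ## (and any user-defined transformations of them) are plugged into the stage 2 model
    ## as observed covariates; standard errors are afterwards corrected by stacking the
    ## estimating equations (influence functions) of the two models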
object <- val$object model2 <- val$model2 predict.fun <- val$predict.fun p1 <- coef(object) if (length(val$nonlinear)==0) { val$nonlinear <- predict.fun } pp <- uhat(p1,object,nlobj=val$nonlinear) newd <- data newd[,colnames(pp)] <- pp model2 <- estimate(model2,data=newd,...) p2 <- coef(model2) if (std.err) { if (is.null(id1)) id1 <- seq(nrow(model.frame(object))) if (is.null(id2)) id2 <- seq(nrow(model.frame(model2))) model1 <- object if (!inherits(object,"estimate")) { model1 <- estimate(NULL,coef=p1,id=id1,iid=iid(object)) } e2 <- estimate(model2, id=id2) U <- function(alpha=p1,beta=p2) { pp <- uhat(alpha,object,nlobj=val$nonlinear) newd <- model.frame(model2) newd[,colnames(pp)] <- pp score(model2,p=beta,data=newd) } Ia <- -numDeriv::jacobian(function(p) U(p),p1) stacked <- stack(model1,e2,Ia) } else { e2 <- estimate(coef=p2,vcov=NA) } coef <- model2$coef res <- model2 res$estimator <- "generic" if (std.err) { res[names(stacked)] <- stacked cc <- stacked$coefmat[,c(1,2)]; cc <- cbind(cc,cc[,1]/cc[,2],stacked$coefmat[,5]) coef[,] <- cc res$coef <- coef res$vcov <- vcov(stacked) if (all) { res$naive <- model2 res$naive.robust <- e2 } } else { res$coef[,-1] <- NA } res$fun <- predict.fun res$estimate1 <- object res$estimate2 <- model2 res$nonlinear <- val$nonlinear structure(res,class=c("twostage.lvmfit","measurement.error","lvmfit","estimate")) } ##' @export estimate.twostage.lvm <- function(x,data,...) { if (missing(data)) stop("'data' needed") m1 <- x$call$object m2 <- x$call$model2 nl <- x$nonlinear if (!inherits(m1,"lvmfit")) { args <- c(list(x=m1, data=data), list(...)) args <- args[intersect(names(as.list(base::args(estimate.lvm))),names(args))] m1 <- do.call(estimate, args) } m2$attributes$nonlinear <- nl twostage(object=m1,model2=m2,data=data,predict.fun=nl[[1]]$pred,...) } ##' @export twostage.twostage.lvm <- function(object,...) estimate.twostage.lvm(object,...) ##' @export twostage.lvm <- function(object,model2,data=NULL, ...) { if (is.null(data)) { return(twostagelvm(object=object, model2=model2, model.object=TRUE, ...)) } args <- c(list(x=object, data=data), list(...)) args <- args[intersect(names(as.list(base::args(estimate.lvm))),names(args))] e1 <- do.call(estimate, args) twostage(object=e1,model2=model2,data=data, ...) } ##' @export twostage.lvm.mixture <- twostage.lvmfit ##' @export twostage.estimate <- twostage.lvmfit ##' @export print.twostage.lvm <- function(x,...) { printline() cat("Model 1:\n") print(Model(x$call$object)) printline() cat("Model 2:\n") print(Model(x$call$model2)) } ##' @export plot.twostage.lvm <- function(x,...) { model <- x$model m1 <- Model(x$call$object) m2 <- x$call$model2 nl <- nonlinear(x) model <- regression(model, to=nl[[1]]$newx, from=nl[[1]]$x) elist <- edgeList(m1) vlist <- vars(m1) model <- beautify(model) for (i in seq_len(nrow(elist))) { e <- toformula(y=vlist[elist[i,2]],x=vlist[elist[i,1]]) edgelabels(model, e, cex=0.7) <- 1 } elist <- edgeList(m2) vlist <- vars(m2) for (i in seq_len(nrow(elist))) { e <- toformula(vlist[elist[i,2]],vlist[elist[i,1]]) edgelabels(model, e, cex=0.7) <- 2 } nodecolor(model, nl[[1]]$newx) <- "gray" for (xx in nl[[1]]$newx) { e <- toformula(y=names(nl)[1],x=xx) edgelabels(model,e,col="gray", cex=0.7, lty=1) <- 2 } for (xx in nl[[1]]$newx) { e <- toformula(y=xx,x=nl[[1]]$x) edgelabels(model,e,col="gray", cex=0.7, lty=2) <- "" } plot(model, ...) } ##' @export predict.twostage.lvmfit <- function(object, newdata, variable=names(nonlinear(object)), p=coef(object), type=c("model2","latent"), ...) 
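## Prediction for a two-stage fit:
## - If 'newdata' only contains the stage-one latent predictor(s), or
##   type="latent", the estimated non-linear association f(p; u) between
##   the latent variables is evaluated directly in 'newdata'.
## - Otherwise the stage-one predictions are computed for 'newdata',
##   appended as new columns, and passed on to predict() for the
##   stage-two model (type="model1" returns the stage-one predictions).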
{ if (missing(newdata)) stop("provide data for prediction") nl <- nonlinear(object) unam <- unique(unlist(lapply(nl,function(x) x$x))) if (is.vector(newdata) || all(colnames(newdata)%in%unam)) type <- "latent" if (tolower(type[1])%ni%c("latent")) { p1 <- coef(object$estimate1) pred1 <- uhat(p1, data=newdata, object$estimate1, nlobj=nl) if (tolower(type[1])==c("model1")) return(pred1) newdata <- as.data.frame(newdata) newdata[,colnames(pred1)] <- pred1 pred <- predict(object$estimate2,...,p=p,data=newdata) attr(pred,"p") <- NULL attr(pred,"e") <- NULL return(pred) } ## Association between predicted latent variables and child nodes: if (is.numeric(variable)) { variable <- names(nonlinear(object))[variable] } nl <- nl[variable] res <- matrix(nrow=NROW(newdata),ncol=length(nl)) colnames(res) <- names(nl) ##unam <- unique(unlist(lapply(nl, function(x) x$newx))) #colnames(res) <- unam for (i in seq_along(nl)) { pnam <- c(variable,paste0(variable,"~",nl[[i]]$newx)) pidx <- match(pnam,names(coef(object))) b <- p[pidx] F <- nl[[i]]$f if (is.vector(newdata)) { res[,i] <- F(b,newdata) } else { res[,i] <- F(b,newdata[,nl[[i]]$x]) } } return(res) } lava/R/interactive.R0000644000176200001440000000626313162174023014044 0ustar liggesusers##' @export colsel <- function(locate,...) { ytop <- rep(seq(1/26,1,by=1/26),each=26)[1:657] ybottom <- rep(seq(0,1-1/26,by=1/26),each=26)[1:657] xleft <- rep(seq(0,1-1/26,by=1/26),times=26)[1:657] xright <- rep(seq(1/26,1,by=1/26),times=26)[1:657] pall <- round(col2rgb(colors())/256) pall <- colSums(pall) ; pall2 <- character(0) pall2[pall>0] <- "black" pall2[pall==0] <- "white" par(mar=c(0,0,1,0)) plot.new() title(main="Palette of colors()") rect(xleft,ybottom,xright,ytop,col=colors()) text(x=xleft+((1/26)/2) ,y=ytop-((1/26)/2) ,labels = 1:657 ,cex=0.55 ,col=pall2) if (missing(locate)) return(invisible(NULL)) colmat <- matrix(c(1:657,rep(NA,26^2-657)),byrow=T,ncol=26,nrow=26) cols <- NA for(i in seq_len(locate)) { h <- locator(1) if(any(h$x<0,h$y<0,h$x>1,h$y>1)) stop("locator out of bounds!") else { cc <- floor(h$x/(1/26))+1 rr <- floor(h$y/(1/26))+1 cols[i] <- colors()[colmat[rr,cc]] } } return(cols) } ##' Extension of the \code{identify} function ##' ##' For the usual 'X11' device the identification process is ##' terminated by pressing any mouse button other than the first. For ##' the 'quartz' device the process is terminated by pressing either ##' the pop-up menu equivalent (usually second mouse button or ##' 'Ctrl'-click) or the 'ESC' key. ##' @title Identify points on plot ##' @usage ##' \method{click}{default}(x, y=NULL, label=TRUE, n=length(x), pch=19, col="orange", cex=3, ...) ##' idplot(x,y,...,id=list()) ##' @aliases idplot click.default click colsel ##' @param x X coordinates ##' @param y Y coordinates ##' @param label Should labels be added? ##' @param n Max number of inputs to expect ##' @param pch Symbol ##' @param col Colour ##' @param cex Size ##' @param id List of arguments parsed to \code{click} function ##' @param \dots Additional arguments parsed to \code{plot} function ##' @author Klaus K. 
Holst ##' @seealso \code{\link{idplot}}, \code{identify} ##' @examples ##' if (interactive()) { ##' n <- 10; x <- seq(n); y <- runif(n) ##' plot(y ~ x); click(x,y) ##' ##' data(iris) ##' l <- lm(Sepal.Length ~ Sepal.Width*Species,iris) ##' res <- plotConf(l,var2="Species")## ylim=c(6,8), xlim=c(2.5,3.3)) ##' with(res, click(x,y)) ##' ##' with(iris, idplot(Sepal.Length,Petal.Length)) ##' } ##' @keywords iplot ##' @export click <- function(x,...){ UseMethod("click") } ##' @export click.default <- function(x, y=NULL, label=TRUE, n=length(x), pch=19, col="orange", cex=3, ...) { xy <- xy.coords(x, y); x <- xy$x; y <- xy$y sel <- rep(FALSE, length(x)); res <- integer(0) while(sum(sel) < n) { ans <- identify(x[!sel], y[!sel], n=1, plot=FALSE, ...) if(!length(ans)) break ans <- which(!sel)[ans] points(x[ans], y[ans], pch = pch, col=col, cex=cex) if (label) text(x[ans], y[ans], ans) sel[ans] <- TRUE res <- c(res, ans) } res } ##' @export idplot <- function(x,y,...,id=list()) { plot(x,y,...) id$x <- x; id$y <- y do.call("click",id) } lava/R/score.survreg.R0000644000176200001440000000243113162174023014327 0ustar liggesusers##' @export pars.survreg <- function(x,...) { c(coef(x),scale=x$scale) } ##' @export score.survreg <- function(x,p,scale=TRUE,logscale=FALSE,indiv.logLik=FALSE,...) { npar <- NROW(x$var) m <- model.frame(x) X <- model.matrix(terms(x), m) hasscale <- npar>length(x$coefficients) if (!missing(p)) { if (hasscale) sigma <- tail(p,1) p <- p[seq(length(p)-1)] x$linear.predictors <- as.vector(X%*%p) x$coefficients <- p x$scale <- sigma } derivatives <- residuals(x, type = "matrix") w <- model.weights(m) if (is.null(w)) w <- 1 dldLP <- w*derivatives[,"dg"] ## Derivative wrt linear-predictor p=Xbeta S <- apply(X,2,function(x) x*dldLP) if (!is.null(x$naive.var)) { V <- x$naive.var } else { V <- x$var } if (hasscale && scale) { ds <- cbind("logsigma"=derivatives[,"ds"]) if (!logscale) { ds <- ds/x$scale names(ds) <- "sigma" } S <- cbind(S,ds) } if (hasscale && !scale) { V <- V[-npar,-npar,drop=FALSE] } attributes(S)$logLik <- if (indiv.logLik) derivatives[,"g"] else sum(derivatives[,"g"]) attributes(S)$bread <- V return(S) } lava/R/lvm.R0000644000176200001440000000670013162174023012321 0ustar liggesusers##' Initialize new latent variable model ##' ##' Function that constructs a new latent variable model object ##' ##' @aliases lvm print.lvm summary.lvm ##' @param x Vector of variable names. Optional but gives control of the ##' sequence of appearance of the variables. The argument can be given as a ##' character vector or formula, e.g. \code{~y1+y2} is equivalent to ##' \code{c("y1","y2")}. Alternatively the argument can be a formula specifying ##' a linear model. ##' @param \dots Additional arguments to be passed to the low level functions ##' @param latent (optional) Latent variables ##' @param silent Logical variable which indicates whether messages are turned ##' on/off. ##' @return Returns an object of class \code{lvm}. ##' @author Klaus K. Holst ##' @seealso \code{\link{regression}}, \code{\link{covariance}}, ##' \code{\link{intercept}}, ... 
##' @keywords models regression ##' @export ##' @examples ##' ##' m <- lvm() # Empty model ##' m1 <- lvm(y~x) # Simple linear regression ##' m2 <- lvm(~y1+y2) # Model with two independent variables (argument) ##' m3 <- lvm(list(c(y1,y2,y3)~u,u~x+z)) # SEM with three items ##' lvm <- function(x=NULL, ..., latent=NULL, silent=lava.options()$silent) { M <- C <- par <- fix <- numeric(); mu <- list() noderender <- list( fill=c(), shape=c(), label=c() ) edgerender <- list(lty=c(), lwd=c(), col=c(), textCol=c(), est=c(), arrowhead=c(), dir=c(), cex=c(), futureinfo=list()) graphrender <- list(recipEdges="distinct") graphdefault <- list( "fill"="white", "shape"="rectangle", "label"=expression(NA), "lty"=1, "lwd"=1, "col"="black", "textCol"="black", "est"=0, "arrowhead"="open", "dir"="forward", "cex"=1.5, "label"=expression(), "futureinfo"=c()) modelattr <- list( randomslope=list(), survival=list(), parameter=list(), categorical=list(), distribution=list(), nonlinear=list(), functional=list(), label=list()) res <- list(M=M, par=par, cov=C, covpar=C, fix=fix, covfix=fix,latent=list(), mean=mu, index=NULL, exogenous=NA, constrain=list(), constrainY=list(), attributes=modelattr, noderender=noderender, edgerender=edgerender, graphrender=graphrender, graphdef=graphdefault) class(res) <- "lvm" myhooks <- gethook("init.hooks") for (f in myhooks) { res <- do.call(f, list(x=res)) } myvar <- NULL if (!is.list(x)) x <- list(x,...) for (myvar in x) { if (inherits(myvar,"formula")) { ## if (length(getoutcome(myvar))>0) { ## regression(res,...,silent=silent) <- myvar ## } else { ## myvar <- all.vars(myvar) ## } ## regression(res,...,silent=silent) <- myvar regression(res,silent=silent) <- myvar } if (is.character(myvar)) { res <- addvar(res, myvar, silent=silent) } } if (!is.null(myvar)) { index(res) <- reindex(res,zeroones=TRUE) } if (!is.null(latent)) { latent(res) <- latent } return(res) } lava/R/Objective.R0000644000176200001440000002072113162174023013434 0ustar liggesusers###{{{ gaussian gaussian_method.lvm <- "nlminb2" `gaussian_objective.lvm` <- function(x,p,data,S,mu,n,...) { mp <- modelVar(x,p=p,data=data,...) C <- mp$C ## Model specific covariance matrix xi <- mp$xi ## Model specific mean-vector if (!lava.options()$allow.negative.variance && any(diag(mp$P)<0)) return(NaN) iC <- Inverse(C,det=TRUE, symmetric = TRUE) detC <- attributes(iC)$det if (n<2) { z <- as.numeric(data-xi) val <- log(detC) + tcrossprod(z,crossprod(z,iC))[1] return(0.5*val) } if (!is.null(mu)){ W <- suppressMessages(crossprod(rbind(mu-xi))) T <- S+W } else { T <- S } res <- n/2*log(detC) + n/2*tr(T%*%iC) ## Objective function (Full Information ML) ## if (any(attr(iC,"lambda")<1e-16)) res <- res-1e2 return(res) } `gaussian_hessian.lvm` <- function(x,p,n,...) { dots <- list(...); dots$weights <- NULL do.call("information", c(list(x=x,p=p,n=n),dots)) } gaussian_gradient.lvm <- function(x,p,data,S,mu,n,...) { dots <- list(...); dots$weights <- NULL if (n>2) data <- NULL val <- -gaussian_score.lvm(x,p=p,S=S,mu=mu,n=n,data=data,reindex=FALSE,...) if (!is.null(nrow(val))) { val <- colSums(val) } val } gaussian_score.lvm <- function(x, data, p, S, n, mu=NULL, weights=NULL, debug=FALSE, reindex=FALSE, mean=TRUE, constrain=TRUE, indiv=FALSE,...) 
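## Score (gradient of the Gaussian log-likelihood) of a SEM with
## model-implied mean xi(theta) and covariance C(theta). With empirical
## moments (mu, S) the gradient computed below amounts to
##   n/2 * dvec(C)' vec( C^-1 T C^-1 - C^-1 ) + n * dxi' C^-1 (mu - xi),
## where T = S + (mu-xi)(mu-xi)'. With individual-level data (weights
## supplied or indiv=TRUE) the corresponding observation-wise score
## contributions are returned instead.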
{ if (!is.null(data)) { if ((nrow(data)<2 | !is.null(weights))| indiv) { mp <- modelVar(x,p,data=data[1,]) iC <- Inverse(mp$C,det=FALSE, symmetric = TRUE) MeanPar <- attributes(mp)$meanpar D <- with(attributes(mp), deriv.lvm(x, meanpar=MeanPar, p=pars, mom=mp, mu=NULL)) ##, all=length(constrain(x))>0)) myvars <- (index(x)$manifest) if (NCOL(data)!=length(myvars)) { data <- subset(data,select=myvars) } score <- matrix(ncol=length(p),nrow=NROW(data)) score0 <- -1/2*as.vector(iC)%*%D$dS if (!is.null(weights)) { W0 <- diag(nrow=length(myvars)) widx <- match(colnames(weights),myvars) } for (i in seq_len(NROW(data))) { z <- as.numeric(data[i,]) u <- z-as.numeric(mp$xi) if (!is.null(weights)) { W <- W0; diag(W)[widx] <- as.numeric(weights[i,]) score[i,] <- as.numeric(crossprod(u,iC%*%W)%*%D$dxi + -1/2*(as.vector((iC - iC %*% crossprod(rbind(u)) %*% iC)%*%W)) %*% D$dS ) } else { score[i,] <- as.numeric(score0 + crossprod(u,iC)%*%D$dxi + 1/2*as.vector(iC%*%crossprod(rbind(u))%*%iC)%*%D$dS) } }; colnames(score) <- names(p) return(score) } } ### Here the emperical mean and variance of the population are sufficient statistics: if (missing(S)) { data0 <- na.omit(data[,manifest(x),drop=FALSE]) n <- NROW(data0) S <- cov(data0)*(n-1)/n mu <- colMeans(data0) } mp <- modelVar(x,p) C <- mp$C xi <- mp$xi iC <- Inverse(C,det=FALSE, symmetric = TRUE) Debug("Sufficient stats.",debug) if (!is.null(mu) & !is.null(xi)) { W <- crossprod(rbind(mu-xi)) T <- S+W } else { T <- S } D <- deriv.lvm(x, meanpar=attributes(mp)$meanpar, mom=mp, p=p, mu=mu, mean=mean) vec.iC <- as.vector(iC) if (lava.options()$devel) { Grad <- numeric(length(p)) imean <- with(index(x)$parBelongsTo,mean) Grad[-imean] <- n/2*crossprod(D$dS[,-imean], as.vector(iC%*%T%*%iC)-vec.iC) } else { Grad <- n/2*crossprod(D$dS, as.vector(iC%*%T%*%iC)-vec.iC) } if (!is.null(mu) & !is.null(xi)) { if (!(lava.options()$devel)) { Grad <- Grad - (n/2*crossprod(D$dT,vec.iC)) } else { Grad[with(index(x)$parBelongsTo,c(mean,reg))] <- Grad[with(index(x)$parBelongsTo,c(mean,reg))] - (n/2*crossprod(D$dT,vec.iC)) } } res <- as.numeric(Grad) return(rbind(res)) } ###}}} gaussian ###{{{ gaussian variants ## Maximum Likelihood with numerical gradient + hessian gaussian0_objective.lvm <- gaussian_objective.lvm gaussian1_objective.lvm <- gaussian_objective.lvm gaussian1_gradient.lvm <- function(...) gaussian_gradient.lvm(...) gaussian1_hessian.lvm <- function(x,p,...) { myg2 <- function(p1) gaussian_gradient.lvm(x,p=p1,...) myg3 <- function(p1) numDeriv::jacobian(myg2,p1) return(myg3(p)) ## myg <- function(p1) gaussian_objective.lvm(x,p=p1,...) ## numDeriv::hessian(myg,p) } ## BHHH gaussian2_method.lvm <- "NR" gaussian2_objective.lvm <- gaussian_objective.lvm gaussian2_gradient.lvm <- gaussian_gradient.lvm gaussian2_hessian.lvm <- function(x,p,n,data,...) { S <- -score(x,p=p,n=n,data=data,indiv=TRUE,...) I <- t(S)%*%S attributes(I)$grad <- colSums(S) return(I) } ###}}} ###{{{ Weighted weighted_method.lvm <- "NR" weighted_gradient.lvm <- function(x,p,data,weights,indiv=FALSE,...) 
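## Weighted (negative) score: for each observation a diagonal weight
## matrix W_i, built from the columns of 'weights' matching the
## endogenous variables, is inserted into both the mean and the
## covariance part of the Gaussian score contribution. Returns the
## individual contributions when indiv=TRUE and otherwise the column
## sums, i.e. the gradient used by the 'NR' optimizer.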
{ myvars <- index(x)$manifest if (NCOL(data)!=length(myvars)) data <- subset(data,select=myvars) score <- matrix(ncol=length(p),nrow=NROW(data)) myy <- index(x)$endogenous myx <- index(x)$exogenous mynx <- setdiff(myvars,myx) W0 <- diag(nrow=length(myy)) widx <- match(colnames(weights),myy) pp <- modelPar(x,p) mp <- moments(x,p=p,conditional=TRUE,data=data[1,]) iC <- Inverse(mp$C,det=FALSE, symmetric = TRUE) v <- matrix(0,ncol=length(vars(x)),nrow=NROW(data)) colnames(v) <- vars(x) for (i in mynx) v[,i] <- mp$v[i] for (i in myx) v[,i] <- data[,i] xi <- t(mp$G%*%t(v)) u <- as.matrix(data)[,myy]-xi D <- deriv.lvm(x, meanpar=pp$meanpar, p=pp$p, mom=mp, mu=NULL) if (NROW(data)==1) { W <- W0; diag(W)[widx] <- as.numeric(weights[i,]) score[i,] <- as.numeric(crossprod(u,iC%*%W)%*%D$dxi + -1/2*(as.vector((iC - iC %*% crossprod(rbind(u)) %*% iC)%*%W)) %*% D$dS) return(-score) } score0 <- -0.5*as.vector(iC)%*%D$dS Gdv <- mp$G%*%D$dv for (i in seq_len(NROW(data))) { W <- W0; diag(W)[widx] <- as.numeric(weights[i,]) dxi <- (t(as.numeric(v[i,]))%x%diag(nrow=length(myy)))%*%D$dG + Gdv score[i,] <- -0.5*as.vector(iC%*%W)%*%D$dS + as.numeric(crossprod(u[i,],iC%*%W)%*%dxi + 1/2*as.vector(iC%*%crossprod(rbind(u[i,]))%*%iC%*%W)%*%D$dS) ## score[i,] <- -0.5*as.vector(iC)%*%D$dS + ## as.numeric(crossprod(u[i,],iC)%*%dxi + ## 1/2*as.vector(iC%*%tcrossprod(u[i,])%*%iC)%*%D$dS) } if (indiv) return(-score) colSums(-score) } weighted_hessian.lvm <- function(...) { S <- weighted_gradient.lvm(...,indiv=TRUE) res <- crossprod(S) attributes(res)$grad <- colSums(-S) res } weighted0_method.lvm <- "estfun" weighted0_gradient.lvm <- function(...) { val <- -gaussian_score.lvm(...) colSums(val) } weighted0_hessian.lvm <- NULL weighted2_method.lvm <- "estfun" weighted2_gradient.lvm <- function(x,p,data,weights,indiv=FALSE,...) { myvars <- index(x)$manifest if (NCOL(data)!=length(myvars)) data <- subset(data,select=myvars) score <- matrix(ncol=length(p),nrow=NROW(data)) myy <- index(x)$endogenous myx <- index(x)$exogenous mynx <- setdiff(myvars,myx) W0 <- diag(nrow=length(myy)) widx <- match(colnames(weights),myy) pp <- modelPar(x,p) for (i in seq_len(NROW(data))) { z <- as.matrix(data[i,myy]) mp <- moments(x,p=p,conditional=TRUE,data=data[i,]) u <- as.numeric(z-mp$xi[,1]) iC <- Inverse(mp$C,det=FALSE, symmetric = TRUE) D <- deriv.lvm(x, meanpar=pp$meanpar, p=pp$p, mom=mp, mu=NULL) W <- W0; diag(W)[widx] <- as.numeric(weights[i,]) score[i,] <- -0.5*as.vector(iC%*%W)%*%D$dS + as.numeric(crossprod(u,iC%*%W)%*%D$dxi + 1/2*as.vector(iC%*%crossprod(rbind(u))%*%iC%*%W)%*%D$dS) } if (indiv) return(-score) colSums(-score) } weighted2_hessian.lvm <- NULL ###}}} Weighted ###{{{ Simple `Simple_hessian.lvm` <- function(p,...) { matrix(NA, ncol=length(p), nrow=length(p)) } Simple_gradient.lvm <- function(x,p,...) { naiveGrad(function(pp) Simple_objective.lvm(x,pp,...), p) } `Simple_objective.lvm` <- function(x, p=p, S=S, n=n, ...) { m. <- moments(x,p) C <- m.$C A <- m.$A P <- m.$P J <- m.$J IAi <- m.$IAi npar.reg <- m.$npar.reg; npar <- m.$npar G <- J%*%IAi detC <- det(C) iC <- Inverse(C, symmetric = TRUE) if (detC<0 | inherits(iC, "try-error")) return(.Machine$double.xmax) res <- n/2*(log(detC) + tr(S%*%iC) - log(det(S)) - npar) res } ###}}} ObjectiveSimple lava/R/commutation.R0000644000176200001440000000107713162174023014064 0ustar liggesusers##' Finds the unique commutation matrix K: ##' \eqn{K vec(A) = vec(A^t)} ##' ##' @title Finds the unique commutation matrix ##' @param m rows ##' @param n columns ##' @author Klaus K. 
Holst ##' @export commutation <- function(m, n=m) { if (inherits(m,"matrix")) { n <- ncol(m) m <- nrow(m) } H <- function(i,j) { ## mxn-matrix with 1 at (i,j) Hij <- matrix(0, nrow=m, ncol=n) Hij[i,j] <- 1 Hij } K <- matrix(0,m*n,m*n) for (i in seq_len(m)) for (j in seq_len(n)) K <- K + H(i,j)%x%t(H(i,j)) K } lava/R/print.R0000644000176200001440000001537113162174023012663 0ustar liggesusers###{{{ print.lvm ##' @export `print.lvm` <- function(x, ..., print.transform=TRUE,print.exogenous=TRUE) { res <- NULL myhooks <- gethook("print.hooks") for (f in myhooks) { res <- do.call(f, list(x=x,...)) } if (is.null(res)) { k <- length(vars(x)) L <- rep(FALSE,k); names(L) <- vars(x); L[latent(x)] <- TRUE cat("Latent Variable Model\n") ##;" \n\twith: ", k, " variables.\n", sep=""); if (k==0) { cat("\nEmpty\n") return() } ff <- formula(x,char=TRUE,all=TRUE) R <- Rx <- Rt <- c() exo <- exogenous(x) for (f in ff) { oneline <- as.character(f); y <- strsplit(f,"~")[[1]][1] y <- trim(y) { col1 <- as.character(oneline) D <- attributes(distribution(x)[[y]])$family Tr <- x$attributes$transform[[y]] col2 <- tryCatch(x$attributes$type[[y]],error=function(...) NULL) if (is.null(col2) || is.na(col2)) col2 <- "gaussian" if (!is.null(Tr)){ col1 <- paste0(y,' ~ ',paste0(Tr$x,collapse="+"),sep="") Rt <- rbind(Rt, c(col1,"")) } if (!is.null(D$family)) { col2 <- paste0(D$family) } if (!is.null(D$link)) col2 <- paste0(col2,"(",D$link,")") if (!is.null(D$par)) col2 <- paste0(col2,"(",paste(D$par,collapse=","),")") if (is.list(distribution(x)[[y]]) && is.vector(distribution(x)[[y]][[1]])) col2 <- "fixed" if (L[y]) col2 <- paste0(col2,", latent") if (y%in%exo) { Rx <- rbind(Rx,c(col1,col2)) } else { if (is.null(Tr)) { R <- rbind(R,c(col1,col2)) } } } } if (length(R)>0) { rownames(R) <- paste(" ",R[,1]," "); colnames(R) <- rep("",ncol(R)) print(R[,2,drop=FALSE],quote=FALSE,...) } if (print.exogenous && length(Rx)>0) { cat("\nExogenous variables:") rownames(Rx) <- paste(" ",gsub("~ 1","",Rx[,1])," "); colnames(Rx) <- rep("",ncol(Rx)) print(Rx[,2,drop=FALSE],quote=FALSE,...) } if (print.transform && length(Rt)>0) { cat("\nTransformations:") rownames(Rt) <- paste(" ",gsub("~ 1","",Rt[,1])," "); colnames(Rt) <- rep("",ncol(Rt)) print(Rt[,2,drop=FALSE],quote=FALSE,...) } } cat("\n") invisible(x) } ###}}} print.lvm ###{{{ print.lvmfit ##' @export `print.lvmfit` <- function(x,level=2,labels=FALSE,...) { print(CoefMat(x,labels=labels,level=level,...),quote=FALSE,right=TRUE) minSV <- attr(vcov(x),"minSV") if (!is.null(minSV) && minSV<1e-12) { warning("Small singular value: ", format(minSV)) } pseudo <- attr(vcov(x),"pseudo") if (!is.null(pseudo) && pseudo) warning("Singular covariance matrix. Pseudo-inverse used.") invisible(x) } ###}}} print.lvmfit ###{{{ print.lvmfit.randomslope ##' @export print.lvmfit.randomslope <- function(x,labels=FALSE,level=2,...) { print(CoefMat(x,labels=labels,level=level,...),quote=FALSE,right=TRUE) invisible(x) } ###}}} ###{{{ print.multigroupfit ##' @export print.multigroupfit <- function(x,groups=NULL,...) { if (is.null(groups)) { if (x$model$missing) { modelclass <- attributes(x$model0)$modelclass nmis <- attributes(x$model0)$nmis orggroup <- unique(modelclass) groupn <- unlist(lapply(orggroup,function(i) sum(modelclass==i))) cumsumgroup <- cumsum(c(0,groupn)) groups <- unlist(lapply(orggroup,function(i) which.min(nmis[which(modelclass==i)])+cumsumgroup[i])) ## groups with max. number of variables for (i in seq_len(length(groups))) { if (nmis[groups[i]]>0) warning("No complete cases in group ",i,". 
Showing results of group with max number of variables. All coefficients can be extracted with 'coef'. All missing pattern groups belonging to this sub-model can be extracted by calling: coef(..., groups=c(",paste(which(modelclass==i),collapse=","),"))") } if (!is.null(x$model$mnameses)) x$model$names <- x$model$mnames } else { groups <- seq_len(length(x$model$lvm)) } } res <- coef(x,level=2,groups=groups,...) counter <- 0 dots <- list(...) dots$groups <- groups level <- if (is.null(dots$level)) { dots$level <- 2 ## dots$level <- ifelse("lvmfit.randomslope"%in%class(x),2,9) } myargs <- c(list(x=x), dots) myargs$groups <- groups CC <- do.call("CoefMat.multigroupfit",myargs) for (cc in res) { counter <- counter+1 cat(rep("_",52),"\n",sep="") cat("Group ", counter, sep="") myname <- x$model$names[counter] if (!is.null(myname) && !is.na(myname)) cat(": ",myname,sep="") if (!x$model$missing) cat(" (n=",nrow(Model(x)$data[[groups[counter]]]), ")", sep="") cat("\n") print(CC[[counter]],quote=FALSE,right=TRUE) } cat("\n") invisible(x) } ###}}} print.multigroupfit ###{{{ print.multigroup ##' @export print.multigroup <- function(x,...) { cat("\n") cat("Number of groups:", x$ngroup, "\n") cat("Number of free parameters (not counting mean-parameters):", x$npar,"\n") ## cat("Parameter-vector:", unlist(x$parlist), "\n\n") cat("Number of free mean parameters:", length(grep("m",x$mean)),"\n") ## cat("Mean-vector:", x$mean, "\n\n") invisible(x) } ###}}} print.multigroup ###{{{ printmany printmany <- function(A,B,nspace=1,name1=NULL,name2=NULL,digits=3,rownames=NULL,emptystr=" ",bothrows=!is.table(A),right=TRUE,print=TRUE,...) { cA <- colnames(A); cB <- colnames(B) A <- format(A, digits=digits, right=right, ...) B <- format(B, digits=digits, right=right, ...) nA <- nrow(A); nB <- nrow(B) if (nrow(A)1) { blocksize <- NROW(R) } else { blocksize <- R } messages <- 0 } if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } if (mc.cores>1L || !missing(cl)) requireNamespace("parallel",quietly=TRUE) newcl <- FALSE if (!missing(cl) && is.logical(cl)) { if (.Platform$OS.type=="windows" || TRUE) { ## Don't fork processes on windows cl <- NULL mc.cores <- 1 } else { if (cl) { cl <- parallel::makeForkCluster(mc.cores) if (!is.null(seed)) parallel::clusterSetRNGStream(cl,seed) newcl <- TRUE } } } olddata <- NULL dots <- list(...) 
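    ## If 'x' is the result of a previous simulation ('sim' object), its
    ## simulation function and call arguments are reused below so that
    ## additional replications can be appended to the existing ones. The
    ## replications are evaluated in blocks (of size 'blocksize'),
    ## optionally in parallel, and the on.exit() handler defined below
    ## assembles the final 'sim' object (column names, call, class).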
mycall <- match.call(expand.dots=FALSE) if (inherits(x,c("data.frame","matrix"))) olddata <- x if (inherits(x,"sim")) { oldtm <- attr(x,"time") oldcall <- attr(x,"call") x <- attr(x,"f") if (!is.null(f)) x <- f ex <- oldcall[["..."]] for (nn in setdiff(names(ex),names(dots))) { dots[[nn]] <- ex[[nn]] val <- list(ex[[nn]]); names(val) <- nn mycall[["..."]] <- c(mycall[["..."]],list(val)) } } else { if (!is.null(f)) x <- f if (!is.function(x)) stop("Expected a function or 'sim' object.") } if (is.null(x)) stop("Must give new function argument 'f'.") res <- val <- NULL on.exit({ if (messages>0) close(pb) if (newcl) parallel::stopCluster(cl) if (is.null(colnames) && !is.null(val)) { if (is.matrix(val[[1]])) { colnames <- base::colnames(val[[1]]) } else { colnames <- names(val[[1]]) } } base::colnames(res) <- colnames if (!is.null(olddata)) res <- rbind(olddata,res) attr(res,"call") <- mycall attr(res,"f") <- x class(res) <- c("sim","matrix") if (idx.done1) { parval_provided <- TRUE parval <- as.data.frame(R) if (is.vector(R)) names(parval) <- NULL else if (inherits(R,c("matrix","data.frame"))) names(parval) <- colnames(R) R <- NROW(parval) } else { parval <- as.data.frame(1:R) names(parval) <- NULL } nfolds <- max(1,round(R/blocksize)) idx <- split(1:R,sort((1:R)%%nfolds)) idx.done <- 0 count <- 0 if (messages>0) pb <- txtProgressBar(style=lava.options()$progressbarstyle,width=40) time <- c() robx <- function(iter__,...) tryCatch(x(...),error=function(e) NA) if (iter) formals(robx)[[1]] <- NULL for (ii in idx) { count <- count+1 if (!missing(cl) && !is.null(cl)) { pp <- c(as.list(parval[ii,,drop=FALSE]),dots,list(cl=cl,fun=robx,SIMPLIFY=FALSE),args) } else { pp <- c(as.list(parval[ii,,drop=FALSE]),dots,list(mc.cores=mc.cores,FUN=robx,SIMPLIFY=FALSE),args) } ##if (!iter & !parval_provided) pp[[1]] <- NULL if (mc.cores>1) { if (!missing(cl) && !is.null(cl)) { val <- do.call(parallel::clusterMap,pp) } else { val <- do.call(parallel::mcmapply,pp) } } else { pp$mc.cores <- NULL val <- do.call(mapply,pp) } if (messages>0) setTxtProgressBar(pb, count/length(idx)) if (is.null(res)) { ##res <- array(NA,dim=c(R,dim(val[[1]])),dimnames=c(list(NULL),dimnames(val[[1]]),NULL)) res <- matrix(NA,ncol=length(val[[1]]),nrow=R) } res[ii,] <- Reduce(rbind,val) ##rr <- abind::abind(val,along=length(dim(res))) ##res[ii,] <- abind(val,along=length(dim(res))) idx.done <- max(ii) } } ##' @export "[.sim" <- function (x, i, j, drop = FALSE) { atr <- attributes(x) if (!is.null(dim(x))) { class(x) <- "matrix" } else { class(x) <- class(x)[-1] } x <- NextMethod("[",drop=drop) atr.keep <- c("call","time") if (missing(j)) atr.keep <- c(atr.keep,"f") attributes(x)[atr.keep] <- atr[atr.keep] if (!drop) class(x) <- c("sim",class(x)) x } Time <- function(sec,print=FALSE,...) { h <- sec%/%3600 m0 <- (sec%%3600) m <- m0%/%60 s <- m0%%60 res <- c(h=h,m=m,s=s) if (print) { if (h>0) cat(h,"h ",sep="") if (m>0) cat(m,"m ",sep="") cat(s,"s",sep="") return(invisible(res)) } return(res) } Print <- function(x,n=5,digits=max(3,getOption("digits")-3),...) { mat <- !is.null(dim(x)) if (!mat) { x <- cbind(x) colnames(x) <- "" } if (is.null(rownames(x))) { rownames(x) <- seq(nrow(x)) } sep <- rbind("---"=rep('',ncol(x))) if (n<1) { print(x,quote=FALSE,digits=digits,...) } else { ## hd <- base::as.matrix(base::format(utils::head(x,n),digits=digits,...)) ## tl <- base::as.matrix(base::format(utils::tail(x,n),digits=digits,...)) ## print(rbind(hd,sep,tl),quote=FALSE,...) 
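    ## Few rows: print everything; otherwise show the first and last 'n'
    ## rows separated by a '---' line.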
if (NROW(x)<=(2*n)) { hd <- base::format(utils::head(x,2*n),digits=digits,...) print(hd, quote=FALSE,...) } else { hd <- base::format(utils::head(x,n),digits=digits,...) tl <- base::format(utils::tail(x,n),digits=digits,...) print(rbind(base::as.matrix(hd),sep,base::as.matrix(tl)), quote=FALSE,...) } } invisible(x) } ##' @export print.sim <- function(x,...) { attr(x,"f") <- attr(x,"call") <- NULL if (!is.null(dim(x))) { class(x) <- "matrix" } Print(x,...) return(invisible(x)) } ##' Plot sim object ##' ##' @examples ##' n <- 1000 ##' val <- cbind(est1=rnorm(n,sd=1),est2=rnorm(n,sd=0.2),est3=rnorm(n,1,sd=0.5), ##' sd1=runif(n,0.8,1.2),sd2=runif(n,0.1,0.3),sd3=runif(n,0.25,0.75)) ##' ##' plot.sim(val,estimate=c(1,2),true=c(0,0),se=c(4,5),equal=TRUE) ##' plot.sim(val,estimate=c(1,3),true=c(0,1),se=c(4,6),density.xlim=c(-3,3),ylim=c(-3,3)) ##' plot.sim(val,estimate=c(1,2),true=c(0,0),se=c(4,5),equal=TRUE,plot.type="single") ##' plot.sim(val,estimate=c(1),se=c(4,5,6),plot.type="single") ##' plot.sim(val,estimate=c(1,2,3),equal=TRUE) ##' plot.sim(val,estimate=c(1,2,3),equal=TRUE,byrow=TRUE) ##' plot.sim(val,estimate=c(1,2,3),plot.type="single") ##' plot.sim(val,estimate=1,se=c(3,4,5),plot.type="single") ##' ##' density.sim(val,estimate=c(1,2,3),polygon.density=c(0,10,10),polygon.angle=c(0,45,-45)) ##' @param x sim object ##' @param ... Graphical arguments to plot.sim ##' @param plot.type Single or multiple plots ##' @aliases density.sim plot.sim ##' @export ##' @export density.sim density.sim <- function(x,...,plot.type="single") { plot.sim(x,...,scatter.plot=FALSE,plot.type=plot.type) } ##' @export ##' @export plot.sim plot.sim <- function(x,estimate,se=NULL,true=NULL, names=NULL, auto.layout=TRUE, byrow=FALSE, type="p", ask=grDevices::dev.interactive(), line.col=1, line.lwd=1.8, col=c("gray60","orange","darkblue","seagreen","darkred"), pch=16,cex=0.5,lty=1, true.lty=2,true.col="gray70",true.lwd=1.2, legend, legendpos="topleft", cex.legend=0.8, plot.type=c("multiple","single"), polygon=TRUE, polygon.density=0, polygon.angle=-45, cex.axis=0.8, alpha=0.5, rug=TRUE, rug.alpha=0.5, main, cex.main=1, equal=FALSE, delta=1.15, ylim=NULL, ylab="Estimate", density.ylab="Density", density.ylim=NULL, density.xlim=NULL, density.plot=TRUE, scatter.plot=TRUE, running.mean=scatter.plot, density.alpha=0.2, border=density.col, density.lty, density.col=col, density.lwd=0.4, xlab="",...) { if (missing(estimate)) { estimate <- seq(ncol(x)) } if (is.null(estimate)) { av <- apply(x[,drop=FALSE],2,function(z) cumsum(z)/seq(length(z))) graphics::matplot(x,type="p",pch=pch, cex=cex, col=col,...) graphics::matlines(av,type="l",col=col,lty=lty,...) if (!is.null(true)) abline(h=true,lty=true.lty,...) if (missing(legend)) legend <- colnames(x) if (!is.null(legend)) graphics::legend(legendpos,legend=legend,bg="white",col=col,lty=lty,pch=pch,...) 
return(invisible(NULL)) } if (is.character(estimate)) { estimate <- match(estimate,colnames(x)) } K <- length(estimate) est <- tru <- c() if (length(se)>0) { if (K==1 && !is.list(se)) se <- list(se) else se <- as.list(se) } else { est <- estimate; tru <- true } for (i in seq_along(estimate)) { est <- c(est,list(rep(estimate[i],length(se[[i]])))) if (!is.null(true)) tru <- c(tru,list(rep(true[i],length(se[[i]])))) } if (length(se)>0) { for (i in seq_along(se)) { if (is.character(se[[i]])) se[[i]] <- match(se[[i]],colnames(x)) } } ss <- summary.sim(x,estimate=unlist(est),se=unlist(se),true=unlist(tru),names=names) oldpar <- NULL on.exit({ par(oldpar) return(invisible(ss)) }) single <- tolower(plot.type[1])=="single" if (auto.layout) { nc <- (scatter.plot || running.mean) + density.plot nr <- min(6,K) if (single) nr <- 1 oma.multi = c(2, 0, 2, 0) mar.multi = c(1.5, 4.1, 1, 1) oldpar <- par(mar=mar.multi, oma=oma.multi, cex.axis=cex.axis,las=1, ask=FALSE) if (byrow) { par(mfrow=c(nr,nc)) } else { par(mfcol=c(nc,nr)) } } dys <- c() maxdy <- 0 if (density.plot) for (i in seq(K)) { ii <- estimate[i] y <- as.vector(x[,ii]) dy <- stats::density(y) dys <- c(dys,list(dy)) maxdy <- max(maxdy,dy$y) } if (equal || single) { if (is.null(ylim)) { rg <- range(x[,estimate]) rg <- rg+c(-1,1)*abs(diff(rg)*(delta-1)) ylim <- rep(list(rg),K) } if (density.plot) { if (is.null(density.ylim)) density.ylim <- rep(list(c(0,maxdy*delta)),K) if (is.null(density.xlim)) density.xlim <- ylim } } if (!is.null(ylim)) { if (!is.list(ylim)) ylim <- list(ylim) ylim <- rep(ylim,length.out=K) } ylab <- rep(ylab,length.out=K) if (!is.null(density.ylim)) { if (!is.list(density.ylim)) density.ylim <- list(density.ylim) density.ylim <- rep(density.ylim,length.out=K) } if (!is.null(density.xlim)) { if (!is.list(density.xlim)) density.xlim <- list(density.xlim) density.xlim <- rep(density.xlim,length.out=K) } if (missing(main)) { main <- NULL if (!missing(names)) main <- names else if (K>1 && !single) main <- colnames(ss) } if (!is.null(main)) main <- rep(main,length.out=K) if (missing(density.lty)) { density.lty <- rep(1,K) if (single || !polygon) { density.lty <- 1:20 } } my.scatter.sim <- function(i,add=FALSE,colors,...) { ii <- estimate[i] if (!missing(colors)) { col <- line.col <- true.col <- colors[1] } y <- as.vector(x[,ii]) args <- list(y,ylab=ylab[i],col=Col(col[1],alpha),cex=cex,pch=pch,type=type) if (!is.null(ylim)) args <- c(args,list(ylim=ylim[[i]])) if (scatter.plot) { if (!add) { do.call(graphics::plot,args) } else { do.call(graphics::points,args) } } if (running.mean) { lines(cumsum(y)/seq_along(y),col=line.col[1],lwd=line.lwd,lty=lty) if (!is.null(true)) abline(h=true[i],lty=true.lty,col=true.col[1],lwd=true.lwd) } } my.density.sim <- function(i,add=FALSE,colors, alphas=density.alpha, auto.legend=TRUE, densities=NULL, angles=polygon.angle, ...) 
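    ## Helper: draws a kernel density estimate of the Monte Carlo
    ## estimates in column 'estimate[i]' and, when standard error columns
    ## ('se') are supplied, overlays normal densities with mean equal to
    ## the Monte Carlo mean of the estimator and standard deviation equal
    ## to the average of the corresponding standard error column (both
    ## taken from the summary table 'ss').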
{ ii <- estimate[i] y <- as.vector(x[,ii]) if (!missing(colors)) { density.col <- border <- colors col <- true.col <- colors } if (density.plot) { dy <- stats::density(y) if (is.null(density.ylim)) { density.ylim0 <- c(0,max(dy$y)*delta) } else { density.ylim0 <- density.ylim[[i]] } if (is.null(density.xlim)) { density.xlim0 <- range(dy$x) } else { density.xlim0 <- density.xlim[[i]] } if (!add) graphics::plot(0,0,type="n",main="",ylab=density.ylab,xlab=xlab,ylim=density.ylim0,xlim=density.xlim0) if (polygon) { with(dy, graphics::polygon(c(x,rev(x)),c(y,rep(0,length(y))),col=Col(density.col[1],alpha=alphas[1]),border=NA,density=densities[1],angle=angles[1])) if (!is.null(border)) with(dy, lines(x,y,col=border[1],lty=density.lty[1],lwd=density.lwd[1])) } else { graphics::lines(dy,main="",lty=density.lty[1],col=density.col[1],lwd=density.lwd[1]) } if (rug) graphics::rug(y,col=Col(col[1],rug.alpha[1])) if (!is.null(main) && !(running.mean || scatter.plot)) { title(main[i],cex.main=cex.main) } if (!is.null(true)) { abline(v=true[i],lty=true.lty,col=true.col,lwd=true.lwd) } if (!is.null(se)) { se.pos <- match(se[[i]],unlist(se)) ns <- length(se.pos)+1 se.alpha <- rep(alphas,length.out=ns)[-1] se.border <- rep(border,length.out=ns)[-1] se.col <- rep(density.col,length.out=ns)[-1] se.lty <- rep(density.lty,length.out=ns)[-1] se.lwd <- rep(density.lwd,length.out=ns)[-1] xx <- dy$x for (j in seq_along(se.pos)) { if (polygon) { yy <- dnorm(xx,mean=ss["Mean",se.pos[j]],sd=ss["SE",se.pos[j]]) if (se.alpha[j]>0) graphics::polygon(c(xx,rev(xx)),c(yy,rep(0,length(yy))),col=Col(se.col[j],alpha=se.alpha[j]),border=NA,density=densities[j],angle=angles[j]) if (!is.null(border)) lines(xx,yy,col=se.border[j],lty=se.lty[j],lwd=se.lwd[j]) } else { graphics::curve(dnorm(x,mean=ss["Mean",se.pos[j]],sd=ss["SE",se.pos[j]]),lwd=se.lwd[j],lty=se.lty[j],col=se.col[j],add=TRUE) } } if (auto.legend) legend <- c("Kernel",colnames(ss)[se.pos]) if (!is.null(legend)) { if (polygon) { dcol <- c(density.col[1],se.col) graphics::legend(legendpos,legend, fill=Col(dcol,density.alpha),border=dcol,cex=cex.legend) } else { graphics::legend(legendpos,legend, col=c(density.col[1],se.col), lty=c(density.lty[1],se.lty), lwd=c(density.lwd[1],se.lwd),cex=cex.legend) } } } } } if (single) { N <- K nk <- unlist(lapply(se,length)) if (!is.null(se)) N <- sum(unlist(nk))+K col <- rep(col,length.out=K) for (i in seq(K)) { my.scatter.sim(i,add=(i>1),colors=col[i]) } if (!is.null(main) && !byrow) { title(main[1],cex.main=cex.main) } if (missing(legend)) legend <- colnames(x)[estimate] legendold <- legend legend <- NULL density.alpha <- rep(density.alpha,length.out=K) polygon.density <- rep(polygon.density,length.out=K) polygon.angle <- rep(polygon.angle,length.out=K) for (i in seq_len(K)) { alphas <- density.alpha[i] densities <- polygon.density[i] if (!is.null(densities) && densities<1) densities <- NULL if (length(se)>0) alphas <- c(alphas,rep(0,nk[i])) my.density.sim(i,add=(i>1),colors=col[i],alphas=alphas, densities=densities, angles=polygon.angle[i], auto.legend=FALSE) } if (!is.null(legendold)) { legend <- rep(legendold,length.out=K) graphics::legend(legendpos,legend, fill=Col(col,density.alpha),border=col,cex=cex.legend) } } else { for (i in seq(K)) { my.scatter.sim(i) if (!is.null(main) && !byrow && scatter.plot) { title(main[i],cex.main=cex.main) } my.density.sim(i,auto.legend=missing(legend)) if (i==1 && ask) par(ask=ask) } } } ##' @export print.summary.sim <- function(x,group=list(c("^mean$","^sd$","^se$","^se/sd$"), 
c("^min$","^[0-9.]+%$","^max$"), c("^na$","^missing$"), c("^true$","^bias$","^rmse$")), lower.case=TRUE, na.print="", digits = max(3, getOption("digits") - 2), quote=FALSE, time=TRUE, ...) { cat(attr(x,"n")," replications",sep="") if (time && !is.null(attr(x,"time"))) { cat("\t\t\t\t\tTime: ") Time(attr(x,"time")["elapsed"],print=TRUE) } cat("\n\n") nn <- rownames(x) if (lower.case) nn <- tolower(nn) gg <- lapply(group, function(x) unlist(lapply(x,function(v) grep(v,nn)))) gg <- c(gg,list(setdiff(seq_along(nn),unlist(gg)))) x0 <- c() ng <- length(gg) for (i in seq(ng)) { x0 <- rbind(x0, x[gg[[i]],,drop=FALSE], { if(i0) NA}) } print(structure(x0,class="matrix")[,,drop=FALSE],digits=digits,quote=quote,na.print=na.print,...) cat("\n") invisible(x) } ##' @export ##' @export summary.sim summary.sim <- function(object,estimate=NULL,se=NULL, confint=NULL,true=NULL, fun,names=NULL,unique.names=TRUE, level=0.95,quantiles=c(.025,0.5,.975),...) { mfun <- function(x,...) { res <- c(mean(x,na.rm=TRUE), sd(x,na.rm=TRUE), quantile(x,c(0,quantiles,1),na.rm=TRUE), mean(is.na(x))) names(res) <- c("Mean","SD","Min",paste0(quantiles*100,"%"),"Max","Missing") res } tm <- attr(object,"time") if (!is.null(estimate) && is.character(estimate)) { estimate <- match(estimate,colnames(object)) } if (!missing(fun)) { if (!is.null(estimate)) object <- object[,estimate,drop=FALSE] res <- lapply(seq(ncol(object)), function(i,...) fun(object[,i,drop=TRUE],i,...),...) res <- matrix(unlist(res),nrow=length(res[[1]]),byrow=FALSE) if (is.null(dim(res))) { res <- rbind(res) } if (is.null(rownames(res))) { rownames(res) <- names(fun(0,1,...)) } if (is.null(colnames(res))) { colnames(res) <- colnames(object) } return(structure(res, n=NROW(object), time=tm, class=c("summary.sim","matrix"))) } if (!is.null(estimate)) { est <- apply(object[,estimate,drop=FALSE],2,mfun) } else { est <- apply(object,2,mfun) } if (!is.null(true)) { if (length(true)!=length(estimate)) { ##stop("'true' should be of same length as 'estimate'.") true <- rep(true,length.out=length(estimate)) } est <- rbind(est, rbind(True=true),rbind(Bias=est["Mean",]-true), rbind(RMSE=((est["Mean",]-true)^2+(est["SD",])^2)^.5) ) } if (!is.null(se)) { if (is.character(se)) { se <- match(se,colnames(object)) } if (length(se)!=length(estimate)) stop("'se' should be of same length as 'estimate'.") est <- rbind(est, SE=apply(object[,se,drop=FALSE],2, function(x) c(mean(x,na.rm=TRUE)))) est <- rbind(est,"SE/SD"=est["SE",]/est["SD",]) } if (!is.null(confint)) { if (is.character(confint)) { confint <- match(confint,colnames(object)) } if (length(confint)==1 && confint) { if (is.null(se)) stop("Supply confidence limits or SE") confint <- c() pos <- ncol(object) for (i in seq_along(estimate)) { z <- 1-(1-level)/2 CI <- cbind(object[,estimate[i]]-qnorm(z)*object[,se[i]], object[,estimate[i]]+qnorm(z)*object[,se[i]]) colnames(CI) <- NULL object <- cbind(object,CI) confint <- c(confint,pos+1:2) pos <- pos+2 } } if (length(confint)!=2*length(estimate)) stop("'confint' should be of length 2*length(estimate).") Coverage <- c() for (i in seq_along(estimate)) { Coverage <- c(Coverage, mean((object[,confint[2*(i-1)+1]]true[i]),na.rm=TRUE)) } est <- rbind(est,Coverage=Coverage) } if (!is.null(names)) { if (length(names)1) { ff <- getFromNamespace(ff[2],ff[1]) } f <- do.call(ff,list(x)) if (is.null(val) || !is.logical(f[[attr]])) attrvar <- f[[attr]] else attrvar <- names(f[[attr]])[which(val==f[[attr]])] return(attrvar) } if (is.character(val)) myexpr <- paste0("list(",attr,"=c(", 
paste0("\"",var,"\"=\"",val,"\"" , collapse=", "), "))") else myexpr <- paste0("list(",attr,"=c(", paste0("\"",var,"\"=",val, collapse=", "), "))") Debug(list("str=",myexpr),debug) eval(parse(text=paste0(fun,"(x) <- ",myexpr))) return(x) } lava/R/Expand.R0000644000176200001440000000221313162174023012735 0ustar liggesusers##' Create a Data Frame from All Combinations of Factors ##' ##' Simple wrapper of the 'expand.grid' function. If x is a table ##' then a data frame is returned with one row pr individual ##' observation. ##' @title Create a Data Frame from All Combinations of Factors ##' @param _data Data.frame ##' @param ... vectors, factors or a list containing these ##' @author Klaus K. Holst ##' @export ##' @examples ##' dd <- Expand(iris, Sepal.Length=2:8, Species=c("virginica","setosa")) ##' summary(dd) ##' ##' T <- with(warpbreaks, table(wool, tension)) ##' Expand(T) Expand <- function(`_data`,...) { if (missing(`_data`)) { return(expand.grid(...)) } if (inherits(`_data`,"table")) { M <- as.data.frame(`_data`) idx <- rep(seq(nrow(M)),M[,ncol(M)]) return(M[idx,-ncol(M),drop=FALSE]) } if (!inherits(`_data`,"data.frame")) { return(expand.grid(`_data`,...)) } dots <- list(...) nn <- names(dots) for (n in nn) { y <- dots[[n]] if (is.factor(`_data`[1,n])) { dots[[n]] <- factor(y,levels=levels(`_data`[1,n])) } } do.call("expand.grid",dots) } lava/R/eventTime.R0000644000176200001440000003516213162174023013467 0ustar liggesusers##' Add an observed event time outcome to a latent variable model. ##' ##' For example, if the model 'm' includes latent event time variables ##' are called 'T1' and 'T2' and 'C' is the end of follow-up (right censored), ##' then one can specify ##' ##' \code{eventTime(object=m,formula=ObsTime~min(T1=a,T2=b,C=0,"ObsEvent"))} ##' ##' when data are simulated from the model ##' one gets 2 new columns: ##' ##' - "ObsTime": the smallest of T1, T2 and C ##' - "ObsEvent": 'a' if T1 is smallest, 'b' if T2 is smallest and '0' if C is smallest ##' ##' Note that "ObsEvent" and "ObsTime" are names specified by the user. ##' ##' @author Thomas A. Gerds, Klaus K. Holst ##' @keywords survival models regression ##' @examples ##' ##' # Right censored survival data without covariates ##' m0 <- lvm() ##' distribution(m0,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' distribution(m0,"censtime") <- coxExponential.lvm(rate=10) ##' m0 <- eventTime(m0,time~min(eventtime=1,censtime=0),"status") ##' sim(m0,10) ##' ##' # Alternative specification of the right censored survival outcome ##' ## eventTime(m,"Status") <- ~min(eventtime=1,censtime=0) ##' ##' # Cox regression: ##' # lava implements two different parametrizations of the same ##' # Weibull regression model. The first specifies ##' # the effects of covariates as proportional hazard ratios ##' # and works as follows: ##' m <- lvm() ##' distribution(m,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' distribution(m,"censtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' m <- eventTime(m,time~min(eventtime=1,censtime=0),"status") ##' distribution(m,"sex") <- binomial.lvm(p=0.4) ##' distribution(m,"sbp") <- normal.lvm(mean=120,sd=20) ##' regression(m,from="sex",to="eventtime") <- 0.4 ##' regression(m,from="sbp",to="eventtime") <- -0.01 ##' sim(m,6) ##' # The parameters can be recovered using a Cox regression ##' # routine or a Weibull regression model. 
E.g., ##' \dontrun{ ##' set.seed(18) ##' d <- sim(m,1000) ##' library(survival) ##' coxph(Surv(time,status)~sex+sbp,data=d) ##' ##' sr <- survreg(Surv(time,status)~sex+sbp,data=d) ##' library(SurvRegCensCov) ##' ConvertWeibull(sr) ##' ##' } ##' ##' # The second parametrization is an accelerated failure time ##' # regression model and uses the function weibull.lvm instead ##' # of coxWeibull.lvm to specify the event time distributions. ##' # Here is an example: ##' ##' ma <- lvm() ##' distribution(ma,"eventtime") <- weibull.lvm(scale=3,shape=0.7) ##' distribution(ma,"censtime") <- weibull.lvm(scale=2,shape=0.7) ##' ma <- eventTime(ma,time~min(eventtime=1,censtime=0),"status") ##' distribution(ma,"sex") <- binomial.lvm(p=0.4) ##' distribution(ma,"sbp") <- normal.lvm(mean=120,sd=20) ##' regression(ma,from="sex",to="eventtime") <- 0.7 ##' regression(ma,from="sbp",to="eventtime") <- -0.008 ##' set.seed(17) ##' sim(ma,6) ##' # The regression coefficients of the AFT model ##' # can be tranformed into log(hazard ratios): ##' # coef.coxWeibull = - coef.weibull / shape.weibull ##' \dontrun{ ##' set.seed(17) ##' da <- sim(ma,1000) ##' library(survival) ##' fa <- coxph(Surv(time,status)~sex+sbp,data=da) ##' coef(fa) ##' c(0.7,-0.008)/0.7 ##' } ##' ##' ##' # The Weibull parameters are related as follows: ##' # shape.coxWeibull = 1/shape.weibull ##' # scale.coxWeibull = exp(-scale.weibull/shape.weibull) ##' # scale.AFT = log(scale.coxWeibull) / shape.coxWeibull ##' # Thus, the following are equivalent parametrizations ##' # which produce exactly the same random numbers: ##' ##' model.aft <- lvm() ##' distribution(model.aft,"eventtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) ##' distribution(model.aft,"censtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) ##' set.seed(17) ##' sim(model.aft,6) ##' ##' model.cox <- lvm() ##' distribution(model.cox,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' distribution(model.cox,"censtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' set.seed(17) ##' sim(model.cox,6) ##' ##' # The minimum of multiple latent times one of them still ##' # being a censoring time, yield ##' # right censored competing risks data ##' ##' mc <- lvm() ##' distribution(mc,~X2) <- binomial.lvm() ##' regression(mc) <- T1~f(X1,-.5)+f(X2,0.3) ##' regression(mc) <- T2~f(X2,0.6) ##' distribution(mc,~T1) <- coxWeibull.lvm(scale=1/100) ##' distribution(mc,~T2) <- coxWeibull.lvm(scale=1/100) ##' distribution(mc,~C) <- coxWeibull.lvm(scale=1/100) ##' mc <- eventTime(mc,time~min(T1=1,T2=2,C=0),"event") ##' sim(mc,6) ##' ##' ##' @export ##' @aliases eventTime<- ##' @param object Model object ##' @param formula Formula (see details) ##' @param eventName Event names ##' @param \dots Additional arguments to lower levels functions eventTime <- function(object,formula,eventName="status",...) { if (missing(formula)) return(object$attributes$eventHistory) if (inherits(eventName,"formula")) eventName <- all.vars(eventName) ff <- as.character(formula) timeName <- all.vars(update.formula(formula,"~1")) if (length(timeName)==0){ timeName <- "observedTime" rhs <- ff[[2]] }else{ rhs <- ff[[3]] } ## rhs <- tolower(rhs) latentTimes <- strsplit(rhs,"[(,)]")[[1]] if (latentTimes[1]!="min") stop(paste("Formula ",formula," does not have the required form, ", "e.g. 
~min(T1=1,T2=2,C=0), see (examples in) help(eventTime).")) latentTimes <- latentTimes[-1] NT <- length(latentTimes) events <- vector(NT,mode="character") for (lt in seq_len(NT)){ tmp <- strsplit(latentTimes[lt],"=")[[1]] stopifnot(length(tmp) %in% c(1,2)) if (length(tmp)==1){ events[lt] <- as.character(lt) latentTimes[lt] <- tmp } else{ events[lt] <- tmp[2] latentTimes[lt] <- tmp[1] } } events <- gsub(" ","",events) eventnum <- char2num(events) if (all(!is.na(eventnum))) { events <- eventnum } else { events <- gsub("\"","",events) } addvar(object) <- timeName ##distribution(object,timeName) <- NA ##m <- regression(m,formula(paste0("~",timeName))) ##if (missing(eventName)) eventName <- "Event" eventTime <- list(names=c(timeName,eventName), latentTimes=gsub(" ","",latentTimes), events=events ) transform(object, y=eventTime$names, x=eventTime$latentTimes) <- function(z) { idx <- apply(z,1,which.min) cbind(z[cbind(seq(NROW(z)),idx)], eventTime$events[idx]) } if (is.null(object$attributes$eventHistory)) { object$attributes$eventHistory <- list(eventTime) names(object$attributes$eventHistory) <- timeName } else { object$attributes$eventHistory[[timeName]] <- eventTime } return(object) } ##' @export "eventTime<-" <- function(object,...,value) { eventTime(object,value,...) } ## addhook("color.eventHistory","color.hooks") ## color.eventHistory <- function(x,subset=vars(x),...) { ## return(list(vars=intersect(subset,binary(x)),col="indianred1")) ## } addhook("plothook.eventHistory","plot.post.hooks") plothook.eventHistory <- function(x,...) { eh <- x$attributes$eventHistory ehnames <- unlist(lapply(eh,function(x) x$names)) for (f in eh) { x <- regression(x,to=f$names[1],from=f$latentTimes) latent(x) <- f$latentTimes kill(x) <- f$names[2] } timedep <- x$attributes$timedep for (i in seq_len(length(timedep))) { x <- regression(x,to=names(timedep)[i],from=timedep[[i]]) } return(x) } addhook("colorhook.eventHistory","color.hooks") colorhook.eventHistory <- function(x,subset=vars(x),...) { return(list(vars=intersect(subset,unlist(x$attributes$timedep)),col="lightblue4")) } addhook("print.eventHistory","print.hooks") print.eventHistory <- function(x,...) { eh <- x$attributes$eventHistory timedep <- x$attributes$timedep if (is.null(eh) & is.null(timedep)) return(NULL) ehnames <- unlist(lapply(eh,function(x) x$names)) cat("Event History Model\n") ff <- formula(x,char=TRUE,all=TRUE) R <- c() for (f in ff) { oneline <- as.character(f); y <- gsub(" ","",strsplit(f,"~")[[1]][1]) if (!(y %in% ehnames)) { col1 <- as.character(oneline) D <- attributes(distribution(x)[[y]])$family col2 <- "Normal" if (!is.null(D$family)) col2 <- paste0(D$family) if (!is.null(D$link)) col2 <- paste0(col2,"(",D$link,")") if (!is.null(D$par)) col2 <- paste0(col2,"(",paste(D$par,collapse=","),")") R <- rbind(R,c(col1," ",col2)) } } for (y in names(eh)) { col1 <- paste0(y, " = min(",paste(eh[[y]]$latentTimes,collapse=","),")") eh[[y]]$names[2] col2 <- paste0(eh[[y]]$names[2], " := {",paste(eh[[y]]$events,collapse=","),"}") R <- rbind(R,c(col1,"",col2)) } rownames(R) <- rep("",nrow(R)); colnames(R) <- rep("",ncol(R)) print(R,quote=FALSE,...) 
cat("\n") for (i in seq_len(length(timedep))) { cat("Time-dependent covariates:\n\n") cat(paste("",names(timedep)[i],"~", paste(timedep[[i]],collapse="+")),"\n") } TRUE } ## addhook("simulate.eventHistory","sim.hooks") ## simulate.eventHistory <- function(x,data,...){ ## if (is.null(eventTime(x))) { ## return(data) ## } ## else{ ## for (eh in eventTime(x)) { ## if (any((found <- match(eh$latentTimes,names(data),nomatch=0))==0)){ ## warning("Cannot find latent time variable: ", ## eh$latentTimes[found==0],".") ## } ## else{ ## for (v in seq_along(eh$latentTimes)) { ## if (v==1){ ## initialize with the first latent time and event ## eh.time <- data[,eh$latentTimes[v]] ## eh.event <- rep(eh$events[v],NROW(data)) ## } else{ ## now replace if next time is smaller ## ## in case of tie keep the first event ## eh.event[data[,eh$latentTimes[v]] 1 ## - declining if shape <1 ## - constant if shape=1 ## ## scale = exp(b0 + b1*X) f <- function(n,mu,Scale=scale,Shape=shape,...) { (- log(runif(n)) / (Scale * exp(mu)))^(1/Shape) } ff <- formals(f) expr <- "(- log(runif(n)) / (Scale * exp(mu)))^{1/Shape}" if (inherits(scale,"formula")) scale <- all.vars(scale)[1] if (is.character(scale)) { names(ff)[3] <- scale expr <- gsub("Scale",scale,expr) } if (inherits(shape,"formula")) shape <- all.vars(shape)[1] if (is.character(shape)) { names(ff)[4] <- shape expr <- gsub("Shape",shape,expr) } formals(f) <- ff e <- parse(text=expr) body(f) <- as.call(c(as.name("{"), e)) attr(f,"family") <- list(family="weibull", regression="PH", par=c(shape=shape,scale=scale)) return(f) } ##' @export coxExponential.lvm <- function(scale=1,rate,timecut){ if (missing(rate)) rate=1/scale if (missing(scale)) scale=1/rate if (missing(timecut)) { return(coxWeibull.lvm(shape=1,scale)) } if (NROW(rate)>length(timecut)) stop("Number of time-intervals (cuts) does not agree with number of rate parameters (beta0)") par <- paste(timecut,rate,sep=":") if (is.matrix(rate)) par <- "..." timecut <- c(timecut,Inf) f <- function(n,mu,...) { Ai <- function() { vals <- matrix(0,ncol=length(timecut)-1,nrow=n) ival <- numeric(n) if (is.matrix(rate)) { mu <- cbind(mu[,1],cbind(1,as.matrix(mu[,-1]))%*%t(rate)) rate <- rep(1,length(timecut)-1) } for (i in seq(length(timecut)-1)) { u <- -log(runif(n)) ##rexp(n,1) if (NCOL(mu)>1) { vals[,i] <- timecut[i] + u*exp(-mu[,1]-mu[,i+1])/(rate[i]) } else { vals[,i] <- timecut[i] + u*exp(-mu)/(rate[i]) } idx <- which(vals[,i]<=timecut[i+1] & ival==0) ival[idx] <- vals[idx,i] } ival } Ai() } attributes(f)$family <- list(family="CoxExponential",par=par) return(f) } ##' @export aalenExponential.lvm <- function(scale=1,rate,timecut=0){ if (missing(rate)) rate=1/scale if (missing(scale)) scale=1/rate if (missing(timecut)==1) { return(coxWeibull.lvm(shape=1,scale)) } if (length(rate)>length(timecut)) stop("Number of time-intervals (cuts) does not agree with number of rate parameters (beta0)") par <- paste(timecut,rate,sep=":") if (is.matrix(rate)) par <- "..." timecut <- c(timecut,Inf) f <- function(n,mu,...) 
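    ## Simulation from a piecewise constant additive (Aalen) hazard:
    ## within [timecut[i], timecut[i+1]) the hazard is rate[i] plus the
    ## linear predictor, so a candidate event time is drawn as
    ## timecut[i] + E/(rate[i] + mu) with E ~ Exp(1), and the first
    ## candidate falling inside its own interval is used.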
{ Ai <- function() { vals <- matrix(0,ncol=length(timecut)-1,nrow=n) ival <- numeric(n) if (is.matrix(rate)) { mu <- cbind(mu[,1],cbind(1,as.matrix(mu[,-1]))%*%t(rate)) rate <- rep(1,length(timecut)-1) } for (i in seq(length(timecut)-1)) { u <- -log(runif(n)) ##rexp(n,1) if (NCOL(mu)>1) { vals[,i] <- timecut[i] + u/(rate[i]+mu[,1]+mu[,i+1]) } else { vals[,i] <- timecut[i] + u/(rate[i]+mu) } idx <- which(vals[,i]<=timecut[i+1] & ival==0) ival[idx] <- vals[idx,i] } ival } Ai() } attributes(f)$family <- list(family="aalenExponential",par=par) return(f) } ##' @export coxGompertz.lvm <- function(shape=1,scale) { f <- function(n,mu,var,...) { (1/shape) * log(1 - (shape/scale) * (log(runif(n)) * exp(-mu))) } attr(f,"family") <- list(family="gompertz",par=c(shape,scale)) return(f) } lava/R/confint.R0000644000176200001440000000576113162174023013171 0ustar liggesusers##' Calculate Wald og Likelihood based (profile likelihood) confidence intervals ##' ##' Calculates either Wald confidence limits: \deqn{\hat{\theta} \pm ##' z_{\alpha/2}*\hat\sigma_{\hat\theta}} or profile likelihood confidence ##' limits, defined as the set of value \eqn{\tau}: ##' \deqn{logLik(\hat\theta_{\tau},\tau)-logLik(\hat\theta)< q_{\alpha}/2} ##' ##' where \eqn{q_{\alpha}} is the \eqn{\alpha} fractile of the \eqn{\chi^2_1} ##' distribution, and \eqn{\hat\theta_{\tau}} are obtained by maximizing the ##' log-likelihood with tau being fixed. ##' ##' @title Calculate confidence limits for parameters ##' @param object \code{lvm}-object. ##' @param parm Index of which parameters to calculate confidence limits for. ##' @param level Confidence level ##' @param profile Logical expression defining whether to calculate confidence ##' limits via the profile log likelihood ##' @param curve if FALSE and profile is TRUE, confidence limits are ##' returned. Otherwise, the profile curve is returned. ##' @param n Number of points to evaluate profile log-likelihood in ##' over the interval defined by \code{interval} ##' @param interval Interval over which the profiling is done ##' @param lower If FALSE the lower limit will not be estimated (profile intervals only) ##' @param upper If FALSE the upper limit will not be estimated (profile intervals only) ##' @param \dots Additional arguments to be passed to the low level functions ##' @return A 2xp matrix with columns of lower and upper confidence limits ##' @author Klaus K. Holst ##' @seealso \code{\link{bootstrap}{lvm}} ##' @keywords models regression ##' @examples ##' ##' m <- lvm(y~x) ##' d <- sim(m,100) ##' e <- estimate(y~x, d) ##' confint(e,3,profile=TRUE) ##' confint(e,3) ##' \donttest{ ## Reduce Ex.timings ##' B <- bootstrap(e,R=50) ##' B ##' } ##' @aliases confint.multigroupfit ##' @export ##' @method confint lvmfit confint.lvmfit <- function(object,parm=seq_len(length(coef(object))),level=0.95,profile=FALSE,curve=FALSE,n=20,interval=NULL,lower=TRUE,upper=TRUE,...) { if (is.character(parm)) { parm <- parpos(Model(object),p=parm) parm <- parm[attributes(parm)$ord] } if (!profile) { return(confint.default(object,parm=parm,level=level,...)) } res <- c() for (i in parm) { res <- rbind(res, profci.lvmfit(object,parm=i,level=level,profile=profile,n=n,curve=curve,interval=interval,lower=lower,upper=upper,...)) if (curve) return(res) } rownames(res) <- names(coef(object))[parm] colnames(res) <- paste((c(0,1) + c(1,-1)*(1-level)/2)*100,"%") return(res) } ##' @export confint.multigroupfit <- function(object,parm=seq_along(pars(object)),level=0.95, estimates=TRUE,...) 
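## Wald confidence limits for a multigroup fit:
##   estimate +/- qnorm(1-(1-level)/2) * sqrt(diag(vcov)),
## optionally (estimates=TRUE) combined with the coefficient table.
## Illustrative use (hypothetical objects m, d1, d2):
##   e <- estimate(list(m,m), list(d1,d2))
##   confint(e, level=0.95)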
{ p <- 1-(1-level)/2 res <- cbind(pars(object),pars(object)) + qnorm(p)*cbind(-1,1)%x%diag(vcov(object))^0.5 colnames(res) <- paste0(c(1-p,p)*100,"%") rownames(res) <- parpos(object); rownames(res)[is.na(rownames(res))] <- "" if (estimates) res <- cbind(coef(object,level=0)[,c(1,2,4)],res) res[parm,,drop=FALSE] } lava/R/graph2lvm.R0000644000176200001440000000047213162174023013425 0ustar liggesusers##' @export `graph2lvm` <- function(g, debug=FALSE, silent=TRUE) { res <- lvm(graph::nodes(g), debug=debug,silent=silent) M <- t(as(g, Class="matrix")) for (i in seq_len(nrow(M))) { if (any(M[,i]==1)) { res <- regression(res, rownames(M)[M[,i]==1], rownames(M)[i], silent=silent) } } res } lava/R/dsep.R0000644000176200001440000000440613162174023012457 0ustar liggesusers##' @export `dsep` <- function(object,...) UseMethod("dsep") ##' Check d-separation criterion ##' ##' Check for conditional independence (d-separation) ##' @export ##' @aliases dsep dsep.lvm ##' @param object lvm object ##' @param x Variables for which to check for conditional independence ##' @param cond Conditioning set ##' @param return.graph If TRUE the moralized ancestral graph with the ##' conditioning set removed is returned ##' @param ... Additional arguments to lower level functions ##' @details The argument 'x' can be given as a formula, e.g. x~y|z+v ##' or ~x+y|z+v With everything on the rhs of the bar defining the ##' variables on which to condition on. ##' @examples ##' m <- lvm(x5 ~ x4+x3, x4~x3+x1, x3~x2, x2~x1) ##' if (interactive()) { ##' plot(m,layoutType='neato') ##' } ##' dsep(m,x5~x1|x2+x4) ##' dsep(m,x5~x1|x3+x4) ##' dsep(m,~x1+x2+x3|x4) ##' dsep.lvm <- function(object,x,cond=NULL,return.graph=FALSE,...) { if (inherits(x,"formula")) { xf <- getoutcome(x,sep="|") xx <- attr(xf,"x") if (length(xx)==0) stop("Not a valid formula") x <- c(xf,all.vars(xx[[1]])) if (length(xx)>1) { cond <- all.vars(xx[[2]]) } } if (inherits(cond,"formula")) { cond <- all.vars(cond) } nod <- vars(object) x <- intersect(x,nod) cond <- intersect(cond,nod) V <- c(x,cond) ## Ancenstral graph keep <- c(V,ancestors(object,V)) del <- setdiff(nod,keep) if (length(del)>0) object <- rmvar(object,del) ## moralized graph man <- object for (v in V) { pa <- parents(object,v) if (length(pa)>1) man$M[pa,pa] <- 1 ## for (i in seq(length(pa)-1)) { ## for (j in seq(length(pa)-1)+1) { ## man$M[i,j] ## man <- regression(man,from=pa[i],to=pa[j]) ## } ## } } man.sel <- rmvar(man,cond) ## with(man.sel, solve(diag(nrow=nrow(M))-M)) ii <- match(x,vars(man.sel)) A <- with(man.sel, (t(M)+M)>0) dsep <- c() for (i in ii) { conn <- DFS(A,i) i0 <- setdiff(ii,i) dsep <- c(dsep,!any(i0%in%conn)) } res <- all(dsep) attr(man.sel,"dsep") <- res if (return.graph) return(man.sel) return(res) } lava/R/spaghetti.R0000644000176200001440000002353313162174023013516 0ustar liggesusers##' Spaghetti plot for longitudinal data ##' ##' @title Spaghetti plot ##' @param formula Formula (response ~ time) ##' @param data data.frame ##' @param id Id variable ##' @param group group variable ##' @param type Type (line 'l', stair 's', ...) 
##' @param lty Line type ##' @param pch Colour ##' @param col Colour ##' @param alpha transparency (0-1) ##' @param lwd Line width ##' @param level Confidence level ##' @param trend.formula Formula for trendline ##' @param tau Quantile to estimate (trend) ##' @param trend.lty Trend line type ##' @param trend.join Trend polygon ##' @param trend.delta Length of limit bars ##' @param trend Add trend line ##' @param trend.col Colour of trend line ##' @param trend.alpha Transparency ##' @param trend.lwd Trend line width ##' @param trend.jitter Jitter amount ##' @param legend Legend ##' @param by make separate plot for each level in 'by' (formula, name of column, or vector) ##' @param xlab Label of X-axis ##' @param ylab Label of Y-axis ##' @param add Add to existing device ##' @param ... Additional arguments to lower level arguments ##' @author Klaus K. Holst ##' @export ##' @examples ##' if (interactive() & requireNamespace("mets")) { ##' K <- 5 ##' y <- "y"%++%seq(K) ##' m <- lvm() ##' regression(m,y=y,x=~u) <- 1 ##' regression(m,y=y,x=~s) <- seq(K)-1 ##' regression(m,y=y,x=~x) <- "b" ##' N <- 50 ##' d <- sim(m,N); d$z <- rbinom(N,1,0.5) ##' dd <- mets::fast.reshape(d); dd$num <- dd$num+3 ##' spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), ##' trend.formula=~factor(num),trend=TRUE,trend.col="darkblue") ##' dd$num <- dd$num+rnorm(nrow(dd),sd=0.5) ## Unbalance ##' spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), ##' trend=TRUE,trend.col="darkblue") ##' spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), ##' trend.formula=~num+I(num^2),trend=TRUE,trend.col="darkblue") ##' } spaghetti <- function(formula,data,id="id",group=NULL, type="o",lty=1,pch=NA,col=1:10,alpha=0.3,lwd=1, level=0.95, trend.formula=formula,tau=NULL, trend.lty=1,trend.join=TRUE,trend.delta=0.2, trend=!is.null(tau),trend.col=col, trend.alpha=0.2,trend.lwd=3, trend.jitter=0, legend=NULL, by=NULL, xlab="Time",ylab="",add=FALSE,...) { ##spaghetti <- function(formula,data,id,type="l",lty=1,col=Col(1),trend=FALSE,trend.col="darkblue",trend.alpha=0.2,trend.lwd=3,xlab="Time",ylab="",...) { if (!lava.options()$cluster.index) stop("mets not available? Check 'lava.options()cluster.index'.") if (!is.null(by)) { if (is.character(by) && length(by==1)) { by <- data[,by] } else if (inherits(by,"formula")) { ##by <- model.matrix(update(by,~-1+.), model.frame(~.,data,na.action=na.pass)) by <- model.frame(by,data,na.action=na.pass) } cl <- match.call(expand.dots=TRUE) cl$by <- NULL datasets <- split(data,by) res <- c() for (d in datasets) { cl$data <- d res <- c(res, eval(cl,parent.frame())) } return(invisible(res)) } if (!is.null(group)) { if (is.character(group) && length(group==1)) { M <- data[,group] } else if (inherits(group,"formula")) { ##M <- model.matrix(update(group,~-1+.),data) M <- model.frame(group,data,na.action=na.pass) } else { M <- group } if (!add) plot(formula,data=data,xlab=xlab,ylab=ylab,...,type="n") dd <- split(data,M) K <- length(dd) if (length(type)0))) nn <- widenames[idx] ord <- order(char2num(unlist(lapply(nn,function(x) gsub(vname,"",x))))) idx[ord] } if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (length(x)==0) { data <- data[,c(id,y),drop=FALSE] wide <- mets::fast.reshape(data,id=id,varying=y,...) yidx <- Idx(y,names(wide)) Y <- wide[,yidx,drop=FALSE] X <- NULL matplot(t(Y),type=type,lty=lty,pch=pch,lwd=lwd,col=Col(col[1],alpha[1]),xlab=xlab,ylab=ylab,...) } else { data <- data[,c(id,x,y),drop=FALSE] wide <- mets::fast.reshape(data[order(data[,id],data[,x]),],id=id,varying=c(y,x),...) 
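## At this point the long-format data have been reshaped to wide format
## (one row per subject/id, one column per measurement occasion), so that the
## matplot() calls below can draw one profile line per subject.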
yidx <- Idx(y,names(wide)) xidx <- Idx(x,names(wide)) Y <- wide[,yidx,drop=FALSE] X <- wide[,xidx,drop=FALSE] matplot(t(X),t(Y),type=type,pch=pch,lty=lty,lwd=lwd,col=Col(col[1],alpha[1]),xlab=xlab,ylab=ylab,add=add,...) if (trend) { if (is.numeric(trend.formula)) { trend.formula <- sort(trend.formula) tf <- toformula(y,"1") res <- c() if (!is.null(tau)) { if (length(trend.alpha)0) confband(trend.formula,res[,j],line=FALSE,col=trend.col[j],lty=trend.lty[j],lwd=trend.lwd[j],delta=trend.delta,...) } } else { confband(trend.formula,res[,2],res[,3],res[,1],col=Col(trend.col,trend.alpha),lty=trend.lty,lwd=trend.lwd,polygon=trend.join,...) } } else { tf <- getoutcome(trend.formula) if (is.list(tf)) { trend.formula <- update(trend.formula,toformula(y,".")) } if (!is.null(tau)) { ##if (!require(quantreg)) stop("Install 'quantreg'") suppressWarnings(r1 <- quantreg::rq(trend.formula,data=data,tau=tau)) newdata <- data.frame(seq(min(X,na.rm=TRUE),max(X,na.rm=TRUE),length.out=100)) names(newdata) <- x pr <- predict(r1,newdata=newdata,interval="confidence",level=level) ##confband(xx,pr[,3],pr[,2],polygon=TRUE,col=Col(trend.col,trend.alpha),border=FALSE) for (i in seq_along(tau)) lines(newdata[,1],pr[,i],col=trend.col,lwd=trend.lwd,lty=trend.lty) } else { l1. <- lm(trend.formula,data) l1 <- estimate(l1.,id=data[,id],level=level) xy <- plotConf(l1.,vcov=vcov(l1),data=data,partres=FALSE,plot=FALSE,level=level,...) xx <- xy$x pr <- xy$predict$fit if (is.factor(xx)) { xx <- char2num(as.character(xx)) if (trend.jitter>0) xx <- jitter(xx,trend.jitter) confband(xx,pr[,3],pr[,2],pr[,1],col=trend.col,lwd=2) } else { confband(xx,pr[,3],pr[,2],polygon=TRUE,col=Col(trend.col,trend.alpha),border=FALSE) lines(xx,pr[,1],col=trend.col,lwd=trend.lwd,lty=trend.lty) } } } } } return(invisible(list(Y,X))) } lava/R/constrain.R0000644000176200001440000003426713162174023013534 0ustar liggesusers##' Define range constraints of parameters ##' ##' @aliases Range.lvm ##' @title Define range constraints of parameters ##' @param a Lower bound ##' @param b Upper bound ##' @return function ##' @author Klaus K. Holst ##' @export Range.lvm <- function(a=0,b=1) { if (b==Inf) { f <- function(x) { res <- a+exp(x) attributes(res)$grad <- exp res } return(f) } if (a==-Inf) { f <- function(x) { res <- -exp(x)+b attributes(res)$grad <- function(x) -exp(x) res } return(f) } f <- function(x) { res <- (a+b*exp(x))/(1+exp(x)) attributes(res)$grad <- function(x) exp(x)*(b-a-a*b*exp(x))/(1+exp(x))^2 res } return(f) } ##' Add non-linear constraints to latent variable model ##' ##' Add non-linear constraints to latent variable model ##' ##' Add non-linear parameter constraints as well as non-linear associations ##' between covariates and latent or observed variables in the model (non-linear ##' regression). ##' ##' As an example we will specify the follow multiple regression model: ##' ##' \deqn{E(Y|X_1,X_2) = \alpha + \beta_1 X_1 + \beta_2 X_2} \deqn{V(Y|X_1,X_2) ##' = v} ##' ##' which is defined (with the appropiate parameter labels) as ##' ##' \code{m <- lvm(y ~ f(x,beta1) + f(x,beta2))} ##' ##' \code{intercept(m) <- y ~ f(alpha)} ##' ##' \code{covariance(m) <- y ~ f(v)} ##' ##' The somewhat strained parameter constraint \deqn{ v = ##' \frac{(beta1-beta2)^2}{alpha}} ##' ##' can then specified as ##' ##' \code{constrain(m,v ~ beta1 + beta2 + alpha) <- function(x) ##' (x[1]-x[2])^2/x[3] } ##' ##' A subset of the arguments \code{args} can be covariates in the model, ##' allowing the specification of non-linear regression models. 
As an example ##' the non-linear regression model \deqn{ E(Y\mid X) = \nu + \Phi(\alpha + ##' \beta X)} where \eqn{\Phi} denotes the standard normal cumulative ##' distribution function, can be defined as ##' ##' \code{m <- lvm(y ~ f(x,0)) # No linear effect of x} ##' ##' Next we add three new parameters using the \code{parameter} assigment ##' function: ##' ##' \code{parameter(m) <- ~nu+alpha+beta} ##' ##' The intercept of \eqn{Y} is defined as \code{mu} ##' ##' \code{intercept(m) <- y ~ f(mu)} ##' ##' And finally the newly added intercept parameter \code{mu} is defined as the ##' appropiate non-linear function of \eqn{\alpha}, \eqn{\nu} and \eqn{\beta}: ##' ##' \code{constrain(m, mu ~ x + alpha + nu) <- function(x) ##' pnorm(x[1]*x[2])+x[3]} ##' ##' The \code{constraints} function can be used to show the estimated non-linear ##' parameter constraints of an estimated model object (\code{lvmfit} or ##' \code{multigroupfit}). Calling \code{constrain} with no additional arguments ##' beyound \code{x} will return a list of the functions and parameter names ##' defining the non-linear restrictions. ##' ##' The gradient function can optionally be added as an attribute \code{grad} to ##' the return value of the function defined by \code{value}. In this case the ##' analytical derivatives will be calculated via the chain rule when evaluating ##' the corresponding score function of the log-likelihood. If the gradient ##' attribute is omitted the chain rule will be applied on a numeric ##' approximation of the gradient. ##' @aliases constrain constrain<- constrain.default constrain<-.multigroup ##' constrain<-.default constraints parameter<- ##' @return A \code{lvm} object. ##' @author Klaus K. Holst ##' @seealso \code{\link{regression}}, \code{\link{intercept}}, ##' \code{\link{covariance}} ##' @keywords models regression ##' @examples ##' ############################## ##' ### Non-linear parameter constraints 1 ##' ############################## ##' m <- lvm(y ~ f(x1,gamma)+f(x2,beta)) ##' covariance(m) <- y ~ f(v) ##' d <- sim(m,100) ##' m1 <- m; constrain(m1,beta ~ v) <- function(x) x^2 ##' ## Define slope of x2 to be the square of the residual variance of y ##' ## Estimate both restricted and unrestricted model ##' e <- estimate(m,d,control=list(method="NR")) ##' e1 <- estimate(m1,d) ##' p1 <- coef(e1) ##' p1 <- c(p1[1:2],p1[3]^2,p1[3]) ##' ## Likelihood of unrestricted model evaluated in MLE of restricted model ##' logLik(e,p1) ##' ## Likelihood of restricted model (MLE) ##' logLik(e1) ##' ##' ############################## ##' ### Non-linear regression ##' ############################## ##' ##' ## Simulate data ##' m <- lvm(c(y1,y2)~f(x,0)+f(eta,1)) ##' latent(m) <- ~eta ##' covariance(m,~y1+y2) <- "v" ##' intercept(m,~y1+y2) <- "mu" ##' covariance(m,~eta) <- "zeta" ##' intercept(m,~eta) <- 0 ##' set.seed(1) ##' d <- sim(m,100,p=c(v=0.01,zeta=0.01))[,manifest(m)] ##' d <- transform(d, ##' y1=y1+2*pnorm(2*x), ##' y2=y2+2*pnorm(2*x)) ##' ##' ## Specify model and estimate parameters ##' constrain(m, mu ~ x + alpha + nu + gamma) <- function(x) x[4]*pnorm(x[3]+x[1]*x[2]) ##' \donttest{ ## Reduce Ex.Timings ##' e <- estimate(m,d,control=list(trace=1,constrain=TRUE)) ##' constraints(e,data=d) ##' ## Plot model-fit ##' plot(y1~x,d,pch=16); points(y2~x,d,pch=16,col="gray") ##' x0 <- seq(-4,4,length.out=100) ##' lines(x0,coef(e)["nu"] + coef(e)["gamma"]*pnorm(coef(e)["alpha"]*x0)) ##' } ##' ##' ############################## ##' ### Multigroup model ##' ############################## ##' ### Define 
two models ##' m1 <- lvm(y ~ f(x,beta)+f(z,beta2)) ##' m2 <- lvm(y ~ f(x,psi) + z) ##' ### And simulate data from them ##' d1 <- sim(m1,500) ##' d2 <- sim(m2,500) ##' ### Add 'non'-linear parameter constraint ##' constrain(m2,psi ~ beta2) <- function(x) x ##' ## Add parameter beta2 to model 2, now beta2 exists in both models ##' parameter(m2) <- ~ beta2 ##' ee <- estimate(list(m1,m2),list(d1,d2),control=list(method="NR")) ##' summary(ee) ##' ##' m3 <- lvm(y ~ f(x,beta)+f(z,beta2)) ##' m4 <- lvm(y ~ f(x,beta2) + z) ##' e2 <- estimate(list(m3,m4),list(d1,d2),control=list(method="NR")) ##' e2 ##' @export ##' @usage ##' ##' \method{constrain}{default}(x,par,args,...) <- value ##' ##' \method{constrain}{multigroup}(x,par,k=1,...) <- value ##' ##' constraints(object,data=model.frame(object),vcov=object$vcov,level=0.95, ##' p=pars.default(object),k,idx,...) ##' ##' @param x \code{lvm}-object ##' @param par Name of new parameter. Alternatively a formula with lhs ##' specifying the new parameter and the rhs defining the names of the ##' parameters or variable names defining the new parameter (overruling the ##' \code{args} argument). ##' @param args Vector of variables names or parameter names that are used in ##' defining \code{par} ##' @param k For multigroup models this argument specifies which group to ##' add/extract the constraint ##' @param value Real function taking args as a vector argument ##' @param object \code{lvm}-object ##' @param data Data-row from which possible non-linear constraints should be ##' calculated ##' @param vcov Variance matrix of parameter estimates ##' @param level Level of confidence limits ##' @param p Parameter vector ##' @param idx Index indicating which constraints to extract ##' @param \dots Additional arguments to be passed to the low level functions "constrain<-" <- function(x,...,value) UseMethod("constrain<-") ##' @export "constrain" <- function(x,...) UseMethod("constrain") ##' @export constrain.default <- function(x,fun, idx, level=0.95, vcov, estimate=FALSE, ...) { if (estimate) { return(constraints(x,...)) } if (missing(fun)) { if (inherits(Model(x),"multigroup")) { res <- list() for (m in Model(x)$lvm) { if (length(constrain(m))>0) res <- c(res, constrain(m)) } return(res) } return(Model(x)$constrain) } if (is.numeric(x)) { b <- x } else { b <- pars(x) } if (missing(vcov)) { S <- stats::vcov(x) } else { S <- vcov } if (!missing(idx)) { b <- b[idx]; S <- S[idx,idx,drop=FALSE] } fb <- fun(b) pl <- 1-(1-level)/2 D <- rbind(numDeriv::grad(fun,b)) se <- (D%*%S%*%t(D))^0.5 res <- c(fb,se,fb+c(-1,1)*qnorm(pl)*c(se)) pstr <- paste0(format(c(round(1000-1000*pl),round(pl*1000))/10),"%") names(res) <- c("Estimate","Std.Err",pstr) res } ##' @export "constrain<-.multigroupfit" <- "constrain<-.multigroup" <- function(x,par,k=1,...,value) { constrain(Model(x)$lvm[[k]],par=par,...) 
<- value return(x) } ##' @export "constrain<-.default" <- function(x,par,args,...,value) { if (inherits(par,"formula")) { lhs <- getoutcome(par) xf <- attributes(terms(par))$term.labels par <- lhs if (par%in%vars(x)) { if (is.na(x$mean[[par]])) { intercept(x,par) <- par } else { par <- x$mean[[par]] } } args <- xf } if (is.null(value) || suppressWarnings(is.na(value))) { if (!is.null(par)) { Model(x)$constrain[[par]] <- NULL Model(x)$constrainY[[par]] <- NULL } else { Model(x)$constrain[[args]] <- NULL } return(x) } for (i in args) { if (!(i%in%c(parlabels(Model(x)),vars(Model(x)), names(constrain(x))))) { if (!lava.options()$silent) message("\tAdding parameter '", i,"'\n",sep="") parameter(x,silent=TRUE) <- i } } if (par%in%vars(x)) { if (!"..."%in%names(formals(value))) { formals(value) <- c(formals(value),alist(...=)) } Model(x)$constrainY[[par]] <- list(fun=value,args=args) } else { ## Wrap around do.call, since functions are not really ## parsed as call-by-value in R, and hence setting ## attributes to e.g. value=cos, will be overwritten ## if value=cos is used again later with new args. Model(x)$constrain[[par]] <- function(x) do.call(value,list(x)) attributes(Model(x)$constrain[[par]])$args <- args index(Model(x)) <- reindex(Model(x)) } return(x) } ##' @export constraints <- function(object,data=model.frame(object),vcov=object$vcov,level=0.95, p=pars.default(object),k,idx,...) { if (class(object)[1]=="multigroupfit") { if (!missing(k)) { if (class(data)[1]=="list") data <- data[[k]] parpos <- modelPar(object, seq_len(length(p)))$p[[k]] if (nrow(data)>1 & !missing(idx)) { res <- t(apply(data,1,function(x) constraints(Model(object)$lvm[[k]],data=x,p=p[parpos],vcov=vcov[parpos,parpos],level=level)[idx,])) return(res) } return(constraints(Model(object)$lvm[[k]],data=data,p=p[parpos],vcov=vcov[parpos,parpos],level=level)) } return(attributes(CoefMat.multigroupfit(object,data=data,vcov=vcov,...))$nlincon) } if (NROW(data)>1 & !missing(idx)) { res <- t(apply(data,1,function(x) constraints(object,data=x,p=p,vcov=vcov,level=level)[idx,],...)) return(res) } if (length(index(object)$constrain.par)<1) return(NULL) parpos <- Model(object)$parpos if (is.null(parpos)) { parpos <- with(index(object),matrices2(Model(object),seq_len(npar+npar.mean+npar.ex))) parpos$A[index(object)$M0==0] <- 0 parpos$P[index(object)$P0==0] <- 0 parpos$v[index(object)$v1==0] <- 0 parpos$e[index(object)$e1==0] <- 0 } myidx <- unlist(lapply(parpos$parval, function(x) { if (!is.null(attributes(x)$reg.idx)) { return(parpos$A[attributes(x)$reg.idx[1]]) } else if (!is.null(attributes(x)$cov.idx)) { return(parpos$P[attributes(x)$cov.idx[1]]) } else if (!is.null(attributes(x)$m.idx)) { return(parpos$v[attributes(x)$m.idx[1]]) } else if (!is.null(attributes(x)$e.idx)) return(parpos$e[attributes(x)$e.idx[1]]) else NA })) names(myidx) <- names(parpos$parval) mynames <- c() N <- length(index(object)$constrain.par) if (N>0) res <- c() count <- 0 mydata <- rbind(numeric(length(manifest(object)))) colnames(mydata) <- manifest(object) data <- rbind(data) iname <- intersect(colnames(mydata),colnames(data)) mydata[1,iname] <- unlist(data[1,iname]) for (pp in index(object)$constrain.par) { count <- count+1 myc <- constrain(Model(object))[[pp]] mycoef <- numeric(6) val.idx <- myidx[attributes(myc)$args] val.idx0 <- na.omit(val.idx) M <- modelVar(Model(object),p=p,data=as.data.frame(mydata)) vals <- with(M,c(parval,constrainpar))[attributes(myc)$args] fval <- try(myc(unlist(vals)),silent=TRUE) fmat <- inherits(fval,"try-error") if (fmat) { 
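## 'fmat' is TRUE when the constraint function failed on a plain parameter
## vector and therefore expects a matrix (row-vector) argument; in that case
## the parameter values are re-evaluated as a one-row matrix below.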
fval <- myc(rbind(unlist(vals))) } mycoef[1] <- fval myc0 <- function(theta) { theta0 <- unlist(vals); ## theta0[val.idx0] <- theta[val.idx0]; theta0[!is.na(val.idx)] <- theta if (fmat) { res <- myc(rbind(theta0)) } else { res <- myc(theta0) } return(res) } vals0 <- unlist(vals)[!is.na(val.idx)] ## vals0 <- unlist(vals)[na.omit(val.idx)] if (length(vals0)==0) mycoef[2] <- NA else { if (!is.null(attributes(fval)$grad)) { if (fmat) { Gr <- cbind(attributes(fval)$grad(rbind(unlist(vals0)))) } else { Gr <- cbind(attributes(fval)$grad(unlist(vals0))) } } else { if (fmat) { Gr <- cbind(as.numeric(numDeriv::jacobian(myc0, unlist(vals0)))) } else { Gr <- cbind(as.numeric(numDeriv::jacobian(myc0, rbind(unlist(vals0))))) } } V <- vcov[val.idx0,val.idx0] mycoef[2] <- (t(Gr)%*%V%*%Gr)^0.5 } ## if (second) { ## if (!is.null(attributes(fval)$hessian)) { ## H <- attributes(fval)$hessian(unlist(vals)) ## } else { ## H <- hessian(myc, unlist(vals)) ## } ## HV <- H%*%vcov[val.idx,val.idx] ## mycoef[1] <- mycoef[1] + 0.5*sum(diag(HV)) ## mycoef[2] <- mycoef[2] + 0.5*sum(diag(HV%*%HV)) ## } mycoef[3] <- mycoef[1]/mycoef[2] mycoef[4] <- 2*(pnorm(abs(mycoef[3]),lower.tail=FALSE)) mycoef[5:6] <- mycoef[1] + c(1,-1)*qnorm((1-level)/2)*mycoef[2] res <- rbind(res,mycoef) mynames <- c(mynames,pp) if (!is.null(attributes(fval)$inv)){ res2 <- attributes(fval)$inv(mycoef[c(1,5,6)]) res <- rbind(res, c(res2[1],NA,NA,NA,res2[2],res2[3])) mynames <- c(mynames,paste0("inv(",pp,")")) } } rownames(res) <- mynames colnames(res) <- c("Estimate","Std. Error", "Z value", "Pr(>|z|)", "2.5%", "97.5%") return(res) } lava/R/tr.R0000644000176200001440000000127613162174023012153 0ustar liggesusers##' Trace operator ##' ##' Calculates the trace of a square matrix. ##' @param x Square numeric matrix ##' @param \dots Additional arguments to lower level functions ##' @return \code{numeric} ##' @author Klaus K. Holst ##' @seealso \code{\link{crossprod}}, \code{\link{tcrossprod}} ##' @keywords math algebra ##' @examples ##' ##' tr(diag(1:5)) ##' @export "tr" <- function(x,...) UseMethod("tr") ##' @export `tr.matrix` <- function(x,na.rm=FALSE,...) { if (length(x)==1) return(x) n <- nrow(x) if (!n) stop("0 x 0 matrix") if (n != ncol(x)) stop("non-square matrix") if (!na.rm && any(!is.finite(x))) stop("infinite or missing values") return(sum(diag(x),na.rm=na.rm)) } lava/R/heavytail.R0000644000176200001440000000263313162174023013512 0ustar liggesusers##' @export `heavytail` <- function(x,...) UseMethod("heavytail") ##' @export "heavytail<-" <- function(x,...,value) UseMethod("heavytail<-") ##' @export "heavytail<-.lvm" <- function(x,...,value) { if (inherits(value,"formula")) { return(heavytail(x,all.vars(value),...)) } heavytail(x, value, ...) } ##' @export `heavytail.lvm` <- function(x,var=NULL,df=1,...) { if (is.null(var)) { htidx <- x$attributes$heavytail if (length(htidx)>0 && any(htidx!=0)) { res <- htidx[htidx>0] attributes(res)$couple <- unlist(x$attributes$heavytail.couple)[htidx>0] return(res) } return(NULL) } couples <- attributes(heavytail(x))$couple newval <- 1 if (length(couples)>0) newval <- max(couples)+1 x$attributes$heavytail.couple[var] <- newval x$attributes$heavytail[var] <- df return(x) } heavytail.init.hook <- function(x,...) { x$attributes$heavytail <- list() x$attributes$heavytail.couple <- list() return(x) } heavytail.sim.hook <- function(x,data,...) 
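## The simulation hook below divides each marked variable by sqrt(chisq(df)/df),
## giving t(df)-like heavy tails; variables declared in the same call share one
## chi-square draw (a 'couple').  A minimal sketch, with illustrative variable
## names and df:
## m <- lvm(c(y1,y2) ~ x)
## heavytail(m, df=3) <- ~y1+y2   # y1 and y2 share a common chi-square divisor
## d <- sim(m, 100)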
{ n <- nrow(data) hvar <- heavytail(x) if (length(hvar)==0) return(data) couples <- unique(attributes(hvar)$couple) h.type <- list() for (j in couples) h.type <- c(h.type, list( hvar[(which(attributes(hvar)$couple==j))])) for (i in seq_along(couples)) { df <- hvar[[i]][1] Z <- rchisq(n,df=df)/df for (v in names(h.type[[i]])) { data[,v] <- data[,v]/sqrt(Z) } } return(data) } lava/R/zcolorbar.R0000644000176200001440000000522013162174023013514 0ustar liggesusers##' Add color-bar to plot ##' ##' @title Add color-bar to plot ##' @param clut Color look-up table ##' @param x.range x range ##' @param y.range y range ##' @param values label values ##' @param digits number of digits ##' @param label.offset label offset ##' @param srt rotation of labels ##' @param cex text size ##' @param border border of color bar rectangles ##' @param alpha Alpha (transparency) level 0-1 ##' @param position Label position left/bottom (1) or top/right (2) or no text (0) ##' @param direction horizontal or vertical color bars ##' @param \dots additional low level arguments (i.e. parsed to \code{text}) ##' @export ##' @examples ##' \dontrun{ ##' plotNeuro(x,roi=R,mm=-18,range=5) ##' colorbar(clut=Col(rev(rainbow(11,start=0,end=0.69)),0.5), ##' x=c(-40,40),y.range=c(84,90),values=c(-5:5)) ##' ##' colorbar(clut=Col(rev(rainbow(11,start=0,end=0.69)),0.5), ##' x=c(-10,10),y.range=c(-100,50),values=c(-5:5), ##' direction="vertical",border=1) ##' } colorbar <- function(clut=Col(rev(rainbow(11,start=0,end=0.69)),alpha), x.range=c(-.5,.5),y.range=c(-.1,.1), values=seq(clut),digits=2,label.offset,srt=45, cex=0.5,border=NA, alpha=0.5, position=1, direction=c("horizontal","vertical"),...) { nlut <- length(clut) X <- length(agrep(tolower(direction[1]),"horizontal"))>0 scale <- ifelse(X,diff(x.range),diff(y.range))/nlut barsize <- ifelse(X,diff(y.range),diff(x.range)) if (missing(label.offset)) label.offset <- barsize/3 delta <- ifelse(X,x.range[1],y.range[1]) if (!is.null(values)) dM <- diff(range(values))/(nlut-1) for (i in seq_len(nlut+1)-1) { pos <- delta + (i-1)*scale if (X) { x1 <- pos; x2 <- pos+scale; y1 <- y.range[1]; y2 <- y.range[2] } else { y1 <- pos; y2 <- pos+scale; x1 <- x.range[1]; x2 <- x.range[2] } if (i>0) rect(x1,y1,x2,y2, col=clut[i], border=border, xpd=TRUE) } if (!is.null(values)) { for (i in seq_len(nlut+1)-1) { pos <- delta + (i-1)*scale rund <- format(round(min(values)+dM*i,max(1,digits)),digits=digits) ## rund <- round((min(values)+dM*i)*10^digits)/(10^digits) x0 <- pos+(1+0.5)*scale; y0 <- y.range[2]+label.offset if (!X) { y0 <- x0; if (position==1) x0 <- x.range[1]-label.offset if (position==2) x0 <- x.range[1]+label.offset*5 if (position==3) x0 <- x.range[1]+label.offset*1 } if (i0) if (is.na(curfix) | overwrite) { count <- count+1 st <- ifelse(missing(labels),p0,labels[count]) ## st <- ifelse(missing(labels),paste0("m",count),labels[count]) intfix(x,p0) <- st } } } if (index(x)$npar.ex>0) { x$exfix[is.na(x$exfix)] <- names(x$exfix)[is.na(x$exfix)] index(x) <- reindex(x) } return(x) } ###}}} lava/R/partialcor.R0000644000176200001440000000330513162174023013661 0ustar liggesusers##' Calculate partial correlations ##' ##' Calculate partial correlation coefficients and confidence limits via Fishers ##' z-transform ##' ##' ##' @param formula formula speciying the covariates and optionally the outcomes ##' to calculate partial correlation for ##' @param data data.frame ##' @param level Level of confidence limits ##' @param ... 
Additional arguments to lower level functions ##' @return A coefficient matrix ##' @author Klaus K. Holst ##' @keywords models regression ##' @examples ##' ##' m <- lvm(c(y1,y2,y3)~x1+x2) ##' covariance(m) <- c(y1,y2,y3)~y1+y2+y3 ##' d <- sim(m,500) ##' partialcor(~x1+x2,d) ##' ##' @export partialcor <- function(formula,data,level=0.95,...) { y <- getoutcome(formula) if (length(y)==0) { preds <- all.vars(formula) yy <- setdiff(names(data),preds) } else { yy <- decomp.specials(y) preds <- attr(y,"x") } if (length(yy)<2) return(NULL) res <- c() for (i in seq_len(length(yy)-1)) for (j in seq(i+1,length(yy))) { f <- as.formula(paste("cbind(",yy[i],",",yy[j],")~", paste(preds,collapse="+"))) res <- rbind(res, partialcorpair(f,data,level=level)) rownames(res)[nrow(res)] <- paste(yy[i],yy[j],sep="~") } return(res) } partialcorpair <- function(formula,data,level=0.95,...) { l <- lm(formula,data) k <- ncol(model.matrix(l)) n <- nrow(model.matrix(l)) r <- residuals(l) rho <- cor(r)[1,2] zrho <- atanh(rho) var.z <- 1/(n-k-3) ci.z <- zrho + c(-1,1)*qnorm(1-(1-level)/2)*sqrt(var.z) ci.rho <- tanh(ci.z) z <- 1/sqrt(var.z)*zrho p.z <- 2*(pnorm(-abs(z))) # p-value using z-transform for H_0: rho=0. return(c(cor=rho,z=z,pval=p.z,lowerCI=ci.rho[1],upperCI=ci.rho[2])) } lava/R/trim.R0000644000176200001440000000101013162174023012463 0ustar liggesusers##' Trim tring of (leading/trailing/all) white spaces ##' @title Trim tring of (leading/trailing/all) white spaces ##' @param x String ##' @param all Trim all whitespaces? ##' @param \dots additional arguments to lower level functions ##' @author Klaus K. Holst ##' @export trim <- function(x,all=FALSE,...) { ## y <- gsub("^ .", "", x) # remove leading white space ## y <- gsub(". $", "", x) # remove trailing white space if (!all) return(gsub("^\\s+|\\s+$", "", x)) return(gsub(" ","",x,fixed=TRUE)) } lava/R/labels.R0000644000176200001440000003231713162174023012770 0ustar liggesusers###{{{ labels ##' Define labels of graph ##' ##' Alters labels of nodes and edges in the graph of a latent variable model ##' ##' ##' @aliases labels<- labels labels<-.default labels.lvm labels.lvmfit ##' labels.graphNEL edgelabels edgelabels<- edgelabels<-.lvm nodecolor ##' nodecolor<- nodecolor<-.default ##' @author Klaus K. Holst ##' @export ##' @keywords graphs aplot ##' @examples ##' m <- lvm(c(y,v)~x+z) ##' regression(m) <- c(v,x)~z ##' labels(m) <- c(y=expression(psi), z=expression(zeta)) ##' nodecolor(m,~y+z+x,border=c("white","white","black"), ##' labcol="white", lwd=c(1,1,5), ##' lty=c(1,2)) <- c("orange","indianred","lightgreen") ##' edgelabels(m,y~z+x, cex=c(2,1.5), col=c("orange","black"),labcol="darkblue", ##' arrowhead=c("tee","dot"), ##' lwd=c(3,1)) <- expression(phi,rho) ##' edgelabels(m,c(v,x)~z, labcol="red", cex=0.8,arrowhead="none") <- 2 ##' if (interactive()) { ##' plot(m,addstyle=FALSE) ##' } ##' ##' m <- lvm(y~x) ##' labels(m) <- list(x="multiple\nlines") ##' if (interactive()) { ##' op <- par(mfrow=c(1,2)) ##' plot(m,plain=TRUE) ##' plot(m) ##' par(op) ##' ##' d <- sim(m,100) ##' e <- estimate(m,d) ##' plot(e,type="sd") ##' } ##' @param object \code{lvm}-object. ##' @param value node label/edge label/color ##' @param to Formula specifying outcomes and predictors defining relevant ##' edges. ##' @param \dots Additional arguments (\code{lwd}, \code{cex}, \code{col}, ##' \code{labcol}), \code{border}. ##' @param var Formula or character vector specifying the nodes/variables to ##' alter. 
##' @param border Colors of borders ##' @param labcol Text label colors ##' @param shape Shape of node ##' @param lwd Line width of border ##' @usage ##' \method{labels}{default}(object, ...) <- value ##' \method{edgelabels}{lvm}(object, to, ...) <- value ##' \method{nodecolor}{default}(object, var=vars(object), ##' border, labcol, shape, lwd, ...) <- value `labels<-` <- function(object,...,value) UseMethod("labels<-") ##' @export `labels<-.default` <- function(object,...,value) { labels(object,value) } ##' @export labels.graphNEL <- function(object,lab=NULL,...) { if (is.null(lab)) return(graph::nodeRenderInfo(object)$label) graph::nodeRenderInfo(object) <- list(label=lab) names(graph::nodeRenderInfo(object)$label) <- graph::nodes(object); return(object) } ##' @export labels.lvmfit <- function(object,lab=NULL,...) { if (is.null(lab)) return(object$noderender$label) object$noderender$label <- lab return(object) } ##' @export `labels.lvm` <- function(object,lab=NULL,...) { if (is.null(lab)) return(object$noderender$label) if (is.null(object$noderender$label)) object$noderender$label <- lab else object$noderender$label[names(lab)] <- lab return(object) } ###}}} labels ###{{{ edgelabels ##' @export "edgelabels<-.lvmfit" <- function(object,to,from,est=TRUE,edges=NULL,cex=1,...,value) { if (is.null(edges)) { if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) from <- setdiff(all.vars(to),yy) to <- yy } edges <- paste(from,to,sep="~") } edges. <- paste0("\"", edges, "\"") fromto <- edge2pair(edges) val <- c() for (i in seq_along(edges)) { val <- c(val, formatC(effects(object,from=fromto[[i]][1],to=fromto[[i]][2],silent=TRUE)$directef[[1]]) ) } if (est) mytext <- paste("c(", paste(paste0(edges.,"=expression(",as.character(value),"==\"",val,"\")"),collapse=","),")") else mytext <- paste("c(", paste(paste0(edges.,"=expression(",as.character(value),")"),collapse=","),")") graph::edgeRenderInfo(Graph(object))$label <- eval(parse(text=mytext)) graph::edgeRenderInfo(Graph(object))$cex[edges] <- cex return(object) } ##' @export edgelabels.lvmfit <- function(object,value,type,pthres,intercept=FALSE,format.fun=formatC,...) { if (!missing(value)) { edgelabels(object,...) <- value return(object) } if (missing(type)) return(graph::edgeRenderInfo(Graph(object))$label) Afix <- index(object)$A ## Matrix with fixed parameters and ones where parameters are free Pfix <- index(object)$P ## Matrix with fixed covariance parameters and ones where param mfix <- index(object)$v0 npar.mean <- index(object)$npar.mean Par <- object$coef mpar <- c() if (npar.mean>0) { mpar <- do.call(format.fun,list(Par[seq_len(npar.mean)])) Par <- Par[-seq_len(npar.mean),,drop=FALSE] } Par <- switch(type, sd = paste0(do.call(format.fun,list(Par[,1,drop=FALSE])), " (", do.call(format.fun,list(Par[,2,drop=FALSE])), ")"), est = do.call(format.fun,list(Par[,1,drop=FALSE])), pval = do.call(format.fun,list(Par[,4,drop=FALSE])), name = rownames(Par), none = "" ) AP <- matrices(Model(object), Par,mpar) ## Ignore expar A <- AP$A; P <- AP$P P[exogenous(object),exogenous(object)] <- NA gr <- finalize(Model(object), ...) Anz <- A; Anz[Afix==0] <- NA gr <- edgelabels(gr, lab=Anz) Pnz <- P; Pnz[Model(object)$cov==0] <- NA if (intercept) { idx <- which(!is.na(diag(Pnz))) diag(Pnz)[idx] <- paste(paste0("[",AP$v[idx],"]"),diag(Pnz)[idx],sep="\n") } gr <- edgelabels(gr, lab=Pnz, expr=!intercept) Graph(object) <- gr return(object) } ##' @export `edgelabels` <- function(object, ...) 
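## 'edgelabels' is an S3 generic; the 'graphNEL' and 'lvm' methods below set
## per-edge rendering attributes (label, cex, col, lwd, lty, textCol,
## arrowhead), either directly on existing edges or via 'futureinfo' for edges
## that are not yet part of the graph.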
UseMethod("edgelabels") ##' @export `edgelabels<-` <- function(object,...,value) UseMethod("edgelabels<-") ##' @export `edgelabels<-.lvm` <- function(object,to,...,value) { edgelabels(object,to=to, lab=value,...) } ##' @export `edgelabels<-.graphNEL` <- function(object,...,value) { edgelabels(object,lab=value,...) } ##' @export `edgelabels.graphNEL` <- function(object, lab=NULL, to=NULL, from=NULL, cex=1.5, lwd=1, lty=1, col="black", labcol="black", arrowhead="closed", expr=TRUE, debug=FALSE,...) { if (is.null(lab)) { return(graph::edgeRenderInfo(object)$label) } if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) from <- all.vars(to[[3]])##setdiff(all.vars(to),yy) if (length(from)==0) from <- yy to <- yy } M <- as(object, Class="matrix") nodes <- graph::nodes(object) if (is.null(graph::edgeRenderInfo(object)$label)) graph::edgeRenderInfo(object)$label <- expression() if (!is.null(lab)) { if (!is.null(from) & !is.null(to)) { estr <- paste0("\"",from,"~",to,"\"") estr2 <- paste0(from,"~",to) if (length(lab)!=length(estr2)) lab <- rep(lab,length(estr2)) if (length(col)!=length(estr2)) col <- rep(col,length(estr2)) if (length(cex)!=length(estr2)) cex <- rep(cex,length(estr2)) if (length(lwd)!=length(estr2)) lwd <- rep(lwd,length(estr2)) if (length(lty)!=length(estr2)) lty <- rep(lty,length(estr2)) if (length(arrowhead)!=length(estr2)) arrowhead <- rep(arrowhead,length(estr2)) if (length(labcol)!=length(estr2)) labcol <- rep(labcol,length(estr2)) curedges <- names(graph::edgeRenderInfo(object)$label) Debug(estr,debug) estr2.idx <- which(estr2%in%curedges) newstr.idx <- setdiff(seq_along(estr2),estr2.idx) newstr <- estr2[newstr.idx] estr2 <- estr2[estr2.idx] if (length(estr2)>0) { if (!is.null(lab)) graph::edgeRenderInfo(object)$label[estr2] <- lab[estr2.idx] if (!is.null(cex)) graph::edgeRenderInfo(object)$cex[estr2] <- cex[estr2.idx] if (!is.null(col)) graph::edgeRenderInfo(object)$col[estr2] <- col[estr2.idx] if (!is.null(lwd)) graph::edgeRenderInfo(object)$lwd[estr2] <- lwd[estr2.idx] if (!is.null(lty)) graph::edgeRenderInfo(object)$lty[estr2] <- lty[estr2.idx] if (!is.null(labcol)) graph::edgeRenderInfo(object)$textCol[estr2] <- labcol[estr2.idx] if (!is.null(arrowhead)) graph::edgeRenderInfo(object)$arrowhead[estr2] <- arrowhead[estr2.idx] } if (length(newstr)>0) { if (!is.null(lab)) graph::edgeDataDefaults(object)$futureinfo$label[newstr] <- lab[newstr.idx] if (!is.null(cex)) graph::edgeDataDefaults(object)$futureinfo$cex[newstr] <- cex[newstr.idx] if (!is.null(col)) graph::edgeDataDefaults(object)$futureinfo$col[newstr] <- col[newstr.idx] if (!is.null(lwd)) graph::edgeDataDefaults(object)$futureinfo$lwd[newstr] <- lwd[newstr.idx] if (!is.null(lty)) graph::edgeDataDefaults(object)$futureinfo$lty[newstr] <- lty[newstr.idx] if (!is.null(labcol)) graph::edgeDataDefaults(object)$futureinfo$textCol[newstr] <- labcol[newstr.idx] if (!is.null(arrowhead)) graph::edgeDataDefaults(object)$futureinfo$arrowhead[newstr] <- arrowhead[newstr.idx] } return(object) } ## Used by "edgelabels.lvmfit" for (r in seq_len(nrow(M))) for (s in seq_len(ncol(M))) { if (M[r,s]!=0 & !is.na(lab[r,s])) { estr <- paste0("\"",nodes[r],"~",nodes[s],"\"") estr2 <- paste0(nodes[r],"~",nodes[s]) Debug(estr, debug) if (expr) st <- eval(parse(text=paste0("expression(",lab[r,s],")"))) else st <- lab[r,s] graph::edgeRenderInfo(object)$label[estr2] <- st } } } return(object) } ##' @export `edgelabels.lvm` <- function(object, lab=NULL, to=NULL, from=NULL, cex=1.5, lwd=1, lty=1, col="black", labcol="black", 
arrowhead="closed", expr=TRUE, debug=FALSE,...) { if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) from <- all.vars(to[[3]])##setdiff(all.vars(to),yy) if (length(from)==0) from <- yy to <- yy } if (is.null(lab)) { res <- c(object$edgerender$label,object$edgerender$futureinfo$label) if (!is.null(to) && !is.null(from)) { estr <- apply(Expand(from,to),1,function(x) paste0(x,collapse="~")) res <- res[estr] } return(res) } M <- object$M nodes <- colnames(M) if (is.null(object$edgerender$label)) object$edgerender$label <- expression() if (!is.null(lab)) { if (!is.null(from) & !is.null(to)) { estr <- paste0("\"",from,"~",to,"\"") estr2 <- paste0(from,"~",to) if (length(lab)!=length(estr2)) lab <- rep(lab,length(estr2)) if (length(col)!=length(estr2)) col <- rep(col,length(estr2)) if (length(cex)!=length(estr2)) cex <- rep(cex,length(estr2)) if (length(lwd)!=length(estr2)) lwd <- rep(lwd,length(estr2)) if (length(lty)!=length(estr2)) lty <- rep(lty,length(estr2)) if (length(labcol)!=length(estr2)) labcol <- rep(labcol,length(estr2)) if (length(arrowhead)!=length(estr2)) arrowhead <- rep(arrowhead,length(estr2)) curedges <- names(object$edgerender$label) Debug(estr,debug) estr2.idx <- which(estr2%in%curedges) newstr.idx <- setdiff(seq_along(estr2),estr2.idx) newstr <- estr2[newstr.idx] estr2 <- estr2[estr2.idx] if (length(estr2)>0) { if (!is.null(lab)) object$edgerenderlabel[estr2] <- lab[estr2.idx] if (!is.null(cex)) object$edgerender$cex[estr2] <- cex[estr2.idx] if (!is.null(col)) object$edgerender$col[estr2] <- col[estr2.idx] if (!is.null(lwd)) object$edgerender$lwd[estr2] <- lwd[estr2.idx] if (!is.null(lty)) object$edgerender$lty[estr2] <- lty[estr2.idx] if (!is.null(labcol)) object$edgerender$textCol[estr2] <- labcol[estr2.idx] if (!is.null(arrowhead)) object$edgerender$arrowhead[estr2] <- arrowhead[estr2.idx] } if (length(newstr)>0) { if (!is.null(lab)) object$edgerender$futureinfo$label[newstr] <- lab[newstr.idx] if (!is.null(cex)) object$edgerender$futureinfo$cex[newstr] <- cex[newstr.idx] if (!is.null(col)) object$edgerender$futureinfo$col[newstr] <- col[newstr.idx] if (!is.null(lwd)) object$edgerender$futureinfo$lwd[newstr] <- lwd[newstr.idx] if (!is.null(lty)) object$edgerender$futureinfo$lty[newstr] <- lty[newstr.idx] if (!is.null(labcol)) object$edgerender$futureinfo$textCol[newstr] <- labcol[newstr.idx] if (!is.null(arrowhead)) object$edgerender$futureinfo$arrowhead[newstr] <- arrowhead[newstr.idx] } return(object) } ## Used by "edgelabels.lvmfit" for (r in seq_len(nrow(M))) for (s in seq_len(ncol(M))) { if (M[r,s]!=0 & !is.na(lab[r,s])) { estr <- paste0("\"",nodes[r],"~",nodes[s],"\"") estr2 <- paste0(nodes[r],"~",nodes[s]) Debug(estr, debug) if (expr) st <- eval(parse(text=paste0("expression(",lab[r,s],")"))) else st <- lab[r,s] object$edgerender$label[estr2] <- st } } } return(object) } ###}}} edgelabels lava/R/distribution.R0000644000176200001440000003277113162174023014251 0ustar liggesusers ###{{{ distribution ##' @export "distribution<-" <- function(x,...,value) UseMethod("distribution<-") ##' @export "distribution" <- function(x,...,value) UseMethod("distribution") ##' @export "distribution<-.lvm" <- function(x,variable,parname=NULL,init,mdist=FALSE,...,value) { if (inherits(variable,"formula")) variable <- all.vars(variable) dots <- list(...) 
if (!missing(value)) { for (obj in c("variable","parname","init","mdist")) if (!is.null(attr(value,obj)) && eval(substitute(missing(a),list(a=obj)))) assign(obj,attr(value,obj)) } ## Generator <- (is.function(value) && inherits(try(do.call(value,list()),silent=TRUE),"try-error")) ## if (Generator && length(variable)==1) value <- list(value) if (!is.null(parname) || length(dots)>0) { ## || Generator) { if (length(parname)>1 || (is.character(parname))) { if (missing(init)) { parameter(x,start=rep(1,length(parname))) <- parname } else { parameter(x,start=init) <- parname } ## if ("..."%ni%names(formals(value))) formals(value) <- c(formals(value),alist(...=)) ## formals(value) <- modifyList(formals(value),dots) gen <- function(n,p,...) { args <- c(n,as.list(p[parname]),dots) names(args) <- names(formals(value))[seq(length(parname)+1)] do.call(value,args) } } else { gen <- value if ("..."%ni%names(formals(gen))) formals(gen) <- c(formals(gen),alist(...=)) formals(gen) <- modifyList(formals(gen),dots) ## gen <- function(n,p,...) { ## args <- c(n=n,dots) ## names(args)[1] <- names(formals(value))[1] ## do.call(value,args) ## } } ## if (length(variable)>1) { gen <- list(gen) } distribution(x,variable,mdist=TRUE) <- gen return(x) } if (length(variable)==1 && !mdist) { addvar(x) <- as.formula(paste("~",variable)) if (is.numeric(value)) value <- list(value) if (!is.null(attributes(value)$mean)) intercept(x,variable) <- attributes(value)$mean if (!is.null(attributes(value)$variance)) variance(x,variable,exo=TRUE) <- attributes(value)$variance ## if (is.function(value) && "..."%ni%names(formals(value))) formals(value) <- c(formals(value),alist(...=)) x$attributes$distribution[[variable]] <- value ## Remove from 'mdistribution' vars <- which(names(x$attributes$mdistribution$var)%in%variable) for (i in vars) { pos <- x$attributes$mdistribution$var[[i]] x$attributes$mdistribution$fun[pos] <- NULL x$attributes$mdistribution$var[which(x$attributes$mdistribution$var==pos)] <- NULL above <- which(x$attributes$mdistribution$var>pos) if (length(above)>0) x$attributes$mdistribution$var[above] <- lapply(x$attributes$mdistribution$var[above],function(x) x-1) } return(x) } if (is.list(value) && length(value)==1 && (is.function(value[[1]]) || is.null(value[[1]]))) { addvar(x) <- variable ## Multivariate distribution if (is.null(x$attributes$mdistribution)) x$attributes$mdistribution <- list(var=list(), fun=list()) vars <- x$attributes$mdistribution$var if (any(ii <- which(names(vars)%in%variable))) { num <- unique(unlist(vars[ii])) vars[which(unlist(vars)%in%num)] <- NULL newfunlist <- list() numleft <- unique(unlist(vars)) for (i in seq_along(numleft)) { newfunlist <- c(newfunlist, x$attributes$mdistribution$fun[[numleft[i]]]) ii <- which(unlist(vars)==numleft[i]) vars[ii] <- i } K <- length(numleft) x$attributes$mdistribution$var <- vars x$attributes$mdistribution$fun <- newfunlist } else { K <- length(x$attributes$mdistribution$fun) } if (length(distribution(x))>0) distribution(x,variable) <- rep(list(NULL),length(variable)) x$attributes$mdistribution$var[variable] <- K+1 x$attributes$mdistribution$fun <- c(x$attributes$mdistribution$fun,value) return(x) } if ((length(value)!=length(variable) & length(value)!=1)) stop("Wrong number of values") ## if (length(value)==1 && "..."%ni%names(formals(value))) formals(value) <- c(formals(value),alist(...=)) for (i in seq_along(variable)) if (length(value)==1) { distribution(x,variable[i],...) 
<- value } else { ## if ("..."%ni%names(formals(value[[i]]))) formals(value[[i]]) <- c(formals(value[[i]]),alist(...=)) distribution(x,variable[i],...) <- value[[i]] } return(x) } ##' @export "distribution.lvm" <- function(x,var,value,multivariate=FALSE,...) { if (!missing(value)) { distribution(x,var,...) <- value return(x) } if (multivariate) return(x$attributes$mdistribution) x$attributes$distribution[var] } ###}}} distribution ###{{{ normal/gaussian ##' @export normal.lvm <- function(link="identity",mean,sd,log=FALSE,...) { rnormal <- if(log) rlnorm else rnorm fam <- stats::gaussian(link); fam$link <- link f <- function(n,mu,var,...) rnormal(n,fam$linkinv(mu),sqrt(var)) if (!missing(mean)) attr(f,"mean") <- mean if (!missing(sd)) attr(f,"variance") <- sd^2 attr(f,"family") <- fam return(f) } ##' @export gaussian.lvm <- normal.lvm ##' @export lognormal.lvm <- function(...) structure(normal.lvm(...,log=TRUE),family=list(family="log-normal",...)) ###}}} normal/gaussian ###{{{ poisson ##' @export poisson.lvm <- function(link="log",lambda,...) { fam <- stats::poisson(link); fam$link <- link f <- function(n,mu,...) { if (missing(n)) { return(fam) } rpois(n,fam$linkinv(mu)) } if (!missing(lambda)) attr(f,"mean") <- fam$linkfun(lambda) attr(f,"family") <- fam attr(f,"var") <- FALSE return(f) } ###}}} poisson ###{{{ pareto ## @examples ## m <- lvm() ## categorical(m,K=3) <- ~x ## distribution(m,~y) <- pareto.lvm(lambda=1) ## regression(m,additive=FALSE) <- y~x ## regression(m) <- y~z ## d <- sim(m,1e4,p=c("y~x:0"=1,"y~x:1"=1,"y~x:2"=exp(1))) ## ## X <- model.matrix(y~-1+factor(x)+z,data=d) ## mlogL <- function(theta) { ## lambda <- exp(theta[1]) ## mu <- exp(X%*%theta[-1]) ## -sum(log(lambda*mu*(1+mu*d$y)^{-lambda-1})) ## } ## nlminb(rep(0,ncol(X)+1),mlogL) ##' @export pareto.lvm <- function(lambda=1,...) { ## shape: lambda, scale: mu ## Density f(y): lambda*mu*(1+mu*y)^{-lambda-1} ## Survival S(y): (1+mu*y)^{-lambda} ## Inverse CDF: u -> ((1-u)^{-1/lambda}-1)/mu f <- function(n,mu,var,...) { ((1-runif(n))^(-1/lambda)-1)/exp(mu) } attr(f,"family") <- list(family="pareto", par=c(lambda=lambda)) return(f) } ###}}} pareto ###{{{ threshold ##' @export threshold.lvm <- function(p,labels=NULL,...) { if (sum(p)>1 || any(p<0 | p>1)) stop("wrong probability vector") ; if (!is.null(labels)) return(function(n,...) { return(cut(rnorm(n),breaks=c(-Inf,qnorm(cumsum(p)),Inf),labels=labels)) }) function(n,...) cut(rnorm(n),breaks=c(-Inf,qnorm(cumsum(p)),Inf)) } ###}}} threshold ###{{{ binomial ##' @export binomial.lvm <- function(link="logit",p,size=1) { if (substitute(link)==quote(identity)) { link <- "identity" } fam <- stats::binomial(link); fam$link <- link f <- function(n,mu,var,...) { if (missing(n)) { return(fam) } rbinom(n,size,fam$linkinv(mu)) } attr(f,"family") <- fam attr(f,"var") <- FALSE if (!missing(p)) attr(f,"mean") <- fam$linkfun(p) ## f <- switch(link, ## logit = ## function(n,mu,var,...) rbinom(n,1,tigol(mu)), ## cloglog = ## function(n,mu,var,...) rbinom(n,1,1-exp(-exp(1-mu))), ## function(n,mu,var=1,...) rbinom(n,1,pnorm(mu,sd=sqrt(var))) ## ### function(n,mu=0,var=1,...) (rnorm(n,mu,sqrt(var))>0)*1 ## ) ##} return(f) } ##' @export logit.lvm <- binomial.lvm("logit") ##' @export probit.lvm <- binomial.lvm("probit") ###}}} binomial ###{{{ Gamma ##' @export Gamma.lvm <- function(link="inverse",shape,rate,unit=FALSE,var=FALSE,log=FALSE,...) { fam <- stats::Gamma(link); fam$link <- link rgam <- if (!log) rgamma else function(...) 
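## (when log=TRUE the anonymous function defined here wraps rgamma() in log(),
## so the branches below generate log-gamma rather than gamma variates)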
log(rgamma(...)) if (!missing(shape) & !missing(rate)) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=rate) if (!missing(shape) & missing(rate)) { if (unit) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=shape) else if (var) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=sqrt(shape/var)) else f <- function(n,mu,var,...) rgam(n,shape=shape,rate=shape/fam$linkinv(mu)) } if (missing(shape) & !missing(rate)) { if (unit) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=rate) else if (var) f <- function(n,mu,var,...) rgam(n,shape=rate^2*var,rate=rate) else f <- function(n,mu,var,...) rgam(n,shape=rate*fam$linkinv(mu),rate=rate) } if (missing(shape) & missing(rate)) { if (var) f <- function(n,mu,var,...) rgam(n,shape=var,rate=1) else f <- function(n,mu,var,...) rgam(n,shape=fam$linkinv(mu),rate=1) } attr(f,"family") <- fam attr(f,"var") <- FALSE return(f) } ##' @export loggamma.lvm <- function(...) Gamma.lvm(...,log=TRUE) ###}}} Gamma ###{{{ chisq ##' @export chisq.lvm <- function(df=1,...) { function(n,mu,var,...) mu + rchisq(n,df=df) } ###}}} chisq ###{{{ student (t-distribution) ##' @export student.lvm <- function(df=2,mu,sigma,...) { f <- function(n,mu,var,...) mu + sqrt(var)*rt(n,df=df) if (!missing(mu)) attr(f,"mean") <- mu if (!missing(sigma)) attr(f,"variace") <- sigma^2 return(f) } ###}}} student (t-distribution) ###{{{ uniform ##' @export uniform.lvm <- function(a,b) { if (!missing(a) & !missing(b)) f <- function(n,mu,var,...) mu+runif(n,a,b) else f <- function(n,mu,var,...) (mu+(runif(n,-1,1)*sqrt(12)/2*sqrt(var))) return(f) } ###}}} uniform ###{{{ weibull ## see also eventTime.R for coxWeibull ##' @export weibull.lvm <- function(scale=1,shape=2) { ## accelerated failure time (AFT) regression ## parametrization. ## ## We parametrize the Weibull distribution (without covariates) as follows: ## hazard(t) = 1/shape * exp(-scale/shape) * t^(1/shape-1) ## The hazard is: ## - rising if shape > 1 ## - declining if shape <1 ## - constant if shape=1 ## ## AFT regression ## hazard(t|Z) = 1/shape * exp(-scale/shape) * t^(1/shape-1) exp(-beta/shape*Z) ## scale^(-1/shape) = exp(a0+a1*X) ## PH regression ## scale = exp(b0+ b1*X) f <- function(n,mu,var,...) { (- log(runif(n)) * exp(log(scale)/shape) * exp(mu/shape))^{shape} ## scale * (-log(1-runif(n)))^{1/shape} ## (- (log(runif(n)) / (1/scale)^(shape) * exp(-mu)))^(1/shape) } attr(f,"family") <- list(family="weibull", regression="AFT", par=c(shape=shape,scale=scale)) return(f) } ###}}} weibull ###{{{ sequence ##' @export sequence.lvm <- function(a=0,b=1,integer=FALSE) { if (integer) { f <- function(n,...) seq(n) return(f) } if (is.null(a) || is.null(b)) { if (!is.null(a)) { f <- function(n,...) seq(a,length.out=n) } else { f <- function(n,...) seq(n)-(n-b) } } else { f <- function(n,...) seq(a,b,length.out=n) } return(f) } ###}}} sequence ###{{{ ones ##' @export ones.lvm <- function(p=1,interval=NULL) { f <- function(n,...) { if (!is.null(interval)) { val <- rep(0L,n) if (!is.list(interval)) interval <- list(interval) for (i in seq_along(interval)) { ii <- interval[[i]] lo <- round(ii[1]*n) hi <- round(ii[2]*n) val[seq(lo,hi)] <- 1L } return(val) } if (p==0) return(rep(0L,n)) val <- rep(1L,n) if (p>0 && p<1) val[seq(n*(1-p))] <- 0L val } return(f) } ###}}} ones ###{{{ beta ##' @export beta.lvm <- function(alpha=1,beta=1,scale=TRUE) { ## CDF: F(x) = B(x,alpha,beta)/B(alpha,beta) ## Mean: alpha/(alpha+beta) ## Var: alpha*beta/((alpha+beta)^2*(alpha+beta+1)) if (scale) f <- function(n,mu,var,...) 
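## When scale=TRUE the beta(alpha,beta) draw below is centred and rescaled to
## the mean ('mu') and variance ('var') implied by the model while keeping the
## beta shape; with scale=FALSE the raw beta(alpha,beta) variate is returned
## unchanged.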
{ m <- alpha/(alpha+beta) v <- alpha*beta/((alpha+beta)^2*(alpha+beta+1)) y <- stats::rbeta(n,shape1=alpha,shape2=beta) mu+(y-m)*sqrt(var/v) } else f <- function(n,mu,var,...) stats::rbeta(n,shape1=alpha,shape2=beta) return(f) } ###}}} beta ###{{{ Gaussian mixture ##' @export GM2.lvm <- function(...,parname=c("Pr","M1","M2","V1","V2"),init=c(0.5,-4,4,1,1)) { f <- function(n,pr,m1,m2,v1,v2) { y1 <- rnorm(n,m1,v1^0.5) if (pr>=1) return(y1) z <- rbinom(n,1,pr) y2 <- rnorm(n,m2,v2^0.5) return(z*y1+(1-z)*y2) } structure(f,parname=parname,init=init) } ##' @export GM3.lvm <- function(...,parname=c("Pr1","Pr2","M1","M2","M3","V1","V2","V3"),init=c(0.25,0.5,-4,0,4,1,1,1)) { f <- function(n,pr1,pr2,m1,m2,m3,v1,v2,v3) { p <- c(pr1,pr2,1-pr1-pr2) y1 <- rnorm(n,m1,v1^0.5) y2 <- rnorm(n,m2,v2^0.5) y3 <- rnorm(n,m3,v3^0.5) z <- stats::rmultinom(n,1,p) rowSums(cbind(y1,y2,y3)*t(z)) } structure(f,parname=parname,init=init) } ###}}} Gaussian mixture lava/R/fix.R0000644000176200001440000003677013162174023012323 0ustar liggesusers###{{{ print.fix ##' @export print.fix <- function(x,exo=FALSE,...) { switch(attributes(x)$type, reg = cat("Regression parameters:\n"), cov = cat("Covariance parameters:\n"), mean = cat("Intercept parameters:\n")) M <- linconstrain(x,print=TRUE) invisible(x) } linconstrain <- function(x,print=TRUE,indent=" ",exo=FALSE,...) { idx <- seq_len(attributes(x)$nvar) idx0 <- setdiff(idx,attributes(x)$exo.idx) if (!exo & attributes(x)$type!="reg") idx <- idx0 if (attributes(x)$type=="mean") { if (length(idx)>0){ M <- rbind(unlist(x[idx])) rownames(M) <- "" M[is.na(M)] <- "*" } else { M <- NULL } } else { if (length(x$rel)==0) { M <- NULL } else { M <- x$rel[idx,idx,drop=FALSE] M[M==0] <- NA M[M==1] <- "*" M[which(!is.na(x$labels[idx,idx]))] <- x$labels[idx,idx][which(!is.na(x$labels[idx,idx]))] M[which(!is.na(x$values[idx,idx]))] <- x$values[idx,idx][which(!is.na(x$values[idx,idx]))] if (attributes(x)$type=="reg") M <- t(M[,idx0,drop=FALSE]) } } if (print) { M0 <- M if (NROW(M)>0) rownames(M0) <- paste(indent,rownames(M)) print(M0,quote=FALSE,na.print="",...) } invisible(M) } ###}}} print.fix ###{{{ intfix ##' @export "intfix" <- function(object,...) UseMethod("intfix") ##' @export "intfix<-" <- function(object,...,value) UseMethod("intfix<-") ##' Fix mean parameters in 'lvm'-object ##' ##' Define linear constraints on intercept parameters in a \code{lvm}-object. ##' ##' ##' The \code{intercept} function is used to specify linear constraints on the ##' intercept parameters of a latent variable model. As an example we look at ##' the multivariate regression model ##' ##' \deqn{ E(Y_1|X) = \alpha_1 + \beta_1 X} \deqn{ E(Y_2|X) = \alpha_2 + \beta_2 ##' X} ##' ##' defined by the call ##' ##' \code{m <- lvm(c(y1,y2) ~ x)} ##' ##' To fix \eqn{\alpha_1=\alpha_2} we call ##' ##' \code{intercept(m) <- c(y1,y2) ~ f(mu)} ##' ##' Fixed parameters can be reset by fixing them to \code{NA}. For instance to ##' free the parameter restriction of \eqn{Y_1} and at the same time fixing ##' \eqn{\alpha_2=2}, we call ##' ##' \code{intercept(m, ~y1+y2) <- list(NA,2)} ##' ##' Calling \code{intercept} with no additional arguments will return the ##' current intercept restrictions of the \code{lvm}-object. 
##' ##' @aliases intercept intercept<- intercept.lvm intercept<-.lvm intfix intfix ##' intfix<- intfix.lvm intfix<-.lvm ##' @param object \code{lvm}-object ##' @param vars character vector of variable names ##' @param value Vector (or list) of parameter values or labels (numeric or ##' character) or a formula defining the linear constraints (see also the ##' \code{regression} or \code{covariance} methods). ##' @param \dots Additional arguments ##' @usage ##' \method{intercept}{lvm}(object, vars, ...) <- value ##' @return ##' ##' A \code{lvm}-object ##' @note ##' ##' Variables will be added to the model if not already present. ##' @author Klaus K. Holst ##' @seealso \code{\link{covariance<-}}, \code{\link{regression<-}}, ##' \code{\link{constrain<-}}, \code{\link{parameter<-}}, ##' \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} ##' @keywords models regression ##' @export ##' @examples ##' ##' ##' ## A multivariate model ##' m <- lvm(c(y1,y2) ~ f(x1,beta)+x2) ##' regression(m) <- y3 ~ f(x1,beta) ##' intercept(m) <- y1 ~ f(mu) ##' intercept(m, ~y2+y3) <- list(2,"mu") ##' intercept(m) ## Examine intercepts of model (NA translates to free/unique paramete##r) ##' ##' "intercept" <- function(object,...) UseMethod("intercept") ##' @export ##' @export intercept.lvm <- intfix.lvm <- function(object,value,...) { if (!missing(value)) { intercept(object,...) <- value return(object) } res <- object$mean; attr(res,"type") <- "mean" attr(res,"exo.idx") <- index(object)$exo.idx attr(res,"nvar") <- length(res) class(res) <- "fix" return(res) } ##' @export "intercept<-" <- function(object,...,value) UseMethod("intercept<-") ##' @export ##' @export "intercept<-.lvm" <- "intfix<-.lvm" <- function(object, vars,...,value) { if (!missing(vars) && inherits(value,"formula")) value <- all.vars(value) if (inherits(value,"formula")) { lhs <- getoutcome(value) yy <- decomp.specials(lhs) if ((inherits(value[[3]],"logical") && is.na(value[[3]]))) { intfix(object,yy) <- NA return(object) } tt <- terms(value) xf <- attributes(terms(tt))$term.labels res <- lapply(xf,decomp.specials)[[1]] myvalue <- char2num(as.list(res)) myvalue <- lapply(myvalue, function(x) ifelse(x=="NA",NA,x)) intfix(object,yy) <- myvalue object$parpos <- NULL return(object) } if (inherits(vars,"formula")) { vars <- all.vars(vars) } object$mean[vars] <- value newindex <- reindex(object) object$parpos <- NULL index(object)[names(newindex)] <- newindex return(object) } ###}}} intfix ###{{{ covfix ##' @export "covfix" <- function(object,...) UseMethod("covfix") ##' @export covfix.lvm <- function(object,...) { res <- list(rel=object$cov, labels=object$covpar, values=object$covfix); attr(res,"type") <- "cov" attr(res,"exo.idx") <- index(object)$exo.idx attr(res,"nvar") <- NROW(res$rel) class(res) <- "fix" return(res) } ##' @export "covfix<-" <- function(object,...,value) UseMethod("covfix<-") ##' @export "covfix<-.lvm" <- function(object, var1, var2=var1, pairwise=FALSE, exo=FALSE, ..., value) { if (inherits(var1,"formula")) { var1 <- all.vars(var1) } if (inherits(var2,"formula")) { var2 <- all.vars(var2) } object <- addvar(object,c(var1,var2),reindex=FALSE,...) 
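## addvar() above ensures every variable referenced in the constraint is
## present in the model before the covariance structure is modified.  The
## usual user-level interface is 'covariance<-' (and 'intercept<-' for means),
## e.g. (variable and parameter names are illustrative):
## m <- lvm(c(y1,y2) ~ x)
## covariance(m) <- y1 ~ y2            # free residual covariance of y1 and y2
## intercept(m, ~y1+y2) <- list(0, "mu")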
allvars <- c(var1,var2) xorg <- exogenous(object) exoset <- setdiff(xorg,allvars) if (!exo & length(exoset)0) gg <- matrix(0,nrow=length(x),ncol=K) for (i in seq(K)) { gg[,i] <- g(x,breaks[i]) } B <- matrix(0,nrow=length(x),ncol=K-2) for (i in seq(K-2)) { B[,i] <- gg[,i] - (breaks[K]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K-1] + (breaks[K-1]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K] } cbind(x,B) } ncspred <- function(mu, var, knots=c(-5,0,5)) { breaks <- knots K <- length(breaks) v <- as.vector(var) k <- sqrt(v/(2*pi)) g <- function(x,tau) { x0 <- (x-tau) x2 <- x0^2 p0 <- 1-pnorm(-x0/sqrt(v)) # P(x>tau|...) k*(2*v + x2)*exp(-(x0/(sqrt(2*v)))^2) + x0*(x2+3*v)*p0 } n <- NROW(mu) gg <- matrix(0,nrow=n,ncol=K) for (i in seq(K)) { gg[,i] <- g(mu,breaks[i]) } B <- matrix(0,nrow=n,ncol=K-2) for (i in seq(K-2)) { B[,i] <- gg[,i] - (breaks[K]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K-1] + (breaks[K-1]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K] } cbind(mu,B) } ##' @export nonlinear.lvm <- function(object, to, from=NULL, type=c("quadratic"), knots=c(-5,0,5), names, ...) { if (missing(to)) { return(object$attributes$nonlinear) } if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) myvars <- all.vars(to) from <- setdiff(myvars,yy) to <- yy } if (length(to)>1) stop("Supply only one response variable") if (length(from)>1) stop("Supply only one explanatory variable") object <- cancel(object, c(from,to)) variance(object) <- to f <- pred <- NULL if (tolower(type)[1]%in%c("ncs","spline","naturalspline","cubicspline","natural cubic spline")) { if (is.null(knots)) stop("Need cut-points ('knots')") if (length(knots)<3) { warning("Supply at least three knots (one interior and boundaries)") ## Fall-back to linear type <- "linear" } if (missing(names)) names <- paste0(from,"_",seq(length(knots)-1)) f <- function(p,x) { B <- cbind(1,naturalcubicspline(x,knots=knots[-c(1,length(knots))],boundary=knots[c(1,length(knots))])) colnames(B) <- c("(Intercept)",names) as.vector(B%*%p) } pred <- function(mu,var,...) { B <- ncspred(mu,var,knots=knots) structure(B,dimnames=list(NULL,names)) } } if (tolower(type)[1]=="linear") { if (missing(names)) names <- from f <- function(p,x) p[1] + p[2]*x pred <- function(mu,var,...) { structure(cbind(mu[,1]),dimnames=list(NULL,names)) } } if (tolower(type)[1]=="quadratic") { if (missing(names)) names <- paste0(from,"_",1:2) f <- function(p,x) p[1] + p[2]*x + p[3]*(x*x) pred <- function(mu,var,...) { structure(cbind(mu[,1],mu[,1]^2+var[1]),dimnames=list(NULL,names)) } } if (tolower(type)[1]%in%c("piecewise","piecewise linear","linear")) { if (is.null(knots)) stop("Need cut-points ('knots')") } if (tolower(type)[1]%in%c("exp","exponential")) { if (missing(names)) names <- paste0(from,"_",1) f <- function(p,x) p[1] + p[2]*exp(x) pred <- function(mu,var,...) { structure(cbind(exp(0.5*var[1] + mu[,1])),dimnames=list(NULL,names)) } } object$attributes$nonlinear[[to]] <- list(x=from, p=length(names)+1, newx=names, f=f, pred=pred, type=tolower(type[1])) return(object) } ##' @export nonlinear.lvmfit <- function(object, to, ...) { if (missing(to)) { return(Model(object)$attributes$nonlinear) } Model(object) <- nonlinear(Model(object),to=to,...) return(object) } ##' @export nonlinear.twostage.lvm <- function(object, ...) { return(object$nonlinear) } ##' @export nonlinear.lvmfit <- function(object, ...) { return(object$nonlinear) } ##' @export `nonlinear<-.lvm` <- function(object, ..., type="quadratic", value) { nonlinear(object,to=value,type=type,...) 
} lava/R/ksmooth.R0000644000176200001440000001235513162174023013212 0ustar liggesusers##' Plot/estimate surface ##' ##' @export ##' @aliases ksmooth2 surface ##' @param x formula or data ##' @param data data.frame ##' @param h bandwidth ##' @param xlab X label ##' @param ylab Y label ##' @param zlab Z label ##' @param gridsize grid size of kernel smoother ##' @param ... Additional arguments to graphics routine (persp3d or persp) ##' @examples ##' ksmooth2(rmvn(1e4,sigma=diag(2)*.5+.5),c(-3.5,3.5),h=1, ##' rgl=FALSE,theta=30) ##' ##' if (interactive()) { ##' ksmooth2(rmvn(1e4,sigma=diag(2)*.5+.5),c(-3.5,3.5),h=1) ##' ksmooth2(function(x,y) x^2+y^2, c(-20,20)) ##' ksmooth2(function(x,y) x^2+y^2, xlim=c(-5,5), ylim=c(0,10)) ##' ##' f <- function(x,y) 1-sqrt(x^2+y^2) ##' surface(f,xlim=c(-1,1),alpha=0.9,aspect=c(1,1,0.75)) ##' surface(f,xlim=c(-1,1),clut=heat.colors(128)) ##' ##play3d(spin3d(axis=c(0,0,1), rpm=8), duration=5) ##' } ##' ##' if (interactive()) { ##' surface(function(x) dmvn(x,sigma=diag(2)),c(-3,3),lit=FALSE,smooth=FALSE,box=FALSE,alpha=0.8) ##' surface(function(x) dmvn(x,sigma=diag(2)),c(-3,3),box=FALSE,specular="black")##' ##' } ##' ##' if (!inherits(try(find.package("fields"),silent=TRUE),"try-error")) { ##' f <- function(x,y) 1-sqrt(x^2+y^2) ##' ksmooth2(f,c(-1,1),rgl=FALSE,image=fields::image.plot) ##' } ksmooth2 <- function(x,data,h=NULL,xlab=NULL,ylab=NULL,zlab="",gridsize=rep(51L,2),...) { if (is.function(x)) { args <- c(list(f=x,h=h,xlab=xlab,ylab=ylab,zlab=zlab),list(...)) if (is.null(args$xlim) && !missing(data)) { if (is.list(data)) { args$xlim <- data[[1]] args$ylim <- data[[2]] } else args$xlim <- data } return(do.call(surface,args)) } if (inherits(x,"formula")) { x <- model.frame(x,data) } if (length(gridsize)==1) gridsize <- rep(gridsize,2) if (is.null(h)) h <- apply(as.matrix(x),2,sd)*nrow(x)^(-1/5) est <- KernSmooth::bkde2D(x, bandwidth=h, gridsize=gridsize) if (is.null(xlab)) xlab <- names(x)[1] if (is.null(ylab)) ylab <- names(x)[2] surface(est$fhat, x=est$x1, y=est$x2, est$fhat, xlab=xlab, ylab=ylab, zlab=zlab, ...) return(invisible(est)) } ##' @export surface <- function(f,xlim=c(0,1),ylim=xlim,n=rep(100,2),col,clut="gold",clut.center,x,y,rgl=TRUE,expand=0.5,nlevels=10,col.contour="black",contour=TRUE,persp=TRUE,image="image",...) 
{ if (missing(x)) { if (length(n)==1) n <- rep(n,2) x <- seq(xlim[1],xlim[2],length.out=n[1]) y <- seq(ylim[1],ylim[2],length.out=n[2]) } if (is.function(f)) { xy <- as.matrix(expand.grid(x,y)) if (inherits(try(f(c(x[1],y[1])),silent=TRUE),"try-error")) { f <- matrix(f(xy[,1],xy[,2]),nrow=length(x),ncol=length(y)) } else { val <- f(xy) if (length(val)0) ## J[eta.idx,eta.idx] <- 0; J <- J[-eta.idx,] ## Jeta[obs.idx,obs.idx] <- 0; Jeta <- J[-obs.idx,] A <- t(mom$A) Lambda <- A[y.idx,eta.idx,drop=FALSE] K <- A[y.idx,exo.idx,drop=FALSE] B <- A[eta.idx,eta.idx,drop=FALSE] I <- diag(nrow=nrow(B)) Gamma <- A[eta.idx,exo.idx,drop=FALSE] V <- mom$P Psi <- V[eta.idx,eta.idx] ## Residual variance Theta <- V[y.idx,y.idx] ## - IBi <- if (ncol(I)>0) solve(I-B) else I LIBi <- Lambda%*%IBi Phi <- LIBi%*%Gamma + K Veta.x <- IBi%*%Psi%*%IBi ## Variance of eta given x COVetay.x <- Veta.x%*%t(Lambda) ## Covariance of eta,y given x ## Vy.x <- Lambda%*%COVetay.x + Theta ## Omega Vy.x <- LIBi%*%Psi%*%t(LIBi) + Theta if (!is.null(X)) { Ey.x <- t(apply(as.matrix(X)%*% t(LIBi%*%Gamma + K),1,function(x) x + mom$v[y.idx])) } else Ey.x <- NULL Sigma <- mom$Cfull CV <- COVetay.x%*%Vy.x ## Sigma <- Vy.x + Phi%*%varX%*%t(Phi) return(list(Lambda=Lambda, K=K, B=B, I=I, Gamma=Gamma, Psi=Psi, Theta=Theta, IBi=IBi, LIBi=LIBi, Phi=Phi, Vy.x=Vy.x, Veta.x=Veta.x, COVetay.x=COVetay.x, CV=CV, Ey.x=Ey.x)) } lava/R/zgetmplus.R0000644000176200001440000002152013162174023013552 0ustar liggesusers##' Read Mplus output files ##' ##' @title Read Mplus output ##' @param infile Mplus output file ##' @param coef Coefficients only ##' @param \dots additional arguments to lower level functions ##' @author Klaus K. Holst ##' @export ##' @seealso getSAS `getMplus` <- function(infile="template.out",coef=TRUE,...) { ## mycmd <- paste("grep -n \"Estimates S.E. Est./S.E.\" | cut -f1 -d:", outfile) if (coef) { start <- "MODEL RESULTS" end1 <- "R-SQUARE" res0 <- findfileseg(infile,start)[-c(seq(5))] res <- sapply(res0,function(x) { val <- strsplit(x," ")[[1]]; val[val!=""] }) res <- res[unlist(lapply(res, length))!=0] coef.idx <- unlist(lapply(res, length))>3 lab.idx <- which(!coef.idx) count <- 0 mycoef <- c() myrownames <- c() for (i in seq_along(res)) { if (i %in% lab.idx) { count <- count+1 } else { val <- char2num(res[[i]][-1]) if (length(val)<5) val <- c(val,rep(0,5-length(val))) mycoef <- rbind(mycoef, val) myrownames <- c(myrownames, paste(paste(res[[lab.idx[count]]],collapse=" "),res[[i]][1]) ) } } rownames(mycoef) <- myrownames colnames(mycoef) <- c("Estimate","Std.Err","Z-value","Std","StdYX") return(mycoef) } start <- "Estimate S.E. Est./S.E." end1 <- "MODEL RESULTS" end2 <- "QUALITY OF NUMERICAL RESULTS" ## start <- "Estimate S.E. Est./S.E." 
## end1 <- "Beginning Time:" ## end2 <- "TECHNICAL" res <- findfileseg(infile,start,end1); ## res2 <- findfileseg(infile,start,end2); ## if (length(res)>length(res2)) ## res <- res2 cat(paste(res,"\n")) res <- findfileseg(infile, "TESTS OF MODEL FIT", "Chi-Square Test of Model Fit for the Baseline Model") cat(paste(res,"\n")) } `findfileseg` <- function(infile, startstring, endstring,nlines) { con <- file(infile, blocking = FALSE) inp <- readLines(con) close(con) nullstring <- 0 linestart <- 1; lineend <- length(inp) mycmd1 <- paste0("grep -n \"",startstring,"\" ", infile); a1 <- system(mycmd1,intern=TRUE); if (length(a1)>0) linestart <- char2num(strsplit(a1,":")[[1]][1]) nn <- length(inp) if (!missing(nlines)) nn <- linestart+nlines if (missing(endstring)) { for (i in seq(linestart,nn)) { lineend <- i-1 if (inp[i]==inp[i-1]) break; } } else { mycmd2 <- paste0("grep -n \"",endstring,"\" ", infile); a2 <- system(mycmd2,intern=TRUE); if (length(a2)>0) lineend <- char2num(strsplit(a2,":")[[1]][1]) } res <- inp[linestart:lineend-1] return(res) } ################################################## ### Generate code and run mplus... ################################################## `mplus` <- function(file="template.mplus",wait=TRUE,intern=TRUE,...) { if (!file.exists(file)) file <- paste0(file,".mplus") if (!file.exists(file)) stop("File does not exist") if (!exists("winecmd")) winecmd <- "wine" if (!exists("mplus.directory")) mplus.directory <- "" mycmd <- paste0(winecmd, " \"", mplus.directory, "mplus.exe\" ", file) system(mycmd, wait=wait, intern=TRUE) prefix <- strsplit(file, ".", fixed=TRUE)[[1]][1] return(getMplus(paste0(prefix,".out"),coef=TRUE)) } `toMplus.data.frame` <- function(x, datafile="data.tab", mplusfile="template.mplus", na.string=".", model="!f1 by x1;", analysis=NULL, categorical=NULL, group, run=FALSE, techout=FALSE,missing=TRUE,...) 
{ write.table(x, file=datafile, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE, na=na.string) varnames <- c() ngroups <- ceiling(ncol(x)/4) for (i in seq_len(ngroups)) { newline <- c("\t",colnames(x)[((i-1)*4+1):min(ncol(x), (i*4))],"\n") varnames <- c(varnames, newline) } ### mplusfilesummary <- paste0("summary",mplusfile) ### zz <- file(mplusfilesummary, "w") # open an output file connection ### cat(file=zz, "TITLE: Summary-statistics\n") ### cat(file=zz, "!-----------------------------------------------------\n") ### cat(file=zz,"DATA:\n\tFILE=\"", datafile, "\";\n") ### cat(file=zz,"VARIABLE:\n\tNAMES ARE\n") ### cat(file=zz, varnames, ";\n\n") ### cat(file=zz, "!-----------------------------------------------------\n") ### cat(file=zz, "USEVARIABLES=\n!?;\n") ### cat(file=zz, "!CATEGORICAL=?;\n") ### cat(file=zz, "!MISSING=?;\n") ### cat(file=zz, "!IDVARIABLE=?;\n") ### cat(file=zz, "!-----------------------------------------------------\n") ### cat(file=zz, "ANALYSIS:\n\tTYPE IS BASIC;\n") ### cat(file=zz, "!-----------------------------------------------------\n") ### cat(file=zz, "OUTPUT:\t\tstandardized sampstat;") ### close(zz) zz <- file(mplusfile, "w") # open an output file connection cat(file=zz, "TITLE: ...\n") cat(file=zz, "!-----------------------------------------------------\n") cat(file=zz,"DATA:\n\tFILE=\"", datafile, "\";\n") cat(file=zz,"VARIABLE:\n\tNAMES ARE\n") cat(file=zz, varnames, ";\n") if (!missing(group)) { groups <- unique(x[,group]) mygroupdef <- paste("(",paste(groups,groups,sep="=",collapse=","),")") cat(file=zz, "GROUPING IS ", group, mygroupdef, ";\n", sep="") } else { cat(file=zz, "!GROUPING IS g (1=male, 2=female);\n") } cat(file=zz, "USEVARIABLES=\n", varnames,";\n") if (!is.null(categorical)) cat(file=zz, paste("CATEGORICAL=",paste(categorical,collapse=" "),";\n")) cat(file=zz, "MISSING=",na.string,";\n",sep="") cat(file=zz, "!IDVARIABLE=?;\n") cat(file=zz, "!DEFINE: define new variables here;\n") cat(file=zz, "!SAVEDATA: save data and/or results;\n\n") if (is.null(analysis)) { cat(file=zz, "ANALYSIS: TYPE=MEANSTRUCTURE"); if (missing) cat(file=zz, " MISSING;\n") else cat(file=zz,";\n") cat(file=zz, "ESTIMATOR=ML;\n") cat(file=zz, "INFORMATION=EXPECTED;\n") cat(file=zz, "ITERATIONS=5000;\n") cat(file=zz, "CONVERGENCE=0.00005;\n\n") } else { cat(file=zz,"ANALYSIS:\n") cat(file=zz, analysis,"\n") } cat(file=zz, "!-----------------------------------------------------\n") cat(file=zz, "MODEL:\n") cat(file=zz, model,"\n") cat(file=zz, "!-----------------------------------------------------\n") if (!techout) cat(file=zz, "OUTPUT: STANDARDIZED;\n") else cat(file=zz, "OUTPUT: MODINDICES(0); TECH1; TECH2; TECH5; STANDARDIZED;\n") cat(file=zz, "!\tSAMPSTAT;RESIDUAL;CINTERVAL;MODINDICES(0);\n") cat(file=zz, "!Other output options are:\n") cat(file=zz, "!\tSTANDARDIZED; !Standardized coefficients\n") cat(file=zz, "!\tH1SE; !Standard errors for the H1 model\n") cat(file=zz, "!\tH1TECH3; !Estimated covar,corr matrices for par. 
estimates\n") cat(file=zz, "!\tPATTERNS; !Summary of missing data patterns\n") cat(file=zz, "!\tFSCOEFFICIENT; !Factor score coefficients and posterior covar matrix\n") cat(file=zz, "!\tFSDERTERMINACY; !Factor score determinacy for each factor\n") cat(file=zz, "!\tTECH1; !Parameter specifications and starting values\n") cat(file=zz, "!\tTECH2 !Parameter derivatives;\n") cat(file=zz, "!\tTECH3; !Covar and Corr matrices for estimates\n") cat(file=zz, "!\tTECH4; !Estimated means and covar for the latent variables\n") cat(file=zz, "!\tTECH5; !Optimization matrix\n") cat(file=zz, "!\tTECH6; !Optimization for categorical variables\n") cat(file=zz, "!\tTECH7; !output for type Mixture\n") cat(file=zz, "!\tTECH8; !Output for type mixture\n") cat(file=zz, "!\tTECH9; !Error messages for MC study\n") cat(file=zz, "!\tMONTECARLO: File is\n") close(zz) if (run & exists("mplus")) { res <- mplus(mplusfile) outfile <- paste0(strsplit(mplusfile,".",fixed=TRUE)[[1]][1],".out") getMplus(outfile) return(res) } } `toMplus.lvmfit` <- function(x, model=NULL, data=model.frame(x), run=TRUE, categorical=NULL,##binary(Model(x)), mplusfile="template.mplus", ...) { mymodel <- "" M <- index(x)$M P <- index(x)$P nn <- vars(x) p <- length(nn) lat.var <- latent(x) lat.idx <- match(lat.var, vars(x)) for (i in seq_len(p)) { for (j in seq_len(p)) { if (M[i,j]!=0) { var1 <- nn[i]; var2 <- nn[j]; if (i %in% lat.idx & !(j %in% lat.idx)) {## & !(j %in% lat.idx)) { key <- " on " mymodel <- paste0(mymodel, "\n", var1, " by ", var2, ";") } else { mymodel <- paste0(mymodel, "\n", var2, " on ", var1, ";") } } } } for (i in seq_len(p-1)) { for (j in ((i+1):p)) { if (P[i,j]!=0) { var1 <- nn[i]; var2 <- nn[j]; mymodel <- paste0(mymodel, "\n", var1, " with ", var2, ";") } } } if (is.null(model)) model <- mymodel mydata <- subset(as.data.frame(data), select=setdiff(nn,lat.var)) toMplus.data.frame(mydata,model=mymodel,run=run, mplusfile=mplusfile, ...) } lava/R/sim.lvm.R0000644000176200001440000007435413162174023013122 0ustar liggesusers##' Simulate model ##' ##' Simulate data from a general SEM model including non-linear effects and ##' general link and distribution of variables. ##' ##' @aliases sim sim.lvmfit sim.lvm ##' simulate.lvmfit simulate.lvm ##' transform<- transform<-.lvm transform.lvm ##' functional functional<- functional.lvm functional<-.lvm ##' distribution distribution distribution<- distribution.lvm distribution<-.lvm ##' heavytail heavytail<- ##' weibull.lvm ##' binomial.lvm ##' poisson.lvm ##' uniform.lvm ##' beta.lvm ##' normal.lvm ##' lognormal.lvm ##' gaussian.lvm ##' GM2.lvm ##' GM3.lvm ##' probit.lvm ##' logit.lvm ##' pareto.lvm ##' student.lvm ##' chisq.lvm ##' coxGompertz.lvm ##' coxWeibull.lvm ##' coxExponential.lvm ##' aalenExponential.lvm ##' Gamma.lvm gamma.lvm ##' loggamma.lvm ##' categorical categorical<- ##' threshold.lvm ##' ones.lvm ##' sequence.lvm ##' @usage ##' \method{sim}{lvm}(x, n = NULL, p = NULL, normal = FALSE, cond = FALSE, ##' sigma = 1, rho = 0.5, X = NULL, unlink=FALSE, latent=TRUE, ##' use.labels = TRUE, seed=NULL, ...) 
##' @param x Model object ##' @param n Number of simulated values/individuals ##' @param p Parameter value (optional) ##' @param normal Logical indicating whether to simulate data from a ##' multivariate normal distribution conditional on exogenous variables hence ##' ignoring functional/distribution definition ##' @param cond for internal use ##' @param sigma Default residual variance (1) ##' @param rho Default covariance parameter (0.5) ##' @param X Optional matrix of covariates ##' @param unlink Return Inverse link transformed data ##' @param latent Include latent variables (default TRUE) ##' @param use.labels convert categorical variables to factors before applying transformation ##' @param seed Random seed ##' @param \dots Additional arguments to be passed to the low level functions ##' @author Klaus K. Holst ##' @keywords models datagen regression ##' @export ##' @examples ##' ################################################## ##' ## Logistic regression ##' ################################################## ##' m <- lvm(y~x+z) ##' regression(m) <- x~z ##' distribution(m,~y+z) <- binomial.lvm("logit") ##' d <- sim(m,1e3) ##' head(d) ##' e <- estimate(m,d,estimator="glm") ##' e ##' ## Simulate a few observation from estimated model ##' sim(e,n=5) ##' ################################################## ##' ## Poisson ##' ################################################## ##' distribution(m,~y) <- poisson.lvm() ##' d <- sim(m,1e4,p=c(y=-1,"y~x"=2,z=1)) ##' head(d) ##' estimate(m,d,estimator="glm") ##' mean(d$z); lava:::expit(1) ##' summary(lm(y~x,sim(lvm(y[1:2]~4*x),1e3))) ##' ################################################## ##' ### Gamma distribution ##' ################################################## ##' m <- lvm(y~x) ##' distribution(m,~y+x) <- list(Gamma.lvm(shape=2),binomial.lvm()) ##' intercept(m,~y) <- 0.5 ##' d <- sim(m,1e4) ##' summary(g <- glm(y~x,family=Gamma(),data=d)) ##' \dontrun{MASS::gamma.shape(g)} ##' args(lava::Gamma.lvm) ##' distribution(m,~y) <- Gamma.lvm(shape=2,log=TRUE) ##' sim(m,10,p=c(y=0.5))[,"y"] ##' ################################################## ##' ### Beta ##' ################################################## ##' m <- lvm() ##' distribution(m,~y) <- beta.lvm(alpha=2,beta=1) ##' var(sim(m,100,"y,y"=2)) ##' distribution(m,~y) <- beta.lvm(alpha=2,beta=1,scale=FALSE) ##' var(sim(m,100)) ##' ################################################## ##' ### Transform ##' ################################################## ##' m <- lvm() ##' transform(m,xz~x+z) <- function(x) x[1]*(x[2]>0) ##' regression(m) <- y~x+z+xz ##' d <- sim(m,1e3) ##' summary(lm(y~x+z + x*I(z>0),d)) ##' ################################################## ##' ### Non-random variables ##' ################################################## ##' m <- lvm() ##' distribution(m,~x+z+v+w) <- list(sequence.lvm(0,5),## Seq. 
0 to 5 by 1/n ##' ones.lvm(), ## Vector of ones ##' ones.lvm(0.5), ## 0.8n 0, 0.2n 1 ##' ones.lvm(interval=list(c(0.3,0.5),c(0.8,1)))) ##' sim(m,10) ##' ################################################## ##' ### Cox model ##' ### piecewise constant hazard ##' ################################################ ##' m <- lvm(t~x) ##' rates <- c(1,0.5); cuts <- c(0,5) ##' ## Constant rate: 1 in [0,5), 0.5 in [5,Inf) ##' distribution(m,~t) <- coxExponential.lvm(rate=rates,timecut=cuts) ##' \dontrun{ ##' d <- sim(m,2e4,p=c("t~x"=0.1)); d$status <- TRUE ##' plot(timereg::aalen(survival::Surv(t,status)~x,data=d, ##' resample.iid=0,robust=0),spec=1) ##' L <- approxfun(c(cuts,max(d$t)),f=1, ##' cumsum(c(0,rates*diff(c(cuts,max(d$t))))), ##' method="linear") ##' curve(L,0,100,add=TRUE,col="blue") ##' } ##' ################################################## ##' ### Cox model ##' ### piecewise constant hazard, gamma frailty ##' ################################################## ##' m <- lvm(y~x+z) ##' rates <- c(0.3,0.5); cuts <- c(0,5) ##' distribution(m,~y+z) <- list(coxExponential.lvm(rate=rates,timecut=cuts), ##' loggamma.lvm(rate=1,shape=1)) ##' \dontrun{ ##' d <- sim(m,2e4,p=c("y~x"=0,"y~z"=0)); d$status <- TRUE ##' plot(timereg::aalen(survival::Surv(y,status)~x,data=d, ##' resample.iid=0,robust=0),spec=1) ##' L <- approxfun(c(cuts,max(d$y)),f=1, ##' cumsum(c(0,rates*diff(c(cuts,max(d$y))))), ##' method="linear") ##' curve(L,0,100,add=TRUE,col="blue") ##' } ##' ## Equivalent via transform (here with Aalens additive hazard model) ##' m <- lvm(y~x) ##' distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) ##' distribution(m,~z) <- Gamma.lvm(rate=1,shape=1) ##' transform(m,t~y+z) <- prod ##' sim(m,10) ##' ## Shared frailty ##' m <- lvm(c(t1,t2)~x+z) ##' rates <- c(1,0.5); cuts <- c(0,5) ##' distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) ##' distribution(m,~z) <- loggamma.lvm(rate=1,shape=1) ##' \dontrun{ ##' mets::fast.reshape(sim(m,100),varying="t") ##' } ##' ################################################## ##' ### General multivariate distributions ##' ################################################## ##' \dontrun{ ##' m <- lvm() ##' distribution(m,~y1+y2,oratio=4) <- VGAM::rbiplackcop ##' ksmooth2(sim(m,1e4),rgl=FALSE,theta=-20,phi=25) ##' m <- lvm() ##' distribution(m,~z1+z2,"or1") <- VGAM::rbiplackcop ##' distribution(m,~y1+y2,"or2") <- VGAM::rbiplackcop ##' sim(m,10,p=c(or1=0.1,or2=4)) ##' } ##' m <- lvm() ##' distribution(m,~y1+y2+y3,TRUE) <- function(n,...) rmvn(n,sigma=diag(3)+1) ##' var(sim(m,100)) ##' ## Syntax also useful for univariate generators, e.g. 
##' m <- lvm(y~x+z) ##' distribution(m,~y,TRUE) <- function(n) rnorm(n,mean=1000) ##' sim(m,5) ##' distribution(m,~y,"m1",0) <- rnorm ##' sim(m,5) ##' sim(m,5,p=c(m1=100)) ##' ################################################## ##' ### Regression design in other parameters ##' ################################################## ##' ## Variance heterogeneity ##' m <- lvm(y~x) ##' distribution(m,~y) <- function(n,mean,x) rnorm(n,mean,exp(x)^.5) ##' if (interactive()) plot(y~x,sim(m,1e3)) ##' ## Alternaively, calculate the standard error directly ##' addvar(m) <- ~sd ## If 'sd' should be part of the resulting data.frame ##' constrain(m,sd~x) <- function(x) exp(x)^.5 ##' distribution(m,~y) <- function(n,mean,sd) rnorm(n,mean,sd) ##' if (interactive()) plot(y~x,sim(m,1e3)) ##' ## Regression on variance parameter ##' m <- lvm() ##' regression(m) <- y~x ##' regression(m) <- v~x ##' ##distribution(m,~v) <- 0 # No stochastic term ##' ## Alternative: ##' ## regression(m) <- v[NA:0]~x ##' distribution(m,~y) <- function(n,mean,v) rnorm(n,mean,exp(v)^.5) ##' if (interactive()) plot(y~x,sim(m,1e3)) ##' ## Regression on shape parameter in Weibull model ##' m <- lvm() ##' regression(m) <- y ~ z+v ##' regression(m) <- s ~ exp(0.6*x-0.5*z) ##' distribution(m,~x+z) <- binomial.lvm() ##' distribution(m,~cens) <- coxWeibull.lvm(scale=1) ##' distribution(m,~y) <- coxWeibull.lvm(scale=0.1,shape=~s) ##' eventTime(m) <- time ~ min(y=1,cens=0) ##' if (interactive()) { ##' d <- sim(m,1e3) ##' require(survival) ##' (cc <- coxph(Surv(time,status)~v+strata(x,z),data=d)) ##' plot(survfit(cc) ,col=1:4,mark.time=FALSE) ##' } ##' ################################################## ##' ### Categorical predictor ##' ################################################## ##' m <- lvm() ##' ## categorical(m,K=3) <- "v" ##' categorical(m,labels=c("A","B","C")) <- "v" ##' regression(m,additive=FALSE) <- y~v ##' \dontrun{ ##' plot(y~v,sim(m,1000,p=c("y~v:2"=3))) ##' } ##' m <- lvm() ##' categorical(m,labels=c("A","B","C"),p=c(0.5,0.3)) <- "v" ##' regression(m,additive=FALSE,beta=c(0,2,-1)) <- y~v ##' ## equivalent to: ##' ## regression(m,y~v,additive=FALSE) <- c(0,2,-1) ##' regression(m,additive=FALSE,beta=c(0,4,-1)) <- z~v ##' table(sim(m,1e4)$v) ##' glm(y~v, data=sim(m,1e4)) ##' glm(y~v, data=sim(m,1e4,p=c("y~v:1"=3))) ##' ##' transform(m,v2~v) <- function(x) x=='A' ##' sim(m,10) ##' ##' ################################################## ##' ### Pre-calculate object ##' ################################################## ##' m <- lvm(y~x) ##' m2 <- sim(m,'y~x'=2) ##' sim(m,10,'y~x'=2) ##' sim(m2,10) ## Faster ##' "sim" <- function(x,...) UseMethod("sim") ##' @export sim.lvmfit <- function(x,n=nrow(model.frame(x)),p=pars(x),xfix=TRUE,...) { m <- Model(x) if ((nrow(model.frame(x))==n) & xfix) { X <- exogenous(x) mydata <- model.frame(x) for (pred in X) { distribution(m, pred) <- list(mydata[,pred]) } } sim(m,n=n,p=p,...) } ##' @export sim.lvm <- function(x,n=NULL,p=NULL,normal=FALSE,cond=FALSE,sigma=1,rho=.5, X=NULL,unlink=FALSE,latent=TRUE,use.labels=TRUE,seed=NULL,...) 
{ if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } v.env <- c("A","M","P","PP","PPdiag","xx","vv","mdist","mdistnam","mii", "nn","mu","xf","xfix","X", "vartrans","multitrans","multitrans.idx", "X.idx","ii.mvn","xconstrain.idx","xconstrain", "xconstrain.par","covparnames","exo_constrainY") setup <- is.null(n) ## Save environment (variables v.env) and return sim object loadconfig <- !is.null(x$sim.env) && !setup && (length(list(...))==0 && length(p)==0) if (loadconfig) { for (v in setdiff(v.env,"X")) assign(v, x$sim.env[[v]]) if (is.null(X)) X <- x$sim.env[['X']] } else { if (!is.null(X)) { n <- nrow(X) } if (!is.null(n) && n<1) return(NULL) p <- c(p,unlist(list(...))) xx <- exogenous(x) if (!is.null(p)) { i1 <- na.omit(c(match(names(p),xx), match(names(p),paste0(xx,lava.options()$symbol[2],xx)))) if (length(i1)>0) covariance(x) <- xx[i1] } ## index(x) <- reindex(x) vv <- vars(x) nn <- setdiff(vv,parameter(x)) mu <- unlist(lapply(x$mean, function(l) ifelse(is.na(l)|is.character(l),0,l))) xf <- intersect(unique(parlabels(x)),xx) xfix <- c(randomslope(x),xf); if (length(xfix)>0) normal <- FALSE ## Match parameter names if ((!is.null(names(p)) && all(!is.na(names(p)))) || length(p)!=(index(x)$npar+index(x)$npar.mean+index(x)$npar.ex) | is.null(names(p))) { nullp <- is.null(p) p0 <- p ep <- NULL ei <- which(index(x)$e1==1) if (length(ei)>0) ep <- unlist(x$expar)[ei] p <- c(rep(1, index(x)$npar+index(x)$npar.mean),ep) p[seq_len(index(x)$npar.mean)] <- 0 p[index(x)$npar.mean + variances(x)] <- sigma p[index(x)$npar.mean + offdiags(x)] <- rho if (!nullp) { c1 <- coef(x,mean=TRUE,fix=FALSE) c2 <- coef(x,mean=TRUE,fix=FALSE,labels=TRUE) idx1 <- na.omit(match(names(p0),c1)) idx11 <- na.omit(match(names(p0),c2)) idx2 <- na.omit(which(names(p0)%in%c1)) idx22 <- na.omit(which(names(p0)%in%c2)) if (length(idx1)>0 && !is.na(idx1)) p[idx1] <- p0[idx2] if (length(idx11)>0 && !is.na(idx11)) p[idx11] <- p0[idx22] } } M <- modelVar(x,p,data=NULL) A <- M$A; P <- M$P if (!is.null(M$v)) mu <- M$v ## Square root of residual variance matrix PP <- with(svd(P), v%*%diag(sqrt(d),nrow=length(d))%*%t(u)) ## Multivariate distributions mdist <- distribution(x,multivariate=TRUE)$var mdistnam <- names(mdist) mii <- match(mdistnam,vars(x)) if (length(distribution(x))>0 ) { ii <- match(names(distribution(x)),vv) ii.mvn <- setdiff(seq(ncol(P)),c(ii,mii)) } else { ii.mvn <- seq(ncol(P)) } PPdiag <- sum(abs(offdiag(PP[ii.mvn,ii.mvn,drop=FALSE])^2))<1e-20 } if (!setup) { E <- matrix(0,ncol=ncol(P),nrow=n) if (length(ii.mvn)>0) { ## Error term for conditional normal distributed variables if (PPdiag) { for (i in ii.mvn) E[,i] <- rnorm(n,sd=PP[i,i]) } else { E[,ii.mvn] <- matrix(rnorm(length(ii.mvn)*n),ncol=length(ii.mvn))%*%PP[ii.mvn,ii.mvn,drop=FALSE] } } if (length(mdistnam)>0) { fun <- distribution(x,multivariate=TRUE)$fun for (i in seq_along(fun)) { mv <- names(which(unlist(mdist)==i)) ii <- match(mv,vv) E[,ii] <- distribution(x,multivariate=TRUE)$fun[[i]](n,p=p,object=x) # ,...) 
} } ## Simulate exogenous variables (covariates) res <- matrix(0,ncol=length(nn),nrow=n) colnames(res) <- nn } if (!loadconfig) { vartrans <- names(x$attributes$transform) multitrans <- multitrans.idx <- NULL if (length(x$attributes$multitransform)>0) { multitrans <- unlist(lapply(x$attributes$multitransform,function(z) z$y)) for (i in (seq_along(x$attributes$multitransform))) { multitrans.idx <- c(multitrans.idx,rep(i,length(x$attributes$multitransform[[i]]$y))) } } xx <- unique(c(exogenous(x, latent=FALSE, index=TRUE),xfix)) xx <- setdiff(xx,vartrans) X.idx <- match(xx,vv) } if (!setup) { res[,X.idx] <- t(mu[X.idx]+t(E[,X.idx])) if (is.null(X)) { if (!is.null(xx) && length(xx)>0) for (i in seq_along(xx)) { mu.x <- mu[X.idx[i]] dist.x <- distribution(x,xx[i])[[1]] if (is.list(dist.x) && is.function(dist.x[[1]])) dist.x <- dist.x[[1]] if (is.list(dist.x)) { dist.x <- dist.x[[1]] if (length(dist.x)==1) dist.x <- rep(dist.x,n) } if (is.function(dist.x)) { res[,X.idx[i]] <- dist.x(n=n,mu=mu.x,var=P[X.idx[i],X.idx[i]]) } else { if (is.null(dist.x) || is.na(dist.x)) { } else { if (length(dist.x)!=n) stop("'",vv[X.idx[i]], "' fixed at length ", length(dist.x)," != ",n,".") res[,X.idx[i]] <- dist.x ## Deterministic } } } } else { res[,X.idx] <- X[,xx] } } simuled <- c(xx) resunlink <- NULL if (unlink) { resunlink <- res } if ( normal | ( is.null(distribution(x)) & is.null(functional(x)) & is.null(constrain(x))) ) { if(cond) { ## Simulate from conditional distribution of Y given X mypar <- pars(x,A,P,mu) Ey.x <- predict(x, mypar, data.frame(res)) Vy.x <- attributes(Ey.x)$cond.var PP <- with(svd(Vy.x), v%*%diag(sqrt(d),nrow=length(d))%*%t(u)) yy <- Ey.x + matrix(n*ncol(Vy.x),ncol=ncol(Vy.x))%*%PP res <- cbind(yy, res[,xx]); colnames(res) <- c(colnames(Vy.x),xx) return(res) } ## Simulate from sim. 
distribution (Y,X) (mv-normal) I <- diag(nrow=length(nn)) IAi <- Inverse(I-t(A)) colnames(E) <- vv dd <- t(apply(heavytail.sim.hook(x,E),1,function(x) x+mu)) res <- dd%*%t(IAi) colnames(res) <- vv } else { if (!loadconfig) { xc <- index(x)$vars xconstrain.idx <- unlist(lapply(lapply(constrain(x),function(z) attributes(z)$args),function(z) length(intersect(z,xc))>0)) xconstrain <- intersect(unlist(lapply(constrain(x),function(z) attributes(z)$args)),xc) xconstrain.par <- names(xconstrain.idx)[xconstrain.idx] covparnames <- unique(as.vector(covariance(x)$labels)) exo_constrainY <- intersect(exogenous(x),names(x$constrainY)) } if (setup) { sim.env <- c() sim.env[v.env] <- list(NULL) for (v in v.env) if (!is.null(get(v))) sim.env[[v]] <- get(v) x$sim.env <- sim.env return(x) } if (length(xconstrain)>0) for (i in which(xconstrain.idx)) { ff <- constrain(x)[[i]] myargs <- attributes(ff)$args D <- matrix(0,n,length(myargs)) for (j in seq_len(ncol(D))) { if (myargs[j]%in%xconstrain) D[,j] <- res[,myargs[j]] else D[,j] <- M$parval[[myargs[j]]] } val <- try(apply(D,1,ff),silent=TRUE) if (inherits(val,"try-error") || NROW(val)0) { warg <- setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(x))[i] y <- names(which(unlist(lapply(intercept(x),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain <- c(xconstrain,list(el)) } } yconstrain <- unlist(lapply(xconstrain,function(x) x$endo)) for (i in exo_constrainY) { cc <- x$constrainY[[i]] args <- cc$args args <- if (is.null(args) || length(args)==0) res[,i] else res[,args] res[,i] <- cc$fun(args,p) # ,...) } res <- data.frame(res) if (length(vartrans)>0) { parvals <- parpos(x)$parval parvalsnam <- setdiff(names(parvals),xx) if (length(parvalsnam)>0) { Parvals <- p[unlist(parvals)]; res <- cbind(res, cbind(rep(1,nrow(res)))%x%rbind(Parvals)) colnames(res)[seq(length(Parvals))+ncol(res)-length(Parvals)] <- names(parvals) } } leftovers <- c() itercount <- 0 while (length(simuled)0) stop("Infinite loop (feedback).") itercount <- itercount+1 } for (i in leftovers) { if (i%in%vartrans) { xtrans <- x$attributes$transform[[i]]$x if (all(xtrans%in%c(simuled,names(parvals)))) { xtr <- res[,xtrans,drop=FALSE] if (use.labels) { lb <- x$attributes$labels lb.idx <- na.omit(match(names(lb),xtrans)) ## For categorical variables turn them into factors so we can ## use the actual labels in function calls/transform if (length(lb.idx)>0) { xtr <- as.data.frame(xtr) for (lb0 in lb.idx) { lab <- lb[[names(xtr)[lb0]]] xtr[,lb0] <- factor(xtr[,lb0],levels=seq_along(lab)-1,labels=lab) } } } suppressWarnings(yy <- with(x$attributes$transform[[i]], fun(xtr))) ##fun(res[,xtrans]))) if (NROW(yy) != NROW(res)) { ## apply row-wise res[,i] <- with(x$attributes$transform[[i]], ##apply(res[,xtrans,drop=FALSE],1,fun)) apply(xtr,1,fun)) } else { colnames(yy) <- NULL res[,i] <- yy } simuled <- c(simuled,i) } } else if (i%in%multitrans) { idx0 <- match(i,multitrans) idx <- multitrans.idx[idx0] mtval <- x$attributes$multitransform[[idx]] if (all(mtval$x%in%simuled)) { res[,mtval$y] <- mtval$fun(res[,mtval$x]) simuled <- c(simuled,mtval$y) break; } } else { ipos <- which(yconstrain%in%i) if (length(ipos)==0 || all(xconstrain[[ipos]]$exo%in%simuled)) { pos <- match(i,vv) relations <- colnames(A)[A[,pos]!=0] simvars <- x$attributes$simvar[[i]] dist.i <- distribution(x,i)[[1]] ## User-specified 
distribution function dist.xx <- NULL if (is.function(dist.i)) { dist.args0 <- names(formals(dist.i)) dist.args <- setdiff(dist.args0,c("n","mean","mu","var","...")) dist.xx <- intersect(names(res),dist.args) ## Variables influencing distribution } if (all(c(relations,simvars,dist.xx)%in%simuled)) { ## Only depending on already simulated variables if (x$mean[[pos]]%in%xconstrain.par && length(ipos)==0) { mu.i <- res[,x$mean[[pos]] ] } else { mu.i <- mu[pos] } if (length(ipos)>0) { pp <- unlist(M$parval[xconstrain[[ipos]]$warg]) myidx <- with(xconstrain[[ipos]],order(c(wargidx,exoidx))) ## myidx <- with(xconstrain[[ipos]], ## match(attr(func,"args"), c(warg,exo))) X <- with(xconstrain[[ipos]], if (length(pp)>0) cbind(rbind(pp)%x%cbind(rep(1,nrow(res))), res[,exo,drop=FALSE]) else res[,exo,drop=FALSE]) yy <- try(with(xconstrain[[ipos]], func(X[,myidx])),silent=TRUE) if (NROW(yy) != NROW(res)) { ## apply row-wise mu.i <- #mu.i + with(xconstrain[[ipos]], apply(res[,exo,drop=FALSE],1, function(x) func( unlist(c(pp,x))[myidx]))) } else { mu.i <- ##mu.i+ yy } } for (From in relations) { f <- functional(x,i,From)[[1]] if (!is.function(f)) f <- function(x,...) x reglab <- regfix(x)$labels[From,pos] if (reglab%in%c(xfix,xconstrain.par)) { if (is.function(f)) { if (length(formals(f))>1) { mu.i <- mu.i + res[,reglab]*f(res[,From],p) } else { mu.i <- mu.i + res[,reglab]*f(res[,From]) } } else mu.i <- mu.i + res[,reglab]*res[,From] } else { if (is.function(f)) { if (length(formals(f))>1) { mu.i <- mu.i + A[From,pos]*f(res[,From],p) } else { mu.i <- mu.i + A[From,pos]*f(res[,From]) } } else mu.i <- mu.i + A[From,pos]*res[,From] } } if (!is.function(dist.i)) { res[,pos] <- mu.i + E[,pos] if (unlink) resunlink[,pos] <- res[,pos] } else { if (length(simvars)>0) { ## Depends on mu and also on other variables (e.g. time-depending effect) if (length(mu.i)==1) mu.i <- rep(mu.i,n) mu.i <- cbind("m0"=mu.i,res[,simvars,drop=FALSE]) } new.args <- list(n=n) mu.arg <- intersect(c("mean","mu"),dist.args0) if (length(mu.arg)>0) { new.args <- c(new.args,list(mu.i)) names(new.args)[length(new.args)] <- mu.arg[1] } var.arg <- intersect(c("var"),dist.args0) if (length(var.arg)>0) { new.args <- c(new.args,list(P[pos,pos])) names(new.args)[length(new.args)] <- var.arg[1] } for (jj in dist.xx) { new.args <- c(new.args,list(res[,jj,drop=TRUE])) names(new.args)[length(new.args)] <- jj } res[,pos] <- do.call(dist.i,new.args) if (unlink) resunlink[,pos] <- mu.i } if (length(x$constrainY)>0 && i%in%names(x$constrainY)) { cc <- x$constrainY[[i]] args <- cc$args args <- if (is.null(args) || length(args)==0) res[,pos] else { ii <- intersect(names(M$parval),args) args0 <- args args <- res[,intersect(args,colnames(res)),drop=FALSE] if (length(ii)>0) { pp <- rbind(unlist(M$parval[ii]))%x%cbind(rep(1,n)) colnames(pp) <- ii args <- cbind(res,pp)[,args0,drop=FALSE] } } res[,pos] <- cc$fun(args,p) # ,...) } simuled <- c(simuled,i) } } } } } res <- res[,nn,drop=FALSE] } res <- as.data.frame(res) myhooks <- gethook("sim.hooks") for (f in myhooks) { res <- do.call(f, list(x=x,data=res,p=p,modelpar=M)) } if (unlink) res <- resunlink res <- as.data.frame(res) self <- x$attributes$selftransform for (v in names(self)) { res[,v] <- self[[v]](res[,v]) } if (!latent && length(latent(x))>0) return(subset(res[,-which(colnames(res)%in%latent(x))])) return(res) } ##' @export simulate.lvm <- function(object,nsim,seed=NULL,...) { sim(object,nsim,seed=seed,...) } ##' @export simulate.lvmfit <- function(object,nsim,seed=NULL,...) 
{ sim(object,nsim,seed=seed,...) } lava/R/vars.R0000644000176200001440000000652413162174023012502 0ustar liggesusers##' Extract variable names from latent variable model ##' ##' Extract exogenous variables (predictors), endogenous variables (outcomes), ##' latent variables (random effects), manifest (observed) variables from a ##' \code{lvm} object. ##' ##' \code{vars} returns all variables of the \code{lvm}-object including ##' manifest and latent variables. Similarily \code{manifest} and \code{latent} ##' returns the observered resp. latent variables of the model. ##' \code{exogenous} returns all manifest variables without parents, e.g. ##' covariates in the model, however the argument \code{latent=TRUE} can be used ##' to also include latent variables without parents in the result. Pr. default ##' \code{lava} will not include the parameters of the exogenous variables in ##' the optimisation routine during estimation (likelihood of the remaining ##' observered variables conditional on the covariates), however this behaviour ##' can be altered via the assignment function \code{exogenous<-} telling ##' \code{lava} which subset of (valid) variables to condition on. Finally ##' \code{latent} returns a vector with the names of the latent variables in ##' \code{x}. The assigment function \code{latent<-} can be used to change the ##' latent status of variables in the model. ##' ##' @aliases vars vars.lvm vars.lvmfit latent latent<- latent.lvm latent<-.lvm ##' latent.lvmfit latent.multigroup manifest manifest.lvm manifest.lvmfit ##' manifest.multigroup exogenous exogenous<- exogenous.lvm exogenous<-.lvm ##' exogenous.lvmfit exogenous.multigroup endogenous endogenous.lvm ##' endogenous.lvmfit endogenous.multigroup ##' @usage ##' ##' vars(x,...) ##' ##' endogenous(x,...) ##' ##' exogenous(x,...) ##' ##' manifest(x,...) ##' ##' latent(x,...) ##' ##' \method{exogenous}{lvm}(x,silent = FALSE, xfree = TRUE,...) <- value ##' ##' \method{exogenous}{lvm}(x,latent=FALSE,index=TRUE,...) ##' ##' \method{latent}{lvm}(x,clear=FALSE,...) <- value ##' ##' @param x \code{lvm}-object ##' @param latent Logical defining whether latent variables without parents ##' should be included in the result ##' @param index For internal use only ##' @param clear Logical indicating whether to add or remove latent variable ##' status ##' @param silent Suppress messages ##' @param xfree For internal use only ##' @param value Formula or character vector of variable names. ##' @param \dots Additional arguments to be passed to the low level functions ##' @return Vector of variable names. ##' @author Klaus K. Holst ##' @seealso \code{\link{endogenous}}, \code{\link{manifest}}, ##' \code{\link{latent}}, \code{\link{exogenous}}, \code{\link{vars}} ##' @keywords models regression ##' @examples ##' ##' g <- lvm(eta1 ~ x1+x2) ##' regression(g) <- c(y1,y2,y3) ~ eta1 ##' latent(g) <- ~eta1 ##' endogenous(g) ##' exogenous(g) ##' identical(latent(g), setdiff(vars(g),manifest(g))) ##' ##' @export `vars` <- function(x,...) UseMethod("vars") ##' @export `vars.graph` <- function(x,...) { graph::nodes(x) } ##' @export `vars.lvm` <- function(x,...) { colnames(x$M) } ##' @export `vars.lvmfit` <- function(x,...) { vars(Model(x),...) } ##' @export vars.list <- function(x,...) { varlist <- c() for (i in seq_along(x)) { varlist <- c(varlist, vars(x[[i]])) } varlist <- unique(varlist) return(varlist) } ##' @export `vars.lm` <- function(x,...) 
{ c(endogenous(x),exogenous(x)) } lava/R/summary.R0000644000176200001440000001247513162174023013226 0ustar liggesusers###{{{ summary.lvm ##' @export `summary.lvm` <- function(object,...) { k <- length(vars(object)) ## cat("Latent Variable Model \n\twith: ", k, " variables.\n", sep=""); print(object,print.transform=FALSE,...) if (length(transform(object))>0) { cat("\nTransformations:\n") print(transform(object),quote=FALSE,...) } cat("\n") if (length(index(object))>0) cat("Number of free parameters: ", with(index(object),npar+npar.mean+npar.ex),"\n", sep="") if (k==0) return() ##cat("Npar=", index(object)$npar, "+", index(object)$npar.mean, "\n", sep="") cat("\n") print(regression(object),...) print(covariance(object),...) print(intercept(object),...) if (length(object$exfix)>0) { cat("Additional parameters:\n") val <- unlist(object$exfix) M <- rbind(val); colnames(M) <- names(val) rownames(M) <- " " print(M,quote=FALSE,...) } if (length(constrain(object))>0) { cat("Non-linear constraints:\n") print(constrain(object),quote=FALSE,...) } ## printmany(object$cov, printmany(object$covpar, object$covfix, name1="Labels:", name2="Fixed:", print=FALSE), name1="covariance:") cat("\n") } ###}}} summary.lvm ###{{{ summary.lvmfit ##' @export `summary.lvmfit` <- function(object,std="xy", level=9, labels=2, ...) { cc <- CoefMat(object,labels=labels,std=std,level=level,...) mycoef <- coef(object,level=9) nlincon <- attributes(mycoef)$nlincon nonexo <- setdiff(vars(object),index(Model(object))$exogenous) attributes(mycoef) <- attributes(mycoef)[1:2] mygof <- object$opt$summary.message if (is.null(mygof)) { mygof <- gof } if (class(object)[1]=="lvm.missing") { nn <- unlist(lapply(object$multigroup$data, nrow)) nc <- nn[object$cc] if (length(nc)==0) nc <- 0 ngroup <- object$multigroup$ngroup res <- list(object=object, coef=mycoef, coefmat=cc, nlincon=nlincon, gof=mygof(object), n=sum(nn), nc=nc, ngroup=ngroup, varmat=modelVar(object)$P[nonexo,nonexo], latent=latent(object), opt=object$opt, vcov=vcov(object), estimator=object$estimator, rsq=rsq(object)) } else { n <- nrow(model.frame(object)) if (is.null(n)) n <- model.frame(object)$n res <- list(coef=mycoef, coefmat=cc, nlincon=nlincon, gof=mygof(object), n=n, nc=n, latent=latent(object), opt=object$opt, vcov=vcov(object), estimator=object$estimator, rsq=rsq(object))##, varmat=modelVar(object)$P[nonexo,nonexo]) } class(res) <- "summary.lvmfit" res } ##' @export print.summary.lvmfit <- function(x,varmat=TRUE,...) { if (!is.null(x$control$method)) { l2D <- sum(x$opt$grad^2) rnkV <- qr(x$vcov)$rank if (l2D>1e-2) warning("Possible problems with convergence!") cat("||score||^2=",l2D,"\n",sep="") np <- nrow(x$vcov) if (rnkV1e-2) warning("Possible problems with convergence!") cat("||score||^2=",l2D,"\n") cat("Latent variables:", x$latent, "\n") print(x$object,...) ##print(x$coefmat,quote=FALSE,right=TRUE) printline() if (!is.null(attributes(x$coefmat)$nlincon)) { cat("Non-linear constraints:\n") print(attributes(x$coefmat)$nlincon) printline() } cat("Estimator:",x$estimator,"\n") printline() if (!is.null(x$gof)) { print(x$gof) printline() } invisible(x) } ###}}} summary.multigroupfit ###{{{ summary.multigroup ##' @export summary.multigroup <- function(object,...) { for (m in object$lvm) print(m,...) 
print(object) invisible(object) } ###}}} lava/R/deriv.R0000644000176200001440000001727213162174023012642 0ustar liggesusers##' @export deriv.lvm <- function(expr, p, mom, conditional=FALSE, meanpar=TRUE, mu=NULL, S=NULL, second=FALSE, zeroones=FALSE, all=!missing(mom),...) { if (missing(mom) & !missing(p)) { mom <- modelVar(expr,p,conditional=conditional,...) all <- TRUE if (mom$npar==length(p)) meanpar <- NULL } ii <- index(expr) npar.total <- npar <- ii$npar; npar.reg <- ii$npar.reg npar.mean <- ifelse(is.null(meanpar),0,ii$npar.mean) npar.ex <- ii$npar.ex meanpar <- seq_len(npar.mean) epar <- seq_len(npar.ex) nn <- expr$parpos if (is.null(nn)) { nn <- matrices2(expr, seq_len(npar+npar.mean+npar.ex)); nn$A[ii$M0!=1] <- 0 nn$P[ii$P0!=1] <- 0 nn$v[ii$v0!=1] <- 0 nn$e[ii$e0!=1] <- 0 } regr.idx <- seq_len(npar.reg) + npar.mean var.idx <- seq_len(npar-npar.reg) + (npar.mean + npar.reg) mean.idx <- seq_len(npar.mean) npar.total <- npar+length(mean.idx) epar.idx <- seq_len(npar.ex)+npar.total npar.total <- npar.total+length(epar.idx) if (zeroones | is.null(ii$dA)) { dimA <- length(ii$A) if (ii$sparse) { ## Not used yet... if (!requireNamespace("Matrix",quietly=TRUE)) stop("package Matrix not available") dP <- dA <- Matrix::Matrix(0, nrow=dimA, ncol=npar.total) } else { dP <- dA <- matrix(0, nrow=dimA, ncol=npar.total) } if (npar.reg>0) { ## dA[,regr.idx] <- sapply(regr.idx, function(i) izero(ii$reg[ii$reg[,2]==i,1],nrow(dA))) dA[,regr.idx] <- sapply(regr.idx, function(i) izero(which(t(nn$A)==i),nrow(dA)) ) } if (npar>npar.reg) { ## dP[,var.idx] <- sapply(var.idx, function(i) izero(ii$cov[ii$cov[,2]==i,1],nrow(dA)) ) dP[,var.idx] <- sapply(var.idx, function(i) izero(which(nn$P==i),nrow(dA)) ) } res <- list(dA=dA, dP=dP) { if (ii$sparse) { dv <- Matrix::Matrix(0, nrow=length(expr$mean), ncol=npar.total) } else { dv <- matrix(0, nrow=length(expr$mean), ncol=npar.total) } if (!is.null(meanpar) & npar.mean>0) ## dv[,mean.idx] <- sapply(mean.idx, function(i) izero(ii$mean[ii$mean[,2]==i,1],length(expr$mean)) ) dv[,mean.idx] <- sapply(mean.idx, function(i) izero(which(nn$v==i),length(expr$mean)) ) res <- c(res, list(dv=dv)) } } else { res <- with(ii, list(dA=dA, dP=dP, dv=dv)) for (pp in nn$parval) { res$dP[attributes(pp)$cov.idx,pp] <- 1 res$dv[attributes(pp)$m.idx,pp] <- 1 } } if (!all) return(res) ## Non-linear constraints: cname <- constrainpar <- c() if (!missing(p) && length(index(expr)$constrain.par)>0) { for (pp in index(expr)$constrain.par) { myc <- constrain(expr)[[pp]] if (!is.null(myc)) { parval <- mom$parval vals <- c(parval,constrainpar,mom$v,mom$e)[attributes(myc)$args] fval <- try(myc(unlist(vals)),silent=TRUE) fmat <- inherits(fval,"try-error") if (fmat) fval <- myc(rbind(unlist(vals))) if (!is.null(attributes(fval)$grad)) { if (fmat) { Gr <- attributes(fval)$grad(rbind(unlist(vals))) } else { Gr <- attributes(fval)$grad(unlist(vals)) } } else { ## if (!requireNamespace("numDeriv")) stop("numDeriv or analytical derivatives needed!") if (fmat) { Gr <- as.numeric(numDeriv::jacobian(myc, rbind(unlist(vals)))) } else { Gr <- as.numeric(numDeriv::jacobian(myc, unlist(vals))) } } mat.idx <- mom$constrain.idx[[pp]] cname <- c(cname,pp) attributes(fval)$grad <- Gr attributes(fval)$vals <- vals constrainpar <- c(constrainpar,list(fval)); names(constrainpar) <- cname for (jj in seq_len(length(vals))) { allpars <- c(nn$A[attributes(vals[[jj]])$reg.idx[1]], nn$P[attributes(vals[[jj]])$cov.idx[1]], nn$v[attributes(vals[[jj]])$m.idx[1]], nn$e[attributes(vals[[jj]])$e.idx[1]] ) if 
(!is.null(mat.idx$cov.idx)) res$dP[mat.idx$cov.idx,allpars] <- Gr[jj] if (!is.null(mat.idx$reg.idx)) res$dA[mat.idx$reg.tidx,allpars] <- Gr[jj] if (!is.null(res$dv) & !is.null(mat.idx$m.idx)) res$dv[mat.idx$m.idx,allpars] <- Gr[jj] } } } } if (is.null(ii$Kkk)) { nobs <- nrow(mom$J) ii$Ik <- diag(nrow=nobs) ii$Im <- diag(nrow=ncol(ii$A)) ## ii$Kkk <- commutation(nobs,sparse=FALSE) } N <- NCOL(ii$A) K <- nobs ## if (N>10) { if (!lava.options()$devel) { dG <- with(mom, kronprod(t(IAi),G,res$dA)) G3 <- with(mom, kronprod(G,G,res$dP)) GP <- with(mom,G%*%P) G1 <- with(mom, kronprod(GP,ii$Ik,dG)) G2 <- G1[as.vector(matrix(seq_len(K^2),K,byrow=TRUE)),] dS <- G1+G2+G3 } else { dG <- with(mom, kronprod(t(IAi),G,res$dA[,ii$parBelongsTo$reg,drop=FALSE])) G3 <- with(mom, kronprod(G,G,res$dP[,ii$parBelongsTo$cov,drop=FALSE])) GP <- with(mom,G%*%P) G1 <- with(mom, kronprod(GP,ii$Ik,dG)) G2 <- G1[as.vector(matrix(seq_len(K^2),K,byrow=TRUE)),] dS <- matrix(0,nrow=nrow(G1),ncol=ncol(res$dA)) dS[,ii$parBelongsTo$reg] <- G1+G2; dS[,ii$parBelongsTo$cov] <- G3 } ## } else { ## dG <- suppressMessages(with(mom, (t(IAi) %x% G) %*% (res$dA))) ## MM <- suppressMessages(with(mom, (G%*%P %x% ii$Ik))) ## G1<- MM %*% (dG) ## ## Commutatation product K*X: ## ## G2 <- with(mom, ii$Kkk%*%(G1)) ## G2 <- G1[as.vector(matrix(seq_len(K^2),K,byrow=TRUE)),] ## G3 <- with(mom, (G%x%G)%*%(res$dP)) ## dS <- G1+G2+G3 ## } ## } res <- c(res, list(dG=dG, dS=dS)) if (!is.null(mom$v)) { if (lava.options()$devel) { dG <- with(mom, kronprod(t(IAi),G,res$dA[,with(ii$parBelongsTo,c(mean,reg)),drop=FALSE])) } dxi <- with(mom, kronprod(rbind(v),dG)) ## with(mom, kronprod(rbind(v),ii$Ik,dG)) if (is.matrix(mom$v) && nrow(mom$v)>1) { ## reorder k <- nrow(dxi)/nrow(mom$v) idx0 <- seq(nrow(mom$v))*k-k+1 idx <- unlist(lapply(1:k,function(x) idx0+x-1)) dxi <- dxi[idx,,drop=FALSE] } if (!is.null(res$dv)) { if (!(lava.options()$devel)) { if (is.matrix(mom$v) && nrow(mom$v)>1) { ##dxi <- dxi + cbind(rep(1,nrow(mom$v)))%x%(mom$G%*%res$dv) dxi <- dxi + (mom$G%*%res$dv)%x%cbind(rep(1,nrow(mom$v))) } else { dxi <- dxi+ mom$G%*%res$dv } } else { dxi <- dxi+ mom$G%*%res$dv[,with(ii$parBelongsTo,c(mean,reg))] } } res <- c(res, list(dxi=dxi)) if (!is.null(mu)) { muv <- rbind(mu-mom$xi) dT <- suppressMessages(-t(ii$Ik%x%muv + muv%x%ii$Ik) %*% dxi) res <- c(res, list(dT=dT)) } } if (second) { k <- nrow(ii$A) K <- ii$Kkk ## commutation(k,k) I <- ii$Ik ## diag(k) I2 <- diag(nrow=k*k) ## KI <- I[as.vector(matrix(seq_len(K^2),K,byrow=TRUE)),] d2S1 <- t( (I %x% K %x% I) %*% ( ( I2 %x% as.vector(mom$G) )%*% dG + ( as.vector(mom$P) %x% I2 )%*% (dP) ) %*% t(dG) ) d2S2 <- K%*%d2S1 ### HK? d2S3 <- t( (I %x% K %x% I) %*% ( ( I2 %x% as.vector(mom$G) )%*% dG + ( as.vector(mom$G) %x% I2 )%*% dG ) %*% t(dP) ) vec.d2S <- d2S1+d2S3+d2S3 res <- c(res, list(d2vecS=vec.d2S)) } return(res) } lava/R/exogenous.R0000644000176200001440000000406713162174023013543 0ustar liggesusers##' @export `exogenous` <- function(x,...) UseMethod("exogenous") ##' @export "exogenous<-" <- function(x,...,value) UseMethod("exogenous<-") ##' @export `exogenous<-.lvm` <- function(x,silent=FALSE, xfree=TRUE, ...,value) { if (inherits(value,"formula")) { exogenous(x,...) 
<- all.vars(value) return(x) } not.in <- !(value%in%vars(x)) if (any(not.in)) { addvar(x,reindex=FALSE) <- value[not.in] } xorg <- exogenous(x) x$exogenous <- value if (!is.null(value) & xfree) { notexo.idx <- xorg[which(!(xorg%in%value))] if (length(notexo.idx)>0) { ## & mom) { if (length(notexo.idx)>1) { covariance(x,notexo.idx,pairwise=TRUE,exo=TRUE) <- NA } covariance(x,notexo.idx,vars(x),exo=TRUE) <- NA intercept(x,notexo.idx) <- x$mean[notexo.idx] } } ## x$exogenous <- value index(x) <- reindex(x) return(x) } ##' @export `exogenous.lvm` <- function(x,latent=FALSE,index=TRUE,...) { if (!index) { if (latent) { allvars <- vars(x) } else { allvars <- manifest(x) } M <- x$M res <- c() for (i in allvars) if (!any(M[,i]==1) & !any(is.na(x$cov[i,]))) # & any(M[i,]==1)) res <- c(res, i) return(res) } if (is.null(x$exogenous)) return(x$exogenous) if (all(!is.na(x$exogenous)) & !latent) { return(x$exogenous[x$exogenous%in%index(x)$manifest]) } if (!latent) return(index(x)$exogenous) return(exogenous(x,latent=latent,index=FALSE,...)) } ##' @export `exogenous.lvmfit` <- function(x,...) { exogenous(Model(x),...) } ##' @export exogenous.list <- function(x,...) { exolist <- c() endolist <- c() for (i in seq_along(x)) { exolist <- c(exolist, exogenous(x[[i]])) endolist <- c(endolist, endogenous(x[[i]])) } endolist <- unique(endolist) exolist <- unique(exolist) return(exolist[!(exolist%in%endolist)]) } ##' @export `exogenous.multigroup` <- function(x,...) { exogenous(Model(x)) } ##' @export `exogenous.lm` <- function(x,...) { attr(getoutcome(formula(x)),"x") } lava/R/optims.R0000644000176200001440000001554313162174023013043 0ustar liggesusers###{{{ nlminb nlminb2 <- function(start,objective,gradient,hessian,...) { nlminbcontrols <- c("eval.max","iter.max","trace","abs.tol","rel.tol","x.tol","step.min") dots <- list(...) control <- list(...)$control control <- control[names(control)%in%nlminbcontrols] dots$control <- control if (length(dots$trace)>0 && dots$trace>0) cat("\n") mypar <- c(list(start=start,objective=objective,gradient=gradient,hessian=hessian),dots) mypar["debug"] <- NULL do.call("nlminb", mypar) } nlminb1 <- function(start,objective,gradient,hessian,...) { nlminb2(start,objective,gradient=gradient,hessian=NULL,...) } nlminb0 <- function(start,objective,gradient,hessian,...) { nlminb2(start,objective,gradient=NULL,hessian=NULL,...) } ###}}} nlminb ###{{{ estfun estfun <- function(start,objective,gradient,hessian,NR=FALSE,...) { myobj <- function(x,...) { S <- gradient(x,...) crossprod(S)[1] } if (!missing(hessian) && !is.null(hessian)) { mygrad <- function(x) { H <- hessian(x) S <- gradient(x) 2*S%*%H } } else { hessian <- function(x) numDeriv::jacobian(gradient,x,method=lava.options()$Dmethod) mygrad <- function(x) { H <- hessian(x) S <- gradient(x) 2*S%*%H } } if (NR) { op <- lava::NR(start,gradient=gradient,hessian=hessian,...) } else { op <- nlminb2(start,myobj,mygrad,hessian=NULL,...) } return(op) } estfun0 <- function(...,hessian=NULL) estfun(...,hessian=hessian) ###}}} ###{{{ Newton-Raphson/Scoring ##' @export NR <- function(start,objective,gradient,hessian,debug=FALSE,control,...) 
{ control0 <- list(trace=0, gamma=1, lambda=0, ngamma=0, gamma2=0, backtrack=TRUE, iter.max=200, tol=1e-9, stabil=FALSE, epsilon=1e-9) if (!missing(control)) { control0[names(control)] <- control } ## conditions to select the step length if(control0$backtrack[1] == "armijo"){ control0$backtrack <- c(1e-4,0) # page 33 } if(control0$backtrack[1] == "curvature"){ control0$backtrack <- c(0,0.9) # page 34 } if(control0$backtrack[1] == "wolfe"){ control0$backtrack <- c(1e-4,0.9) } if(!is.logical(control0$backtrack) || length(control0$backtrack)!=1){ if(length(control0$backtrack) != 2){ stop("control$backtrack must have length two if not TRUE or FALSE \n") } if(any(!is.numeric(control0$backtrack)) || any(abs(control0$backtrack)>1)){ stop("elements in control$backtrack must be in [0,1] \n") } if(control0$backtrack[2]==0){ control0$backtrack[2] <- +Inf # no Wolfe condition } } if (control0$trace>0) cat("\nIter=0 Objective=",objective(as.double(start)),";\t\n \tp=", paste0(formatC(start), collapse=" "),"\n") gradFun = !missing(gradient) if (!gradFun & missing(hessian)) { hessian <- function(p) { ff <- objective(p) res <- attributes(ff)$hessian attributes(res)$grad <- as.vector(attributes(ff)$grad) return(res) } } oneiter <- function(p.orig,Dprev,return.mat=FALSE,iter=1) { if (is.null(hessian)) { cat(".") I <- -numDeriv::jacobian(gradient,p.orig,method=lava.options()$Dmethod) } else { I <- -hessian(p.orig) } D <- attributes(I)$grad if (is.null(D)) { D <- gradient(p.orig) } if (return.mat) return(list(D=D,I=I)) if (control0$stabil) { if (control0$lambda!=0) { if (control0$lambda<0) { sigma <- (t(D)%*%(D))[1] } else { sigma <- control0$lambda } sigma <- min(sigma,10) I <- I+control0$gamma2*sigma*diag(nrow=nrow(I)) } else { sigma <- ((D)%*%t(D)) I <- I+control0$gamma2*(sigma) } } ## svdI <- svd(I); svdI$d0 <- numeric(length(svdI$d)); ## svdI$d0[abs(svdI$d)>control0$epsilon] <- ## 1/svdI$d[abs(svdI$d)>control0$epsilon] ## iI <- with(svdI, (v)%*%diag(d0,nrow=length(d0))%*%t(u)) iI <- Inverse(I, symmetric=TRUE, tol=control0$epsilon) Delta = control0$gamma*iI%*%D Lambda <- 1 if (identical(control0$backtrack, TRUE)) { mD0 <- mean(Dprev^2) mD <- mean(D^2) p <- p.orig + Lambda*Delta while (mD>=mD0) { if (gradFun) { D = gradient(p) } else { DI <- oneiter(p,return.mat=TRUE) D = DI$D } mD = mean(D^2) if (is.nan(mD)) mD=mD0 Lambda <- Lambda/2 if (Lambda<1e-4) break; p <- p.orig + Lambda*Delta } } else if(identical(control0$backtrack, FALSE)) { p <- p.orig + Lambda*Delta } else { # objective(p.orig) - objective(p) <= mu*Lambda*gradient(p.orig)*Delta ## curvature c_D.origin_Delta <- control0$backtrack * c(rbind(D) %*% Delta) objective.origin <- objective(p.orig) p <- p.orig + Lambda*Delta ## ll <- seq(-0.17,1,length.out=50) ## pp <- numeric(length(ll)) ## for (ii in seq_along(ll)) pp[ii] <- objective(p.orig + ll[ii]*Delta) mD0 <- c(objective.origin + Lambda * c_D.origin_Delta[1], abs(c_D.origin_Delta[2]))# mD <- c(objective(p), abs(gradient(p) %*% Delta)) count <- 0 while (any(mD>mD0) || any(is.nan(mD))) { count <- count+1 ##cat(count, " f=",mD[1],"\n") Lambda <- Lambda/2 if (Lambda<1e-4) break; p <- p.orig + Lambda*Delta if(!is.infinite(mD0[1])){ mD0[1] <- objective.origin + Lambda * c_D.origin_Delta[1]# mD[1] <- objective(p) } if(!is.infinite(mD0[2])){ mD[2] <- abs(gradient(p) %*% Delta) } } } return(list(p=p,D=D,iI=iI)) } count <- count2 <- 0 thetacur <- start gammacount <- 0 Dprev <- rep(Inf,length(start)) for (jj in seq_len(control0$iter.max)) { gammacount <- gammacount+1 count <- count+1 count2 <- count2+1 
oldpar <- thetacur newpar <- oneiter(thetacur,Dprev,iter=jj) Dprev <- newpar$D thetacur <- newpar$p if (!is.null(control0$ngamma) && control0$ngamma>0) { if (control0$ngamma<=gammacount) { control0$gamma <- sqrt(control0$gamma) gammacount <- 0 } } if (count2==control0$trace) { cat("Iter=", count,"LogLik=",objective(as.double(newpar$p)),";\n\tD=", paste0(formatC(newpar$D), collapse = " "), "\n") cat("\tp=", paste0(formatC(thetacur), collapse = " "), "\n") count2 <- 0 } if (mean(newpar$D^2)1) v <- cumsum(c(v,exp(theta[seq(length(theta)-1L)+1L]))) return(v) } ordreg_ithreshold <- function(v) { theta <- v[1] if (length(v)>1) theta <- c(theta,log(-rev(diff(rev(v))))) return(theta) } ordreg_dthreshold <- function(theta) { K <- length(theta)+1 Da <- matrix(0,K,K-1) Da[seq(K-1),1L] <- 1L for (i in seq_len(K-2)+1) Da[seq(i,K-1),i] <- exp(theta[i]) Da } ##' Ordinal regression models ##' ##' @title Univariate cumulative link regression models ##' @param formula formula ##' @param data data.frame ##' @param offset offset ##' @param family family (default proportional odds) ##' @param start optional starting values ##' @param fast If TRUE standard errors etc. will not be calculated ##' @param ... Additional arguments to lower level functions ##' @export ##' @author Klaus K. Holst ##' @examples ##' m <- lvm(y~x) ##' ordinal(m,K=3) <- ~y ##' d <- sim(m,100) ##' e <- ordreg(y~x,d) ordreg <- function(formula,data=parent.frame(),offset,family=stats::binomial("probit"),start,fast=FALSE,...) { y <- ordered(model.frame(update(formula,.~0),data)[,1]) lev <- levels(y) X <- model.matrix(update(formula,.~.+1),data=data)[,-1,drop=FALSE] up <- new.env() assign("h",family$linkinv,envir=up) assign("dh",family$mu.eta,envir=up) assign("y",as.numeric(y),envir=up) assign("X",X,envir=up) assign("K",nlevels(y),envir=up) assign("n",length(y),envir=up) assign("p",NCOL(X),envir=up) assign("threshold", function(theta,K) ordreg_threshold(theta[seq(K-1)]), envir=up) assign("dthreshold",function(theta,K) ordreg_dthreshold(theta[seq(K-1)]), envir=up) ff <- function(theta) -ordreg_logL(theta,up) gg <- function(theta) -ordreg_score(theta,up) if (missing(start)) start <- with(up,c(rep(-1,up$K-1),rep(0,p))) op <- nlminb(start,ff,gg) cc <- op$par; if (fast) return(structure(cc,threshold=up$threshold(cc,up$K))) ##,up$K))) nn <- c(paste(lev[-length(lev)], lev[-1L], sep = "|"), colnames(X)) I <- -ordreg_hessian(cc,up) names(cc) <- nn dimnames(I) <- list(nn,nn) res <- list(vcov=solve(I),coef=cc,call=match.call(),up=up,opt=op) structure(res,class="ordreg") } ##' @export print.ordreg <- function(x,...) { cat("Call:\n"); print(x$call) cat("\nParameter Estimates:\n") print(x$coef) } ##' @export summary.ordreg <- function(object,alpha=0.95,...) { res <- cbind(coef(object),diag(vcov(object))^.5) pp <- 1-(1-alpha)/2 qq <- qnorm(pp) res <- cbind(res,res[,1]-res[,2]*qq,res[,1]+res[,2]*qq,2*(1-pnorm(abs(res[,1])/res[,2]))) colnames(res) <- c("Estimate","Std.Err",paste0(round(c(1-pp,pp)*1000)/10,"%"),"P-value") res <- list(coef=res,logLik=logLik(object),AIC=AIC(object)) class(res) <- "summary.ordreg" return(res) } ##' @export print.summary.ordreg <- function(x,alpha=0.95,...) { cat("AIC: ", x$AIC, "\n\n") print(x$coef) cat("\n") } ##' @export score.ordreg <- function(x,p=coef(x),indiv=FALSE,...) { ordreg_score(p,x$up) if (!indiv) return(colSums(x$up$score)) x$up$score } ##' @export logLik.ordreg <- function(object,p=coef(object),indiv=FALSE,...) 
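## Worked illustration (comment only) of the threshold parameterisation used by
## ordreg_threshold() above: the first cut-point is free and the remaining ones
## are obtained by adding positive increments exp(theta_j), which guarantees an
## increasing sequence of thresholds:
##   theta <- c(-1, 0.5, 0.2)
##   alpha <- cumsum(c(theta[1], exp(theta[-1])))   ## approx. -1.00, 0.65, 1.87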
{ ordreg_logL(p,object$up) res <- log(object$up$pr) if (!indiv) res <- sum(res) structure(res,nall=length(object$up$pr),nobs=object$up$pr,df=length(p),class="logLik") } ##' @export coef.ordreg <- function(object,...) object$coef ##' @export vcov.ordreg <- function(object,...) object$vcov ordreg_logL <- function(theta,env,indiv=FALSE,...) { if (length(theta)!=with(env,p+K-1)) stop("Wrong dimension") env$theta <- theta if (env$p>0) beta <- with(env,theta[seq(p)+K-1]) alpha <- with(env, threshold(theta,K)) env$alpha <- alpha env$beta <- beta if (env$p>0) eta <- env$X%*%beta else eta <- cbind(rep(0,env$n)) env$lp <- kronecker(-eta,rbind(alpha),"+") F <- with(env,h(lp)) Pr <- cbind(F,1)-cbind(0,F) pr <- Pr[with(env,cbind(seq(n),as.numeric(y)))] env$pr <- pr sum(log(pr)) } ordreg_score <- function(theta,env,...) { if (!identical(theta,env$theta)) ordreg_logL(theta,env) Da <- with(env,dthreshold(theta,K)) dF <- with(env, cbind(dh(lp),0)) idx1 <- with(env,which(as.numeric(y)==1)) S1 <- cbind(Da[as.numeric(env$y),,drop=FALSE],-env$X) S1 <- dF[with(env,cbind(seq(n),as.numeric(y)))]*S1 y2 <- env$y-1; y2[idx1] <- env$K S2 <- cbind(Da[y2,,drop=FALSE],-env$X) S2 <- dF[cbind(seq(env$n),y2)]*S2 env$score <- 1/env$pr*(S1-S2) colSums(env$score) } ordreg_hessian <- function(theta,env,...) { numDeriv::jacobian(function(p) ordreg_score(p,env,...),theta,...) } ##' @export predict.ordreg <- function(object,p=coef(object),type=c("prob","cumulative"),...) { env <- object$up env$theta <- p if (env$p>0) beta <- with(env,theta[seq(p)+K-1]) alpha <- with(env, threshold(theta,K)) env$alpha <- alpha env$beta <- beta if (env$p>0) eta <- env$X%*%beta else eta <- cbind(rep(0,env$n)) env$lp <- kronecker(-eta,rbind(alpha),"+") F <- with(env,h(lp)) if (tolower(type)[1]=="cumulative") return(F) Pr <- cbind(F,1)-cbind(0,F) return(Pr) } lava/R/transform.R0000644000176200001440000000616613162174023013544 0ustar liggesusers##' @export "transform<-" <- function(`_data`,...,value) UseMethod("transform<-") ##' @export "transform<-.lvm" <- function(`_data`,formula=NULL,...,value) { transform(`_data`,formula,value,...) } ##' @export print.transform.lvm <- function(x,...) { for (i in seq_along(x)) { cat("Variable: ", names(x)[i],"\n",sep="") cat("Transformation: (",paste0(x[[i]]$x,collapse=","),") -> ",sep="") print(x[[i]]$fun) cat("\n") } invisible(x) } ##' @export "transform.lvm" <- function(`_data`,formula,fun,post=TRUE,y,x,...) { if (missing(formula)) { if (length(tr <- `_data`$attributes$transform)==0) { return(NULL) } return(structure(`_data`$attributes$transform,class="transform.lvm")) } if (!missing(y) && !missing(x)) { xx <- x } else { if (is.character(formula)) { y <- NULL; xx <- formula } else { y <- getoutcome(formula) xx <- attributes(y)$x } } if (length(xx)==0) { xx <- y; y <- NULL } if (length(y)==0) { if (post) { `_data`$constrainY[xx] <- NULL `_data`$constrain[xx] <- NULL if (is.null(`_data`$attributes$selftransform)) `_data`$attributes$selftransform <- list() `_data`$attributes$selftransform[[xx]] <- fun return(`_data`) } `_data`$attributes$selftransform[xx] <- NULL constrain(`_data`,xx,y,...) 
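## Commented usage sketch for the 'transform' replacement method defined in this
## file (assumption: the left-hand side of the formula names the new variable and
## the supplied function is applied to the right-hand side variable(s)):
##   m <- lvm(y ~ x)
##   transform(m, z ~ x) <- function(x) x^2   ## z is a deterministic function of x
##   sim(m, 5)                                ## simulated data now include z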
<- fun return(`_data`) } `_data`$attributes$selftransform[y] <- NULL addvar(`_data`) <- y intercept(`_data`,y) <- 0; covariance(`_data`,y) <- 0 if (is.null(`_data`$attributes$transform)) `_data`$attributes$transform <- list() if (is.null(fun)) `_data`$attributes$transform[y] <- NULL else { if (length(y)>1) { if (is.null(`_data`$attributes$multitransform)) `_data`$attributes$multitransform <- list() `_data`$attributes$multitransform for (yi in y) { `_data`$attributes$transform[yi] <- NULL } rmidx <- c() for (i in seq_along(`_data`$attributes$multitransform)) { l <- `_data`$attributes$multitransform[[i]] if (any(y%in%letters)) rmidx <- c(rmidx,i) } if (length(rmidx)>0) `_data`$attributes$transform[rmidx] <- NULL `_data`$attributes$multitransform <- c(`_data`$attributes$multitransform, list(list(fun=fun,y=y,x=xx))) } else { `_data`$attributes$transform[[y]] <- list(fun=fun,x=xx) } } return(`_data`) } addhook("plothook.transform","plot.post.hooks") plothook.transform <- function(x,...) { trans <- x$attributes$transform transnames <- names(trans) for (v in transnames) { xx <- trans[[v]][["x"]] if (length(xx)>0) { x <- regression(x,x=xx,y=v) edgelabels(x,from=xx,to=v,col="gray70") <- "" } } return(x) } lava/R/parlabels.R0000644000176200001440000000067713162174023013477 0ustar liggesusers##' @export parlabels <- function(x,exo=FALSE) { res <- c(unlist(intfix(x)[unlist(lapply(intfix(x), function(y) !is.na(y) & !is.numeric(y)))]), regfix(x)$labels[!is.na(regfix(x)$labels)], covfix(x)$labels[!is.na(covfix(x)$labels)]) if (!is.null(x$exfix)) res <- c(res, unlist(x$exfix[!is.na(x$exfix) && !is.numeric(x$exfix)])) if (exo) res <- intersect(res,index(Model(x))$exogenous) return(res) } lava/R/pcor.R0000644000176200001440000001315713162174023012472 0ustar liggesusers##' Polychoric correlation ##' ##' Maximum likelhood estimates of polychoric correlations ##' @param x Variable 1 ##' @param y Variable 2 ##' @param X Optional covariates ##' @param start Optional starting values ##' @param ... Additional arguments to lower level functions ##' @export pcor <- function(x,y,X,start,...) { if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (is.numeric(x) && is.numeric(y)) { e <- estimate(covariance(lvm(),x~y)) return(estimate(e,function(p) list(rho=p[5]/(p[3]*p[4])^.5),iid=TRUE)) } n1 <- 1+seq(nlevels(x)-1) n2 <- n1[length(n1)]+seq(nlevels(y)-1) if (missing(start)) { f <- as.formula(ifelse(missing(X),"~1","~X")) start <- c(0.5, attr(lava::ordreg(update(f,x~.),fast=TRUE,family=stats::binomial("probit")),"threshold"), attr(lava::ordreg(update(f,y~.),fast=TRUE,family=stats::binomial("probit")),"threshold")) } ii <- mets::fast.pattern(cbind(as.numeric(x),as.numeric(y)),categories=max(length(unique(x)),length(unique(y)))) nn <- table(x,y) ff <- function(theta) { -sum(as.vector(nn)*log(polycor0(theta[1],theta[n1],theta[n2]))) } gg <- function(theta) { pp <- polycor0(theta[1],theta[n1],theta[n2],onlyP=FALSE) np <- as.vector(nn)/as.vector(pp$p) -colSums(apply(pp$dp,2,function(x) np*x)) } nn0 <- nn; nn[nn==0] <- .5 p0 <- as.vector(nn)/sum(nn) logL0 <- sum(as.vector(nn)*log(p0)) suppressWarnings(t0 <- system.time(op <- nlminb(start,ff,gg))) cc <- op$par names(cc) <- c("rho",paste(rownames(nn),"x",sep=".")[-1], paste(colnames(nn),"y",sep=".")[-1]) V <- solve(numDeriv::jacobian(function(p) gg(p), cc)) res <- list(coef=cc, vcov=V, tab=nn, logLik0=logL0, logLik=-ff(cc), n1=n1, n2=n2, opt=op, idx=ii) structure(res,class="pcor") } ##' @export coef.pcor <- function(object,...) 
object$coef ##' @export vcov.pcor <- function(object,...) object$vcov ##' @export logLik.pcor <- function(object,p=coef(object),...) { u <- polycor0(p[1],p[object$n1],p[object$n2],onlyP=TRUE) np <- sum(as.vector(object$tab)*log(as.vector(u))) nobs <- sum(object$tab)/2 structure(np,nall=nobs,nobs=nobs,df=length(p),class="logLik") } ##' @export print.pcor <- function(x,...) { res <- cbind(coef(x),diag(vcov(x))^0.5) colnames(res) <- c("Estimate","Std.Err") print(res) df <- length(x$tab)-nrow(res) q <- with(x,2*(logLik0-logLik)) cat("\nDeviance = ", q, ", df = ",df,"\n") } ##' @export score.pcor <- function(x,p=coef(x),indiv=FALSE,...) { u <- polycor0(p[1],p[x$n1],p[x$n2],onlyP=FALSE) if (!indiv) { np <- as.vector(x$tab)/as.vector(u$p) return(colSums(apply(u$dp,2,function(x) np*x))) } U <- u$dp; U <- apply(u$dp,2,function(x) x/as.vector(u$p)) ##ii <- unlist(apply(cbind(seq(length(x$tab)),as.vector(x$tab)),1,function(x) rep(x[1],x[2]))) Pos <- matrix(0,nrow=prod(dim(x$tab)),ncol=2) count <- 0 for (j in seq(ncol(x$tab))) for (i in seq(nrow(x$tab))) { count <- count+1 Pos[count,] <- c(i,j) } pos <- match(data.frame(t(x$idx$pattern)),data.frame(t(Pos))) ## pos <- c() ## for (i in seq(nrow(x$idx$pattern))) { ## pos <- c(pos,which(apply(Pos,1,function(y) identical(y,x$idx$pattern[i,])))) ## } return(U[pos[x$idx$group+1],]) } polycor0 <- function(rho,a0,b0,onlyP=TRUE,...) { k1 <- length(a0); k2 <- length(b0) S <- diag(c(1-rho,1-rho))+rho P <- matrix(0,nrow=k1,ncol=k2) P1 <- pnorm(a0,sd=1) P2 <- pnorm(b0,sd=1) set.seed(1) for (i in seq(k1)) for (j in seq(k2)) P[i,j] <- mets::pmvn(lower=c(-Inf,-Inf),upper=c(a0[i],b0[j]),sigma=S) PP <- Drho <- matrix(0,nrow=k1+1,ncol=k2+1) pmvn0 <- function(i,j,sigma=S) { if (i==0 | j==0) return(0) if (i==(k1+1) & j==(k2+1)) return(1) if (i==(k1+1)) return(P2[j]) if (j==(k2+1)) return(P1[i]) P[i,j] } dpmvn0 <- function(i,j,type=1,k) { if (i==0 | j==0) return(0) if (i==(k1+1) & j==(k2+1)) return(0) if (i==(k1+1)) { if (type==3 && k==j) return(dnorm(b0[j])) return(0) } if (j==(k2+1)) { if (type==2 && k==i) return(dnorm(a0[i])) return(0) } if (type==1) ## rho return(dmvn(c(a0[i],b0[j]),sigma=S)) if (type==2) { ## threshold a if (k!=i) return(0) return(dnorm(a0[i])*pnorm((b0[j]-rho*a0[i])/sqrt(1-rho^2))) } ## threshold b if (k!=j) return(0) dnorm(b0[j])*pnorm((a0[i]-rho*b0[j])/sqrt(1-rho^2)) } for (i in seq(k1+1)) for (j in seq(k2+1)) { PP[i,j] <- pmvn0(i,j) + pmvn0(i-1,j-1) - pmvn0(i-1,j) - pmvn0(i,j-1) Drho[i,j] <- dpmvn0(i,j) + dpmvn0(i-1,j-1) - dpmvn0(i-1,j) - dpmvn0(i,j-1) } if (onlyP) return(PP) Da <- matrix(0,length(PP),k1) for (k in seq(k1)) for (i in seq(k1+1)) for (j in seq(k2+1)) { pos <- i + (k1+1)*(j-1) Da[pos,k] <- dpmvn0(i,j,type=2,k=k) + dpmvn0(i-1,j-1,type=2,k=k) - dpmvn0(i-1,j,type=2,k=k) - dpmvn0(i,j-1,type=2,k=k) } Db <- matrix(0,length(PP),k2) for (k in seq(k2)) for (i in seq(k1+1)) for (j in seq(k2+1)) { pos <- i + (k1+1)*(j-1) Db[pos,k] <- dpmvn0(i,j,type=3,k=k) + dpmvn0(i-1,j-1,type=3,k=k) - dpmvn0(i-1,j,type=3,k=k) - dpmvn0(i,j-1,type=3,k=k) } list(p=PP,dp=cbind(as.vector(Drho),Da,Db)) } lava/R/missingMLE.R0000644000176200001440000002373013162174023013534 0ustar liggesusers###{{{ missingModel missingModel <- function(model,data,var=endogenous(model),fix=FALSE,type=2,keep=NULL,weights=NULL,data2=NULL,cluster=NULL,...) 
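## Sketch of the idea behind missingModel() below (comment only): the selected
## variables are scanned for distinct missing-data patterns, e.g.
##   patterns <- unique(is.na(d[, endogenous(m)]), MARGIN=1)   ## 'd','m' hypothetical
## and one sub-model/data set is created per pattern (rows where everything is
## missing are dropped). estimate.MAR() further below combines these
## pattern-specific models into a single multigroup likelihood.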
{ if (!inherits(model,"lvm")) stop("Needs a lvm-object") if (type==3) { var <- manifest(model) } data.mis <- is.na(data[,var,drop=FALSE]) colnames(data.mis) <- var patterns <- unique(data.mis,MARGIN=1) mis.type <- apply(data.mis,1, function(x) which(apply(patterns,1,function(y) identical(x,y)))) pattern.allmis <- which(apply(patterns,1,all)) ## Remove entry with all missing models <- datasets <- weights <- data2 <- clusters <- c() mymodel <- baptize(model) pattern.compl <- 0 count <- 0 A <- index(model)$A topendo <- endogenous(model,top=TRUE) exo <- exogenous(model) exclude <- c() warned <- FALSE for (i in setdiff(seq_len(nrow(patterns)),pattern.allmis)) { exoremove <- c() includemodel <- TRUE count <- count+1 mypattern <- patterns[i,] m0 <- mymodel; if (any(mypattern)) { latent(m0) <- colnames(data.mis)[mypattern] if (type>1) { mytop <- intersect(topendo,colnames(data.mis)[mypattern]) if (!is.null(mytop)) { kill(m0) <- mytop for (xx in exo) { ## If exogenous variable only have effect on missing variables, ## then remove it from the model if (all(c(rownames(A)[A[xx,]==1])%in%mytop) && !(xx%in%m0$par) ##&& !(xx%in%names(index(m0))$parval) ) { exoremove <- c(exoremove,xx) kill(m0) <- xx } } } } } else pattern.compl <- count ## d0 <- data[mis.type==i,manifest(m0),drop=FALSE]; d0 <- data[which(mis.type==i),c(manifest(m0),keep),drop=FALSE]; if (!is.list(weights)) { w0.var <- intersect(manifest(m0),colnames(weights)) w0 <- weights[which(mis.type==i),w0.var,drop=FALSE]; } if (!is.list(data2)) { w02.var <- intersect(manifest(m0),colnames(data2)) w02 <- data2[which(mis.type==i),w02.var,drop=FALSE]; } clust0 <- cluster[which(mis.type==i)] ex0 <- exogenous(m0) <- setdiff(exo,exoremove) xmis <- which(apply(d0[,ex0,drop=FALSE],1,function(x) any(is.na(x)))) if (length(xmis)>0) { misx <- ex0[apply(d0[xmis,ex0,drop=FALSE],2,function(x) any(is.na(x)))] if (!warned) warning("Missing exogenous variables: ", paste(misx,collapse=","), ". Removing rows...") warned <- TRUE d0 <- d0[-xmis,,drop=FALSE] w0 <- w0[-xmis,,drop=FALSE] clust0 <- clust0[-xmis] w02 <- w02[-xmis,,drop=FALSE] } if (length(misx <- intersect(ex0,latent(m0)))>0) { warning("Missing exogenous variables:", paste(misx,collapse=","), "! Remove manually!.") } ## else { if( sum(unlist(index(m0)[c("npar","npar.mean")]))>0 ) { models <- c(models, list(m0)) datasets <- c(datasets, list(d0)) weights <- c(weights, list(w0)) if (!is.list(data2)) data2 <- c(data2, list(w02)) clusters <- c(clusters, list(clust0)) } else { exclude <- c(exclude,count) } } } rmset <- c() for (i in seq_len(length(datasets))) { if (nrow(datasets[[i]])==0) rmset <- c(rmset,i) } if (length(rmset)>0) { models[[rmset]] <- NULL datasets[[rmset]] <- NULL weights[[rmset]] <- NULL data2[[rmset]] <- NULL clusters[[rmset]] <- NULL patterns <- patterns[-rmset,,drop=FALSE] } Patterns <- patterns if (length(exclude)>0) Patterns <- Patterns[-exclude,] pattern.allcomp<- which(apply(Patterns,1,function(x) all(!x))) ## Complete cases res <- list(models=models, datasets=datasets, weights=weights, data2=data2, clusters=clusters, patterns=Patterns, pattern.compl=pattern.compl, pattern.allmis=pattern.allmis, pattern.allcomp=pattern.allcomp, mis.type=mis.type) return(res) } ###}}} ###{{{ estimate.MAR.lvm ##' @export estimate.MAR <- function(x,data,which=endogenous(x),fix,type=2,startcc=FALSE,control=list(),silent=FALSE,weights,data2,cluster,onlymodel=FALSE,estimator="gaussian",hessian=TRUE,keep=NULL,...) 
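## Commented usage sketch: this function is normally reached through estimate()
## with missing=TRUE rather than being called directly, e.g.
##   m <- lvm(c(y1, y2) ~ x, y1 ~~ y2)
##   e <- estimate(m, d, missing=TRUE)   ## full-information ML under MAR
## which matches the pattern shown for the 'missingdata' example data set
## documented elsewhere in this package ('d' is a data set with y1, y2, x).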
{ cl <- match.call() Debug("estimate.MAR") redvar <- intersect(intersect(parlabels(x),latent(x)),colnames(data)) if (length(redvar)>0 & !silent) warning(paste("Remove latent variable colnames from dataset",redvar)) xfix <- setdiff(colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))],latent(x)) if (missing(fix)) fix <- ifelse(length(xfix)>0,FALSE,TRUE) S <- diag(nrow=length(manifest(x))); mu <- rep(0,nrow(S)); K <- length(exogenous(x)) vnames <- index(x)$manifest names(mu) <- rownames(S) <- colnames(S) <- vnames if (K>0) { xx <- subset(Model(x),exogenous(x)) exogenous(xx) <- NULL covfix(xx, vars(xx)) <- NA xx <- covariance(xx,exogenous(x),exogenous(x)) datax <- data[,exogenous(x),drop=FALSE] exo.idx <- match(exogenous(x),manifest(x)) mu0 <- colMeans(datax,na.rm=TRUE) cov0 <- cov(datax,use="pairwise.complete.obs")*(nrow(datax)-1)/nrow(datax) cov0upper <- cov0[upper.tri(cov0,diag=TRUE)] exogenous(xx) <- NULL coefpos <- matrices(xx,seq_len(K*(K-1)/2+K))$P ii <- coefpos[upper.tri(coefpos,diag=TRUE)] start <- c(mu0, cov0upper[order(ii)]) S[exo.idx,exo.idx] <- cov0 mu[exo.idx] <- mu0 ## message("\n") } x0 <- x x <- fixsome(x, measurement.fix=fix, exo.fix=TRUE, S=S, mu=mu, n=1) if (!silent) message("Identifying missing patterns...") val <- missingModel(x,data,var=which,type=type,keep=c(keep,xfix),weights=weights,data2=data2,cluster=cluster,...) if (!silent) message("\n") if (nrow(val$patterns)==1) { res <- estimate(x,data=data,fix=fix,weights=weights,data2=data2,estimator=estimator,silent=silent,control=control,...) return(res) } if (startcc & is.null(control$start)) { if (!silent) message("Obtaining starting value...") start0 <- rep(1,sum(unlist(index(x)[c("npar","npar.mean")]))) mystart <- tryCatch( (estimate(x,data=na.omit(data),silent=TRUE, weights=weights,data2=data2,estimator=estimator,quick=TRUE,... )), error=function(e) rep(1,sum(unlist(index(x)[c("npar","npar.mean")]))) ) control$start <- mystart if (!silent) message("\n") } if (is.null(control$meanstructure)) control$meanstructure <- TRUE mg0 <- with(val, suppressWarnings(multigroup(models,datasets,fix=FALSE,exo.fix=FALSE,missing=FALSE))) if (!is.null(names(control$start))) { parorder1 <- attributes(parpos(mg0,p=names(control$start)))$name paridx <- match(parorder1,names(control$start)) newpos <- paridx[which(!is.na(paridx))] start0 <- control$start start0[which(!is.na(paridx))] <- control$start[na.omit(paridx)] names(start0)[which(!is.na(paridx))] <- names(control$start[na.omit(paridx)]) control$start <- start0 } if (onlymodel) return(list(mg=mg0,val=val,weights=val$weights,data2=val$data2,cluster=val$clusters)) if (all(unlist(lapply(val$weights,is.null)))) val$weights <- NULL if (all(unlist(lapply(val$data2,is.null)))) val$data2 <- NULL if (all(unlist(lapply(val$clusters,is.null)))) val$clusters <- NULL e.mis <- estimate(mg0,control=control,silent=silent, weights=val$weights,data2=val$data2, cluster=val$clusters,estimator=estimator,...) cc <- coef(e.mis,level=1) mynames <- c() if (e.mis$model$npar.mean>0) mynames <- c(mynames,paste0("m",seq_len(e.mis$model$npar.mean))) if (e.mis$model$npar>0) mynames <- c(mynames,paste0("p",seq_len(e.mis$model$npar))) rownames(cc) <- mynames mycc <- val$pattern.allcomp ## Position of complete-case model nmis <- with(val, as.numeric(table(mis.type)[pattern.allmis])) ## Number of completely missing observations if (length(nmis)>0 & length(mycc)>0) ## Any individuals with all missing? 
if (val$pattern.allmis0) { nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) for (i in seq_along(xfix)) regfix(x, from=vars(x)[rowpos[[i]]],to=vars(x)[colpos[[i]]]) <- rep(colMeans(data[,xfix[i],drop=FALSE],na.rm=TRUE),length(rowpos[[i]])) x <- updatelvm(x,zeroones=TRUE,deriv=TRUE) } ord <- c() ordlist <- list() for (i in seq_len(nrow(val$patterns))) { ordlist <- c(ordlist, list(which(val$mis.type==i))) ord <- c(ord, ordlist[[i]]) } res <- with(val, list(coef=cc, patterns=patterns, table=table(mis.type), mis.type=mis.type, order=ord, orderlist=ordlist, nmis=nmis, allmis=pattern.allmis, cc=mycc, ncc=as.numeric(table(mis.type)[pattern.allcomp]), multigroup=e.mis$model, estimate=e.mis, model=x, model0=x0, vcov=e.mis$vcov, opt=e.mis$opt, control=control, data=list(model.frame=data), estimator=estimator, call=cl )) class(res) <- c("lvm.missing","lvmfit") if (inherits(e.mis,"lvmfit.randomslope")) class(res) <- c(class(res),"lvmfit.randomslope") if (hessian & is.null(cluster)) { if (!silent) message("Calculating asymptotic variance...\n") res$vcov <- solve(information(res$estimate,type="hessian")) cc[] <- coef(e.mis,level=1,vcov=res$vcov) res$coef <- cc } return(res) } ###}}} estimate.MAR.lvm lava/R/merge.R0000644000176200001440000001235113162174023012621 0ustar liggesusers##' @export `%++%.lvm` <- function(x,y) merge(x,y) ##' @export "+.lvm" <- function(x,...) { merge(x,...) } ## ##' @export ## "+.lm" <- function(x,...) { ## merge(x,...) ## } ##' @export merge.lvm <- function(x,y,...) { objects <- list(x,y,...) if (length(objects)<2) return(x) m <- objects[[1]] for (i in seq(2,length(objects))) { m2 <- objects[[i]] if (length(latent(m2))>0) latent(m) <- latent(m2) if (length(m2$constrain)>0) m$constrain <- c(m$constrain,m2$constrain) M <- (index(m2)$A) P <- (index(m2)$P) nn <- vars(m2) for (j in seq_len(nrow(M))) { if (any(idx <- M[j,]!=0)) { val <- as.list(rep(NA,sum(idx==TRUE))) if (any(idx. <- !is.na(m2$par[j,idx]))) val[idx.] <- m2$par[j,idx][idx.] if (any(idx. <- !is.na(m2$fix[j,idx]))) val[idx.] <- m2$fix[j,idx][idx.] regression(m,to=nn[idx],from=nn[j],silent=TRUE) <- val } P0 <- P[j,]; P0[seq_len(j-1)] <- 0 if (any(idx <- P[j,]!=0)) { val <- as.list(rep(NA,sum(idx==TRUE))) if (any(idx. <- !is.na(m2$covpar[j,idx]))) val[idx.] <- m2$covpar[j,idx][idx.] if (any(idx. <- !is.na(m2$covfix[j,idx]))) val[idx.] <- m2$covfix[j,idx][idx.] covariance(m,nn[idx],nn[j],silent=TRUE) <- val } } intercept(m,nn) <- intercept(m2) m2x <- exogenous(m2) if (length(m2x)>0) exogenous(m) <- c(exogenous(m),m2x) } index(m) <- reindex(m) return(m) } ##' @export "+.estimate" <- function(x,...) { merge(x,...) } ##' @export merge.estimate <- function(x,y,...,id,paired=FALSE,labels=NULL,keep=NULL,subset=NULL) { objects <- list(x,y, ...) if (length(nai <- names(objects)=="NA")>0) names(objects)[which(nai)] <- "" if (!missing(subset)) { coefs <- unlist(lapply(objects, function(x) coef(x)[subset])) } else { coefs <- unlist(lapply(objects,coef)) } if (!is.null(labels)) { names(coefs) <- labels } else { names(coefs) <- make.unique(names(coefs)) } if (!missing(id) && is.null(id)) { ## Independence between datasets in x,y,... 
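## (Sketch of the 'id' semantics as coded: id=NULL treats the objects as coming
## from independent samples, so their influence functions are stacked on disjoint
## blocks of observations; id=TRUE or paired=TRUE assumes the same subjects in the
## same order, giving a joint variance that accounts for the correlation between
## the estimators.) Commented usage example, with 'd' a hypothetical data set:
##   e1 <- estimate(glm(y1 ~ x, data=d))
##   e2 <- estimate(glm(y2 ~ x, data=d))
##   vcov(merge(e1, e2, id=TRUE))   ## joint covariance of both coefficient vectors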
nn <- unlist(lapply(objects,function(x) nrow(x$iid))) cnn <- c(0,cumsum(nn)) id <- list() for (i in seq_along(nn)) id <- c(id,list(seq(nn[i])+cnn[i])) } if (missing(id)) { if (paired) { ## One-to-one dependence between observations in x,y,... id <- rep(list(seq(nrow(x$iid))),length(objects)) } else { id <- lapply(objects,function(x) x$id) } } else { nn <- unlist(lapply(objects,function(x) NROW(iid(x)))) if (length(id)==1 && is.logical(id)) { if (id) { if (any(nn[1]!=nn)) stop("Expected objects of the same size: ", paste(nn,collapse=",")) id0 <- seq(nn[1]); id <- c() for (i in seq(length(nn))) id <- c(id,list(id0)) } else { id <- c() N <- cumsum(c(0,nn)) for (i in seq(length(nn))) id <- c(id,list(seq(nn[i])+N[i])) } } if (length(id)!=length(objects)) stop("Same number of id-elements as model objects expected") idlen <- unlist(lapply(id,length)) if (!identical(idlen,nn)) stop("Wrong lengths of 'id': ", paste(idlen,collapse=","), "; ", paste(nn,collapse=",")) } if (any(unlist(lapply(id,is.null)))) stop("Id needed for each model object") ##iid <- Reduce("cbind",lapply(objects,iid)) ids <- iidall <- c(); count <- 0 for (z in objects) { count <- count+1 clidx <- NULL id0 <- id[[count]] iidz <- iid(z) if (!missing(subset)) iidz <- iidz[,subset,drop=FALSE] if (!lava.options()$cluster.index) { iid0 <- matrix(unlist(by(iidz,id0,colSums)),byrow=TRUE,ncol=ncol(iidz)) ids <- c(ids, list(sort(unique(id0)))) } else { if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") clidx <- mets::cluster.index(id0,mat=iidz,return.all=TRUE) iid0 <- clidx$X ids <- c(ids, list(id0[as.vector(clidx$firstclustid)+1])) } iidall <- c(iidall, list(iid0)) } id <- unique(unlist(ids)) iid0 <- matrix(0,nrow=length(id),ncol=length(coefs)) colpos <- 0 for (i in seq(length(objects))) { relpos <- seq_along(coef(objects[[i]])) if (!missing(subset)) relpos <- seq_along(subset) iid0[match(ids[[i]],id),relpos+colpos] <- iidall[[i]] colpos <- colpos+tail(relpos,1) } rownames(iid0) <- id estimate.default(NULL, coef=coefs, stack=FALSE, data=NULL, iid=iid0, id=id, keep=keep) } ##' @export merge.lm <- function(x,y,...) { args <- c(list(x,y),list(...)) nn <- names(formals(merge.estimate)[-seq(3)]) idx <- na.omit(match(nn,names(args))) models <- args; models[idx] <- NULL mm <- lapply(args,function(x) tryCatch(estimate(x),error=function(e) NULL)) names(mm)[1:2] <- c("x","y") ii <- which(unlist(lapply(mm,is.null))) if (length(ii)>0) mm[ii] <- NULL do.call(merge,c(mm,args[idx])) } ##' @export merge.glm <- merge.lm ##' @export merge.lvmfit <- merge.lm ##' @export merge.multinomial <- function(x,...) { merge.estimate(x,...) } lava/R/stack.R0000644000176200001440000000353313162174023012631 0ustar liggesusers##' Stack estimating equations ##' ##' Stack estimating equations ##' @param x Model 1 ##' @param model2 Model 2 ##' @param D1u Derivative of score of model 2 w.r.t. parameter vector of model 1 ##' @param inv.D2u Inverse of deri ##' @param weights weights (vector or function) ##' @param dweights derivative of weight wrt parameters of model 1 ##' @param U Optional score function (model 2) as function of all parameters ##' @param k Debug argument ##' @param keep1 If TRUE only parameters of model 2 i s returned ##' @param ... Additional arguments to lower level functions ##' @aliases stack.estimate ##' @export stack.estimate <- function(x,model2,D1u,inv.D2u,weights,dweights,U,k=1,keep1=FALSE,...) 
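## Commented sketch of the calling pattern for stack.estimate below. The helper
## 'score2' is hypothetical: it should return the score of the second-stage model
## evaluated at the first-stage parameters, so that D1u can be obtained
## numerically and the first-stage uncertainty enters the second-stage standard
## errors through the correction term inv.D2u %*% D1u %*% t(iid1):
##   e12 <- stack(estimate(model1), estimate(model2), U=score2)
## Alternatively, when only the weights of the second stage depend on the first
## stage, 'weights' and 'dweights' can be supplied instead of 'U'.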
{ iid1 <- iid(x) iid2 <- iid(model2) if (missing(inv.D2u)) { inv.D2u <- -attributes(iid2)$bread } if (is.null(inv.D2u)) stop("Need derivative of second stage score") if (!missing(U)) { D1u <- numDeriv::jacobian(U,coef(x)) } if (!missing(weights) && is.function(weights)) { dweights <- numDeriv::jacobian(weights,coef(x)) weights <- weights(coef(x)) } if (!missing(dweights)) { D2u <- Inverse(inv.D2u) u2 <- iid2%*%D2u ## Score of stage two equation derived from estimated influence function ## Derivative of score wrt first set of parameters (weights model) D1u <- crossprod(apply(u2,2,function(x) -x/weights),dweights) } ii <- iid(merge(x,model2)) iid1. <- ii[,seq_along(coef(x)),drop=FALSE] iid2. <- ii[,length(coef(x))+seq_along(coef(model2)),drop=FALSE] iid3 <- t(inv.D2u%*%(D1u%*%t(iid1.))) if (!keep1) return(estimate(coef=coef(model2),iid=cbind(iid2.+k*iid3))) estimate(coef=c(coef(x),coef(model2)),iid=cbind(iid1.,iid2. + k*iid3)) } ##' @export stack.glm <- function(x,model2,...) { stack(estimate(x),estimate(model2),...) } lava/R/lava-package.R0000644000176200001440000001760013162174023014040 0ustar liggesusers ##' Estimation and simulation of latent variable models ##' ##' Framwork for estimating parameters and simulate data from Latent Variable ##' Models. ##' ##' @name lava-package ##' @importFrom graphics plot lines points abline points text layout ##' par plot.new plot.window title rect locator segments image ##' mtext box axis polygon matplot contour contour.default ##' identify ##' @importFrom grDevices xy.coords col2rgb rgb colors rainbow ##' topo.colors gray.colors palette colorRampPalette heat.colors ##' @importFrom utils stack combn read.csv getTxtProgressBar ##' setTxtProgressBar txtProgressBar head tail modifyList ##' getFromNamespace packageVersion write.table methods data ##' glob2rx ##' @importFrom stats density deriv effects lm family simulate vcov ##' var cov cor coef model.frame model.weights as.formula ##' model.matrix rnorm rchisq runif rlnorm pnorm qnorm na.omit AIC ##' terms logLik qt pt update update.formula confint approxfun ##' pchisq confint.default formula fft uniroot rbinom predict sd ##' addmargins residuals dnorm quantile qf cov2cor qchisq ##' get_all_vars p.adjust rpois rgamma printCoefmat rt glm nlminb ##' na.pass na.omit ##' @importFrom survival is.Surv ##' @importFrom methods new as ##' @aliases lava-package lava ##' @docType package ##' @author Klaus K. Holst Maintainer: ##' @keywords package ##' @examples ##' ##' lava() ##' NULL ##' Longitudinal Bone Mineral Density Data ##' ##' Bone Mineral Density Data consisting of 112 girls randomized to receive ##' calcium og placebo. Longitudinal measurements of bone mineral density ##' (g/cm^2) measured approximately every 6th month in 3 years. ##' ##' ##' @name calcium ##' @docType data ##' @format A data.frame containing 560 (incomplete) observations. The 'person' ##' column defines the individual girls of the study with measurements at ##' visiting times 'visit', and age in years 'age' at the time of visit. The ##' bone mineral density variable is 'bmd' (g/cm^2). ##' @source Vonesh & Chinchilli (1997), Table 5.4.1 on page 228. ##' @keywords datasets NULL ##' Longitudinal Bone Mineral Density Data (Wide format) ##' ##' Bone Mineral Density Data consisting of 112 girls randomized to receive ##' calcium og placebo. Longitudinal measurements of bone mineral density ##' (g/cm^2) measured approximately every 6th month in 3 years. 
##' @name bmd ##' @docType data ##' @source Vonesh & Chinchilli (1997), Table 5.4.1 on page 228. ##' @format data.frame ##' @keywords datasets ##' @seealso calcium NULL ##' Simulated data ##' ##' Simulated data ##' @name brisa ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Data ##' ##' Description ##' @name bmidata ##' @docType data ##' @format data.frame ##' @keywords datasets NULL ##' Hubble data ##' ##' Velocity (v) and distance (D) measures of 36 Type Ia super-novae from the Hubble ##' Space Telescope ##' @name hubble ##' @docType data ##' @format data.frame ##' @source Freedman, W. L., et al. 2001, AstroPhysicalJournal, 553, 47. ##' @keywords datasets NULL ##' Hubble data ##' ##' @name hubble2 ##' @seealso hubble ##' @docType data ##' @format data.frame ##' @keywords datasets NULL ##' Data ##' ##' Description ##' @name indoorenv ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Missing data example ##' ##' Simulated data generated from model ##' \deqn{E(Y_i\mid X) = X, \quad cov(Y_1,Y_2\mid X)=0.5} ##' ##' The list contains four data sets ##' 1) Complete data ##' 2) MCAR ##' 3) MAR ##' 4) MNAR (missing mechanism depends on variable V correlated with Y1,Y2) ##' @examples ##' data(missingdata) ##' e0 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[1]]) ## No missing ##' e1 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]]) ## CC (MCAR) ##' e2 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]],missing=TRUE) ## MCAR ##' e3 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]]) ## CC (MAR) ##' e4 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]],missing=TRUE) ## MAR ##' @name missingdata ##' @docType data ##' @format list of data.frames ##' @source Simulated ##' @keywords datasets NULL ##' Example data (nonlinear model) ##' ##' @name nldata ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Example SEM data (nonlinear) ##' ##' Simulated data ##' @name nsem ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Example SEM data ##' ##' Simulated data ##' @name semdata ##' @docType data ##' @source Simulated ##' @format data.frame ##' @keywords datasets NULL ##' Serotonin data ##' ##' This simulated data mimics a PET imaging study where the 5-HT2A ##' receptor and serotonin transporter (SERT) binding potential has ##' been quantified into 8 different regions. The 5-HT2A ##' cortical regions are considered high-binding regions ## 'which are a priori known to yield quite similar and highly correlated ##' measurements. 
These measurements can be regarded as proxy measures of ##' the extra-cellular levels of serotonin in the brain ##' \tabular{rll}{ ##' day \tab numeric \tab Scan day of the year \cr ##' age \tab numeric \tab Age at baseline scan \cr ##' mem \tab numeric \tab Memory performance score \cr ##' depr \tab numeric \tab Depression (mild) status 500 days after baseline \cr ##' gene1 \tab numeric \tab Gene marker 1 (HTR2A) \cr ##' gene2 \tab numeric \tab Gene marker 2 (HTTTLPR) \cr ##' cau \tab numeric \tab SERT binding, Caudate Nucleus \cr ##' th \tab numeric \tab SERT binding, Thalamus \cr ##' put \tab numeric \tab SERT binding, Putamen \cr ##' mid \tab numeric \tab SERT binding, Midbrain \cr ##' aci \tab numeric \tab 5-HT2A binding, Anterior cingulate gyrus \cr ##' pci \tab numeric \tab 5-HT2A binding, Posterior cingulate gyrus \cr ##' sfc \tab numeric \tab 5-HT2A binding, Superior frontal cortex \cr ##' par \tab numeric \tab 5-HT2A binding, Parietal cortex \cr ##' } ##' @name serotonin ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Data ##' ##' Description ##' @seealso serotonin ##' @name serotonin2 ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Twin menarche data ##' ##' Simulated data ##' \tabular{rll}{ ##' id \tab numeric \tab Twin-pair id \cr ##' zyg \tab character \tab Zygosity (MZ or DZ) \cr ##' twinnum \tab numeric \tab Twin number (1 or 2) \cr ##' agemena \tab numeric \tab Age at menarche (or censoring) \cr ##' status \tab logical \tab Censoring status (observed:=T,censored:=F) \cr ##' bw \tab numeric \tab Birth weight \cr ##' msmoke \tab numeric \tab Did mother smoke? (yes:=1,no:=0) \cr ##' } ##' @name twindata ##' @docType data ##' @format data.frame ##' @keywords datasets ##' @source Simulated NULL ##' For internal use ##' ##' @title For internal use ##' @name startvalues ##' @rdname internal ##' @author Klaus K. Holst ##' @keywords utilities ##' @export ##' @aliases ##' startvalues0 startvalues1 startvalues2 startvalues3 ##' starter.multigroup ##' addattr modelPar modelVar matrices pars pars.lvm ##' pars.lvmfit pars.glm score.glm procdata.lvmfit modelPar modelVar ##' matrices reorderdata graph2lvm igraph.lvm subgraph finalize ##' index.lvm index.lvmfit index reindex index<- ##' survival survival<- ordinal ordinal<- ##' rmvn dmvn NR logit expit tigol ##' randomslope randomslope<- lisrel variances offdiags describecoef ##' parlabels rsq stdcoef CoefMat CoefMat.multigroupfit deriv updatelvm ##' checkmultigroup profci estimate.MAR missingModel Inverse ##' gaussian_logLik.lvm addhook gethook multigroup Weights fixsome ##' parfix parfix<- merge IV parameter index index<- ##' Specials procformula getoutcome decomp.specials NULL lava/R/backdoor.R0000644000176200001440000000571413162174023013313 0ustar liggesusers##' Backdoor criterion ##' ##' Check backdoor criterion of a lvm object ##' @param object lvm object ##' @param f formula. Conditioning, z, set can be given as y~x|z ##' @param cond Vector of variables to conditon on ##' @param ... 
Additional arguments to lower level functions ##' @param return.graph Return moral ancestral graph with z and effects from x removed ##' @examples ##' m <- lvm(y~c2,c2~c1,x~c1,m1~x,y~m1, v1~c3, x~c3,v1~y, ##' x~z1, z2~z1, z2~z3, y~z3+z2+g1+g2+g3) ##' ll <- backdoor(m, y~x) ##' backdoor(m, y~x|c1+z1+g1) ##' @export backdoor <- function(object, f, cond, ..., return.graph=FALSE) { y <- getoutcome(f, sep = "|") x <- attr(y, "x") if (length(x) > 1) { cond <- all.vars(x[[2]]) } x <- all.vars(x[[1]]) nod <- vars(object) des <- descendants(object, x) ch <- children(object, x) g0 <- cancel(object, toformula(x, ch)) if (!base::missing(cond)) { val <- dsep(g0, c(y, x), cond = cond) && !any(cond %in% des) if (return.graph) { res <- dsep(g0, c(y, x), cond = cond, return.graph=TRUE) attr(res,"result") <- val return(res) } return(val) } cset <- base::setdiff(nod, c(des, x, y)) ## possible conditioning set pp <- path(g0,from=x,to=y,all=TRUE) ## All backdoor paths M <- adjMat(g0) Collider <- function(vec) { M[vec[2],vec[1]] & M[vec[2],vec[3]] } blockList <- collideList <- c() for (i in seq_along(pp)) { p0 <- pp[[i]] blocks <- c() collide <- c() for (j in seq(length(p0)-2)) { if (Collider(p0[0:2 + j])) { collide <- c(collide,p0[1+j]) } else { blocks <- c(blocks,p0[1+j]) } } blockList <- c(blockList,list(blocks)) collideList <- c(collideList,list(collide)) } res <- list(blockList) ## Paths with colliders: col <- unlist(lapply(collideList,function(x) !is.null(x))) if (length(col)>0) col <- which(col) ## List of variables which are not on path between x and y: optional <- setdiff(cset,c(unlist(collideList),unlist(blockList))) callrecurs <- function(col,res=list()) { if (length(col)==0) return(res) blockList0 <- blockList blockList0[col] <- NULL blockList0 <- lapply(blockList0, function(x) setdiff(x,unlist(collideList[col]))) if (!any(unlist(lapply(blockList0,is.null)))) { res <- c(res, list(blockList0)) } for (i in seq_along(col)) { col0 <- col[-i] if (length(col0)>0) res <- callrecurs(col0,res) } return(res) } if (length(col)>0) res <- c(res,callrecurs(col)) ## Any element can be included from 'optional' For a given element ## in 'include' at least one element in each member of the list ## must be included in the conditioning set. return(list(optional=optional, include=res)) } lava/R/addvar.R0000644000176200001440000000721613162174023012767 0ustar liggesusers##' Generic method for adding variables to model object ##' ##' @title Add variable to (model) object ##' @param x Model object ##' @param \dots Additional arguments ##' @author Klaus K. Holst ##' @aliases addvar<- ##' @export `addvar` <- function(x,...) UseMethod("addvar") ##' @export `addvar<-` <- function(x,...,value) UseMethod("addvar<-") ##' @export `addvar<-.lvm` <- function(x,...,value) { if (inherits(value,"formula")) { regression(x,...) <- value return(x) ## return(addvar(x,all.vars(value),...)) } addvar(x, var=value, ...) } ##' @export `addvar.lvm` <- function(x, var, silent=lava.options()$silent,reindex=TRUE,...) 
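## Commented usage sketch for the method below:
##   m <- lvm(y ~ x)
##   m <- addvar(m, "z")   ## add 'z' as a new, initially unconnected, variable
##   vars(m)               ## now includes "z"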
{ new <- setdiff(var,vars(x)) k <- length(new) Debug(new) if (k>0) { if (lava.options()$sparse) { requireNamespace("Matrix",quietly=TRUE) newNA <- newM <- Matrix::Matrix(0,k,k) newNAc <- newNA; diag(newNAc) <- NA newcov <- Matrix::Diagonal(k) } else { newM <- matrix(0,k,k) newcov <- diag(k) } newNA <- matrix(NA,k,k) colnames(newM) <- rownames(newM) <- colnames(newcov) <- rownames(newcov) <- colnames(newNA) <- rownames(newNA) <- new newmean <- as.list(rep(NA,k)) ## for (i in new) { N <- nrow(x$cov) if (is.null(N)) { N <- 0 x$M <- newM x$cov <- newcov; x$covfix <- x$fix <- x$par <- x$covpar <- newNA x$mean <- newmean } else { if (lava.options()$sparse) { x$M <- Matrix::bdiag(x$M, newM) ## Add regression labels.R x$cov <- Matrix::bdiag(x$cov, newcov) ## Add covariance x$par <- Matrix::bdiag(x$par, newNA) ## Add regression labels x$covpar <- Matrix::bdiag(x$covpar, newNA) ## Add covariance labels x$fix <- Matrix::bdiag(x$fix, newNA) x$covfix <- Matrix::bdiag(x$covfix, newNA) } else { x$M <- blockdiag(x$M, newM, pad=0) ## Add regression labels x$cov <- blockdiag(x$cov, newcov, pad=0) ## Add covariance x$par <- blockdiag(x$par, newNA, pad=NA) ## Add regression labels x$covpar <- blockdiag(x$covpar, newNA, pad=NA) ## Add covariance labels x$fix <- blockdiag(x$fix, newNA, pad=NA) ## x$covfix <- blockdiag(x$covfix, newNA, pad=NA) ## } x$mean <- c(x$mean, newmean) } names(x$mean)[N+seq_len(k)] <- colnames(x$M)[N+seq_len(k)] <- rownames(x$M)[N+seq_len(k)] <- colnames(x$covfix)[N+seq_len(k)] <- rownames(x$covfix)[N+seq_len(k)] <- colnames(x$fix)[N+seq_len(k)] <- rownames(x$fix)[N+seq_len(k)] <- colnames(x$covpar)[N+seq_len(k)] <- rownames(x$covpar)[N+seq_len(k)] <- colnames(x$par)[N+seq_len(k)] <- rownames(x$par)[N+seq_len(k)] <- colnames(x$cov)[N+seq_len(k)] <- rownames(x$cov)[N+seq_len(k)] <- new ## x$cov[N+1,N+1] <- 1 ## names(x$mean)[N+1] <- ## colnames(x$M)[N+1] <- rownames(x$M)[N+1] <- ## colnames(x$covfix)[N+1] <- rownames(x$covfix)[N+1] <- ## colnames(x$fix)[N+1] <- rownames(x$fix)[N+1] <- ## colnames(x$covpar)[N+1] <- rownames(x$covpar)[N+1] <- ## colnames(x$par)[N+1] <- rownames(x$par)[N+1] <- ## colnames(x$cov)[N+1] <- rownames(x$cov)[N+1] <- i ## myexpr <- paste("c(",i,"=expression(",i,"))", sep="\"") ## labels(x) <- (eval(parse(text=myexpr))) ## if (!silent) ## message("\tAdded '", i, "' to model.\n", sep="") if (!silent) { if (k==1) message("\tAdded '", new, "' to model.\n", sep="") else message("\tAdded ",paste(paste("'",new,"'",sep=""),collapse=",")," to model.\n", sep="") } exogenous(x) <- c(new,exogenous(x)) } if (reindex) index(x) <- reindex(x) return(x) } lava/R/compare.R0000644000176200001440000001510713162174023013152 0ustar liggesusers##' Performs Likelihood-ratio, Wald and score tests ##' @title Statistical tests ##' @aliases compare ##' @export ##' @param object \code{lvmfit}-object ##' @param \dots Additional arguments to low-level functions ##' @return Matrix of test-statistics and p-values ##' @author Klaus K. 
Holst ##' @seealso \code{\link{modelsearch}}, \code{\link{equivalence}} ##' @keywords htest ##' @examples ##' m <- lvm(); ##' regression(m) <- c(y1,y2,y3) ~ eta; latent(m) <- ~eta ##' regression(m) <- eta ~ x ##' m2 <- regression(m, c(y3,eta) ~ x) ##' set.seed(1) ##' d <- sim(m,1000) ##' e <- estimate(m,d) ##' e2 <- estimate(m2,d) ##' ##' compare(e) ##' ##' compare(e,e2) ## LRT, H0: y3<-x=0 ##' compare(e,scoretest=y3~x) ## Score-test, H0: y3~x=0 ##' compare(e2,par=c("y3~x")) ## Wald-test, H0: y3~x=0 ##' ##' B <- diag(2); colnames(B) <- c("y2~eta","y3~eta") ##' compare(e2,contrast=B,null=c(1,1)) ##' ##' B <- rep(0,length(coef(e2))); B[1:3] <- 1 ##' compare(e2,contrast=B) ##' ##' compare(e,scoretest=list(y3~x,y2~x)) compare <- function(object,...) UseMethod("compare") ##' @export compare.default <- function(object,...,par,contrast,null,scoretest,Sigma,level=.95,df=NULL) { if (!missing(par) || (!missing(contrast) && is.character(contrast))) { if (!missing(contrast) && is.character(contrast)) par <- contrast contrast <- rep(0,length(coef(object))) myidx <- parpos(Model(object),p=par) contrast[myidx] <- 1 contrast <- diag(contrast,nrow=length(contrast))[which(contrast!=0),,drop=FALSE] if (!missing(null) && length(null)>1) null <- null[attributes(myidx)$ord] } ### Wald test if (!missing(contrast)) { B <- contrast p <- coef(object) pname <- names(p) B <- rbind(B); colnames(B) <- if (is.vector(contrast)) names(contrast) else colnames(contrast) if (missing(Sigma)) { Sigma <- vcov(object) } if (ncol(B)0) { M <- moments(x,coef(x)) nn <- names(res) for (lat in latent(x)) { v <- intersect(children(x,lat),endogenous(x)) varl <- M$Cfull[lat,lat] varv <- M$Cfull[cbind(v,v)] rpar <- paste(v,lat,sep=lava.options()$symbol[1]) fix <- c(x$model$fix[lat,v,drop=TRUE]) pp <- coef(x) if (inherits(x,"lvm.missing")) { mp <- match(coef(x$model),names(coef(x))) pp <- pp[mp] } idx1 <- x$model$parpos$A[lat,v] ##idx2 <- x$model$parpos$P[lat,lat] ##idx3 <- x$model$parpos$P[cbind(v,v)] p0 <- c(idx1) p1 <- setdiff(unique(p0),0) p2 <- match(p0,p1) p <- pp[p1] p. <- p[p2] p.[is.na(p.)] <- fix[is.na(p.)] k <- length(v) val <- (p.^2*varl)/varv; names(val) <- v res <- c(res,list(val)) nn <- c(nn,paste0("Variance explained by '",lat,"'")) } names(res) <- nn } res } satmodel <- function(object,logLik=TRUE,data=model.frame(object), control=list(trace=1), weights=Weights(object),estimator=object$estimator, missing=inherits(object,"lvm.missing"), regr=FALSE, ...) 
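## Commented usage sketch: satmodel() below (re)fits the saturated model, whose
## log-likelihood is used by gof() to form the likelihood-ratio (chi-square)
## goodness-of-fit statistic -2*(logLik(model) - logLik(saturated model)):
##   m <- lvm(c(y1, y2, y3) ~ eta); latent(m) <- ~eta
##   e <- estimate(m, sim(m, 200))
##   gof(e)   ## chi-square test against the saturated model, AIC/BIC, RMSEA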
{ if (object$estimator=="gaussian" & logLik & !missing) { if (class(object)[1]%in%c("multigroupfit","multigroup")) { ll <- structure(0,nall=0,nobs=0,df=0,class="logLik") for (i in seq_len(Model(object)$ngroup)) { l0 <- logLik(Model(Model(object))[[i]],data=model.frame(object)[[i]],type="sat") ll <- ll+l0 for (atr in c("nall","nobs","df")) attributes(ll)[[atr]] <- attributes(ll)[[atr]]+attributes(l0)[[atr]] } } return(logLik(object, type="sat")) } covar <- exogenous(object) y <- endogenous(object) m0 <- Model(object) if (length(covar)>0) suppressWarnings(m0 <- regression(m0,y,covar)) if (length(latent(m0))>0) kill(m0) <- latent(m0) cancel(m0) <- y if (!regr) suppressWarnings(covariance(m0) <- y) else { if (length(y)>1) { for (i in seq_len(length(y)-1)) for (j in seq(i+1,length(y))) { m0 <- regression(m0,y[i],y[j]) } } exogenous(m0) <- covar } if (is.null(control$start)) { mystart <- rep(0,with(index(m0), npar.mean+npar)) mystart[variances(m0,mean=TRUE)] <- 1 control$start <- mystart } message("Calculating MLE of saturated model:\n") e0 <- estimate(m0,data=data,weights=weights,estimator=estimator,silent=TRUE,control=control,missing=missing,...) if (logLik) return(logLik(e0)) return(e0) } condition <- function(A) { suppressWarnings(with(eigen(A),tail(values,1)/head(values,1))) } ##' Extract model summaries and GOF statistics for model object ##' ##' Calculates various GOF statistics for model object including global ##' chi-squared test statistic and AIC. Extract model-specific mean and variance ##' structure, residuals and various predicitions. ##' ##' ##' @aliases gof gof.lvmfit moments moments.lvm information information.lvmfit ##' score score.lvmfit logLik.lvmfit ##' @param object Model object ##' @param x Model object ##' @param p Parameter vector used to calculate statistics ##' @param data Data.frame to use ##' @param latent If TRUE predictions of latent variables are included in output ##' @param data2 Optional second data.frame (only for censored observations) ##' @param weights Optional weight matrix ##' @param n Number of observations ##' @param conditional If TRUE the conditional moments given the covariates are ##' calculated. Otherwise the joint moments are calculated ##' @param model String defining estimator, e.g. "gaussian" (see ##' \code{estimate}) ##' @param debug Debugging only ##' @param chisq Boolean indicating whether to calculate chi-squared ##' goodness-of-fit (always TRUE for estimator='gaussian') ##' @param level Level of confidence limits for RMSEA ##' @param rmsea.threshold Which probability to calculate, Pr(RMSEA0) { ## SRMR.endo <- mean(c(R[idx,idx][upper.tri(R[idx,idx],diag=TRUE)],R2[idx])^2)^0.5 ## res <- c(res,list("SRMR(endogenous)"=SRMR.endo)) ## } } ## if (class(object)[1]=="lvmfit") if (rnkV==ncol(vcov(object)) && (!is.null(minSV) && minSV>1e-12)) { rmseafun <- function(...) { epsilon <- function(lambda) sapply(lambda,function(x) ifelse(x>0 & qdf>0,sqrt(x/(qdf*(n))),0)) ## n-1,n vs. n-df opf <- function(l,p) suppressWarnings(p-pchisq(q,df=qdf,ncp=l)) ## pchisq(... 
lower.tail=FALSE)-1 alpha <- (1-level)/2 RMSEA <- epsilon(q-qdf) B <- max(q-qdf,0) lo <- hi <- list(root=0) if (RMSEA>0 && opf(0,p=1-alpha)<0) { hi <- uniroot(function(x) opf(x,p=1-alpha),c(0,B)) } if (opf(B,p=alpha)<0) { lo <- uniroot(function(x) opf(x,p=alpha),c(B,n)) } ci <- c(epsilon(c(hi$root,lo$root))) RMSEA <- c(RMSEA=RMSEA,ci); names(RMSEA) <- c("RMSEA",paste0(100*c(alpha,(1-alpha)),"%")) pval <- pchisq(q,qdf,(n*qdf*rmsea.threshold^2),lower.tail=FALSE) res <- list(aa=((q-qdf)/(2*qdf)^0.5),RMSEA=RMSEA, level=level, rmsea.threshold=rmsea.threshold, pval.rmsea=pval) return(res) } rmseaval <- tryCatch(rmseafun(),error=function(e) NULL) res <- c(res,rmseaval) } } else { res <- list(n=n, logLik=loglik, BIC=myBIC, AIC=myAIC) } res <- c(res, L2score=l2D, rankV=rnkV, cond=condnum, k=nrow(vcov(object))) class(res) <- "gof.lvmfit" return(res) } ##' @export print.gof.lvmfit <- function(x,optim=TRUE,...) { if (!is.null(x$n)) { with(x, cat("\n Number of observations =", n, "\n")) } if (is.null(x$fit)) { with(x, cat(" Log-Likelihood =", logLik, "\n")) } with(x, cat(" BIC =", BIC, "\n", "AIC =", AIC, "\n")) if (!is.null(x$fit)) with(x, cat(" log-Likelihood of model =", fit$estimate[1], "\n\n", "log-Likelihood of saturated model =", fit$estimate[2], "\n", "Chi-squared statistic: q =", fit$statistic, ", df =", fit$parameter, "\n P(Q>q) =", fit$p.value, "\n")) if (!is.null(x$RMSEA)) { rr <- round(x$RMSEA*10000)/10000 rmsea <- paste0(rr[1]," (",rr[2],";",rr[3],")") cat("\n RMSEA (",x$level*100,"% CI): ", rmsea,"\n",sep="") cat(" P(RMSEA<",x$rmsea.threshold,")=", x$pval.rmsea,"\n",sep="") } for (i in c("TLI","CFI","NFI","SRMR","SRMR(endogenous)")) if (!is.null(x[[i]])) cat("", i,"=",x[[i]],"\n") if (optim) { cat("\nrank(Information) = ",x$rankV," (p=", x$k,")\n",sep="") cat("condition(Information) = ",x$cond,"\n",sep="") cat("mean(score^2) =",x$L2score,"\n") } invisible(x) } ## gof.multigroupfit <- function(object,...) { ## L0 <- logLik(object); df0 <- attributes(L0)$df ## L1 <- logLik(object,type="sat"); df1 <- attributes(L1)$df ## df <- df1-df0; names(df) <- "df" ## Q <- -2*(L0-L1); attributes(Q) <- NULL; names(Q) <- "chisq"; ## pQ <- pchisq(Q,df,lower.tail=FALSE) ## values <- c(L0,L1); names(values) <- c("log likelihood (model)", "log likelihood (saturated model)") ## res <- list(statistic = Q, parameter = df, ## p.value=pQ, method = "Likelihood ratio test", ## estimate = values) ## class(res) <- "htest" ## return(res) ## } lava/R/subgraph.R0000644000176200001440000000123113162174023013330 0ustar liggesuserssubgraph <- function(g,from,to,Tree=new("graphNEL",node=c(to,from),edgemode="directed"),...) { adjnodes <- graph::adj(g,from)[[1]] newnodes <- !(adjnodes %in% graph::nodes(Tree)) if (length(adjnodes)==0) return(Tree) for (v in adjnodes) { if (v==to) { Tree <- graph::addEdge(from, v, Tree) } re1 <- graph::acc(g,v)[[1]] ## Reachable nodes from v if ((to %in% names(re1)[re1>0])) { if (!(v %in% graph::nodes(Tree))) Tree <- graph::addNode(v,Tree) Tree <- graph::addEdge(from, v, Tree) Tree <- path(g,v,to,Tree) } } return(Tree) } lava/R/modelsearch.R0000644000176200001440000002570313162174023014015 0ustar liggesusers##' Model searching ##' ##' Performs Wald or score tests ##' ##' ##' @aliases modelsearch ##' @param x \code{lvmfit}-object ##' @param k Number of parameters to test simultaneously. For \code{equivalence} ##' the number of additional associations to be added instead of \code{rel}. ##' @param dir Direction to do model search. 
"forward" := add ##' associations/arrows to model/graph (score tests), "backward" := remove ##' associations/arrows from model/graph (wald test) ##' @param type If equal to 'correlation' only consider score tests for covariance parameters. If equal to 'regression' go through direct effects only (default 'all' is to do both) ##' @param ... Additional arguments to be passed to the low level functions ##' @return Matrix of test-statistics and p-values ##' @author Klaus K. Holst ##' @seealso \code{\link{compare}}, \code{\link{equivalence}} ##' @keywords htest ##' @examples ##' ##' m <- lvm(); ##' regression(m) <- c(y1,y2,y3) ~ eta; latent(m) <- ~eta ##' regression(m) <- eta ~ x ##' m0 <- m; regression(m0) <- y2 ~ x ##' dd <- sim(m0,100)[,manifest(m0)] ##' e <- estimate(m,dd); ##' modelsearch(e,silent=TRUE) ##' modelsearch(e,silent=TRUE,type="cor") ##' @export modelsearch <- function(x,k=1,dir="forward",type='all',...) { if (dir=="forward") { res <- forwardsearch(x,k,type=type,...) return(res) } if (dir=="backstep") { res <- backwardeliminate(x,...) return(res) } res <- backwardsearch(x,k,...) return(res) } backwardeliminate <- function(x, keep=NULL, pthres=0.05, AIC=FALSE, silent=TRUE, missing=FALSE, intercepts=FALSE, maxsteps=Inf, information="E", messages=TRUE, data, ...) { if (inherits(x,"lvm")) { M <- x } else { M <- Model(x) } if(missing(data)) data <- model.frame(x) dots <- list(...) if (is.null(dots$control$start)) { p0 <- estimate(M,data,quick=TRUE,silent=silent,missing=FALSE,...) dots$control <- c(dots$control, list(start=p0,information="E")) } if (intercepts) ii <- NULL ff <- function() { ii <- grep("m",names(coef(M))) vv <- variances(M,mean=TRUE) args <- c(list(x=M,data=data,missing=missing,quick=TRUE,silent=silent),dots) cc <- do.call("estimate",args) if (is.numeric(cc)) { I0 <- information(M,p=cc,data=data,type=information)[-c(ii,vv),-c(ii,vv)] cc0 <- cc[-c(ii,vv)] res <- (pnorm(abs(cc0/sqrt(diag(solve(I0)))),lower.tail=FALSE))*2 attributes(res)$coef <- cc } else { coefs <- coef(cc) res <- (pnorm(abs(coefs/sqrt(diag(vcov(cc)))),lower.tail=FALSE))*2 res <- res[-c(ii,vv)] attributes(res)$coef <- coefs } return(res) } done <- FALSE; i <- 0; while (!done & iw)", "Index"); rownames(PM) <- rep("",nrow(PM)) res <- list(res=PM,test=res$test) class(res) <- "modelsearch" res } forwardsearch <- function(x,k=1,silent=FALSE,type='all',exclude.var=NULL,...) { if (!inherits(x,"lvmfit")) stop("Expected an object of class 'lvmfit'.") p <- pars(x,reorder=TRUE) cur <- Model(x) pp <- modelPar(cur,p) Y <- endogenous(x) X <- exogenous(x) V <- vars(x) q <- length(Y); qx <- length(X) npar.sat <- q+q*(q-1)/2 + q*qx npar.cur <- index(cur)$npar npar.mean <- index(cur)$npar.mean nfree <- npar.sat-npar.cur if (nfrees)", "Index"); rownames(PM) <- rep("",nrow(PM)) res <- list(res=PM, test=Tests, var=Vars, directional=directional) class(res) <- "modelsearch" return(res) } ##' @export print.modelsearch <- function(x,tail=nrow(x$res),adj=c("holm","BH"),...) { N <- nrow(x$res) if (!is.null(adj)) { ## adjp <- rev(holm(as.numeric(x$test[,2]))) adjp <- rbind(sapply(adj,function(i) p.adjust(x$test[,2],method=i))) colnames(adjp) <- adj x$res <- cbind(x$res,rbind(formatC(adjp))) } print(x$res[seq(N-tail+1,N),], quote=FALSE, ...) invisible(x) } lava/R/complik.R0000644000176200001440000001076513162174023013167 0ustar liggesusers##' Composite Likelihood for probit latent variable models ##' ##' Estimate parameters in a probit latent variable model via a composite ##' likelihood decomposition. 
##' @param x \code{lvm}-object ##' @param data data.frame ##' @param k Size of composite groups ##' @param type Determines number of groups. With \code{type="nearest"} (default) ##' only neighboring items will be grouped, e.g. for \code{k=2} ##' (y1,y2),(y2,y3),... With \code{type="all"} all combinations of size \code{k} ##' are included ##' @param pairlist A list of indices specifying the composite groups. Optional ##' argument which overrides \code{k} and \code{type} but gives complete ##' flexibility in the specification of the composite likelihood ##' @param silent Turn output messsages on/off ##' @param \dots Additional arguments parsed on to lower-level functions ##' @param estimator Model (pseudo-likelihood) to use for the pairs/groups ##' @return An object of class \code{clprobit} inheriting methods from \code{lvm} ##' @author Klaus K. Holst ##' @seealso estimate ##' @keywords models regression ##' @export complik <- function(x,data,k=2,type=c("nearest","all"),pairlist,silent=TRUE,estimator="normal", ...) { y <- setdiff(endogenous(x),latent(x)) x.idx <- index(x)$exo.idx binsurv <- rep(FALSE,length(y)) for (i in 1:length(y)) { z <- try(data[,y[i]],silent=TRUE) ## binsurv[i] <- is.Surv(z) | (is.factor(z) && length(levels(z))==2) if (!inherits(z,"try-error")) binsurv[i] <- inherits(z,"Surv") | (is.factor(z)) } ord <- ordinal(x) binsurv <- unique(c(y[binsurv],ord)) ## ,binary(x)) ## binsurvpos <- which(colnames(data)%in%binsurv) if (!missing(pairlist)) { binsurvpos <- which(colnames(data)%in%endogenous(x)) } else { binsurvpos <- which(colnames(data)%in%binsurv) } if (missing(pairlist)) { #if (length(binsurv)<(k+1)) stop("No need for composite likelihood analysis.") if (type[1]=="all") { mypar <- combn(length(binsurv),k) ## all pairs (or multiplets), k=2: k*(k-1)/2 } else { mypar <- sapply(0:(length(binsurv)-k), function(x) x+1:k) } } else { mypar <- pairlist } if (is.matrix(mypar)) { mypar0 <- mypar; mypar <- c() for (i in seq(ncol(mypar0))) mypar <- c(mypar, list(mypar0[,i])) } nblocks <- length(mypar) mydata0 <- data[,,drop=FALSE] mydata <- as.data.frame(matrix(NA, nblocks*nrow(data), ncol=ncol(data))) names(mydata) <- names(mydata0) for (i in 1:ncol(mydata)) { if (is.factor(data[,i])) { mydata[,i] <- factor(mydata[,i],levels=levels(mydata0[,i])) } if (survival::is.Surv(data[,i])) { S <- data[,i] for (j in 2:nblocks) S <- rbind(S,data[,i]) S[,1] <- NA mydata[,i] <- S } } for (ii in 1:nblocks) { data0 <- data; for (i in binsurvpos[-mypar[[ii]]]) { if (survival::is.Surv(data[,i])) { S <- data0[,i]; S[,1] <- NA data0[,i] <- S } else { data0[,i] <- NA if (is.factor(data[,i])) data0[,i] <- factor(data0[,i],levels=levels(data[,i])) } } mydata[(1:nrow(data))+(ii-1)*nrow(data),] <- data0 } suppressWarnings(e0 <- estimate(x,data=mydata,estimator=estimator,missing=TRUE,silent=silent, ...)) S <- score(e0,indiv=TRUE) nd <- nrow(data) block1 <- which((1:nd)%in%(rownames(S))) blocks <- sapply(1:nblocks, function(x) 1:length(block1)+length(block1)*(x-1)) if (nblocks==1) { Siid <- S } else { Siid <- matrix(0,nrow=length(block1),ncol=ncol(S)) for (j in 1:ncol(blocks)) { Siid <- Siid+S[blocks[,j],] } } iI <- solve(information(e0,type="hessian")) J <- t(Siid)%*%(Siid) e0$iidscore <- Siid e0$blocks <- blocks e0$vcov <- iI%*%J%*%iI ## thetahat-theta0 :=(asymp) I^-1*S => var(thetahat) = iI*var(S)*iI cc <- e0$coef; cc[,2] <- sqrt(diag(e0$vcov)) cc[,3] <- cc[,1]/cc[,2]; cc[,4] <- 2*(1-pnorm(abs(cc[,3]))) e0$coef <- cc e0$bread <- iI class(e0) <- c("estimate.complik",class(e0)) return(e0) } 
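## Commented usage sketch for complik() above (the probit/"normal" estimator may
## require additional packages such as 'mets'):
##   m <- lvm(c(y1, y2, y3) ~ eta, eta ~ x); latent(m) <- ~eta
##   ordinal(m, K=2) <- ~y1 + y2 + y3        ## binary probit indicators
##   d <- sim(m, 200)
##   e <- complik(m, d, k=2)                 ## neighbouring pairs (y1,y2), (y2,y3)
##   summary(e)                              ## sandwich standard errors from the pairwise score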
score.estimate.complik <- function(x,indiv=FALSE,...) { if (!indiv) return(colSums(x$iidscore)) x$iidscore } iid.estimate.complik <- function(x,...) { iid.default(x,bread=x$bread,...) } lava/R/Inverse.R0000644000176200001440000000234513162174023013137 0ustar liggesusers##' @export Inverse <- function(X,tol=lava.options()$itol,det=TRUE,names=!chol,chol=FALSE,symmetric=FALSE) { n <- NROW(X) # return(structure(solve(X),)) if (n==1L) { res <- 1/X if (det) attributes(res)$det <- X if (chol) attributes(res)$chol <- X return(res) } if (chol) { L <- chol(X) res <- chol2inv(L) if (det) attributes(res)$det <- prod(diag(L)^2) if (chol) attributes(res)$chol <- X } else { if(symmetric){ decomp <- eigen(X, symmetric = TRUE) D <- decomp$values U <- decomp$vectors V <- decomp$vectors }else{ X.svd <- svd(X) U <- X.svd$u V <- X.svd$v D <- X.svd$d } id0 <- numeric(n) idx <- which(abs(D)>tol) id0[idx] <- 1/D[idx] res <- V%*%diag(id0,nrow=length(id0))%*%t(U) if (det) attributes(res)$det <- prod(D[D>tol]) attributes(res)$pseudo <- (length(idx)0) { p. <- p[-seq_len(npar.mean)] } else meanpar <- NULL p <- p.[seq_len(npar)] if (npar>0) { p2 <- p.[-seq_len(npar)] } else p2 <- p. } else { meanpar <- NULL p2 <- NULL } return(list(p=p,meanpar=meanpar,p2=p2)) } ###}}} modelpar.lvm ###{{{ modelPar.multigroupfit ##' @export modelPar.multigroupfit <- function(x,p=pars(x),...) { modelPar(Model(x),p,...) } ###}}} ###{{{ modelPar.multigroup ##' @export modelPar.multigroup <- function(x,p, ...) { if (length(p)==x$npar) { pp <- lapply(x$parposN,function(z) p[z]) res <- list(p=pp, par=pp, mean=NULL) return(res) } Nmean <- unlist(lapply(x$meanposN,length)) Npar <- unlist(lapply(x$parposN,length)) ##ppos <- mapply("+",x$parposN,as.list(Nmean),SIMPLIFY=FALSE) ppos <- x$parposN pp <- lapply(ppos,function(z) p[z+x$npar.mean]) if (length(pp)==0) pp <- lapply(seq_len(x$ngroup),function(x) logical()) mm <- lapply(x$meanposN,function(x) p[x]) if (is.null(mm)) mm <- lapply(seq_len(x$ngroup),logical()) pm <- mm for (i in seq_len(length(pm))) pm[[i]] <- c(pm[[i]],pp[[i]]) res <- list(p=pm,par=pp,mean=mm) return(res) } ###}}} modelPar2.multigroup <- function(x,p, ...) { npar <- x$npar npar.mean <- x$npar.mean k <- x$ngroup if (length(p)!=npar & length(p)!=(npar+npar.mean)) stop("Wrong dimension of parameter vector!") if (length(p)!=npar) { ## if meanstructure meanpar <- p[seq_len(npar.mean)] p. <- p[-seq_len(npar.mean)] } else { meanpar <- NULL p. <- p } parlist <- list(); for (i in seq_len(k)) parlist[[i]] <- numeric(length(x$parlist[[i]])) if (!is.null(meanpar)) { meanlist <- list(); for (i in seq_len(k)) meanlist[[i]] <- numeric(length(x$meanlist[[i]])) } if (length(p.)>0) for (i in seq_along(p.)) { for (j in seq_len(k)) { idx <- match(paste0("p",i), x$parlist[[j]]) if (!is.na(idx)) parlist[[j]][idx] <- p.[i] if (!is.null(meanpar)) { midx <- match(paste0("p",i), x$meanlist[[j]]) if (!is.na(midx)) meanlist[[j]][midx] <- p.[i] } } } if (!is.null(meanpar)) { for (i in seq_along(meanpar)) { for (j in seq_len(k)) { idx <- match(paste0("m",i), x$meanlist[[j]]) if (!is.na(idx)) meanlist[[j]][idx] <- meanpar[i] } } } else { meanlist <- NULL } p0 <- parlist for (i in seq_along(p0)) p0[[i]] <- c(meanlist[[i]],parlist[[i]]) return(list(p=p0, par=parlist, mean=meanlist)) } lava/R/model.frame.R0000644000176200001440000000110513162174023013706 0ustar liggesusers##' @export model.frame.lvmfit <- function(formula, all=FALSE,...) { dots <- list(...) 
mydata <- formula$data$model.frame if (!is.data.frame(mydata) & !is.matrix(mydata)) return(mydata) if (all) return(mydata) ## xfix <- colnames(mydata)[(colnames(mydata)%in%parlabels(formula$model0,exo=TRUE))] xfix <- colnames(mydata)[(colnames(mydata)%in%parlabels(formula$model0))] return( mydata[,c(manifest(formula),xfix),drop=FALSE] ) } ##' @export model.frame.multigroupfit <- function(formula,...) { dots <- list(...) mydata <- formula$model$data return(mydata) } lava/R/manifest.R0000644000176200001440000000105513162174023013327 0ustar liggesusers##' @export `manifest` <- function(x,...) UseMethod("manifest") ##' @export `manifest.lvm` <- function(x,...) { if (length(vars(x))>0) setdiff(vars(x),latent(x)) else NULL } ##' @export `manifest.lvmfit` <- function(x,...) { manifest(Model(x)) } ##' @export manifest.list <- function(x,...) { manifestlist <- c() for (i in seq_along(x)) { manifestlist <- c(manifestlist, manifest(x[[i]])) } endolist <- unique(manifestlist) return(manifestlist) } ##' @export `manifest.multigroup` <- function(x,...) { manifest(Model(x)) } lava/R/vec.R0000644000176200001440000000136213162174023012277 0ustar liggesusers##' vec operator ##' ##' Convert array into vector ##' @title vec operator ##' @param x Array ##' @param matrix If TRUE a row vector (matrix) is returned ##' @param sep Seperator ##' @param ... Additional arguments ##' @author Klaus Holst ##' @export vec <- function(x,matrix=FALSE,sep=".",...) { if (is.vector(x) && !is.list(x)) { res <- x } else if (is.list(x)) { res <- stats::setNames(unlist(x),names(x)) } else { if (is.matrix(x) && is.null(rownames(x))) { nn <- colnames(x) } else { nn <- apply(expand.grid(dimnames(x)),1,function(x) paste(x,collapse=sep)) } res <- as.vector(x); names(res) <- nn } if (matrix) return(cbind(res)) return(res) } lava/R/fixsome.R0000644000176200001440000000767113162174023013205 0ustar liggesusers ##' @export fixsome <- function(x, exo.fix=TRUE, measurement.fix=TRUE, S, mu, n, data, x0=FALSE, na.method="complete.obs", param=lava.options()$param,...) { if (is.null(param)) { param <- "none" } else { paramval <- c("hybrid","relative","none","absolute") param <- agrep(param,paramval,max.distance=0,value=TRUE) } if (is.character(measurement.fix)) { param <- measurement.fix measurement.fix <- TRUE } var.missing <- c() if (!missing(data) | !missing(S)) { if (!missing(data)) { dd <- procdata.lvm(x,data=data,na.method=na.method) } else { dd <- procdata.lvm(x, list(S=S,mu=mu,n=n)) } S <- dd$S; mu <- dd$mu; n <- dd$n var.missing <- setdiff(index(x)$manifest,colnames(S)) } else { S <- NULL; mu <- NULL } if (measurement.fix & param!="none") { if (length(var.missing)>0) {## Convert to latent: new.lat <- setdiff(var.missing,latent(x)) if (length(new.lat)>0) x <- latent(x, new.lat) } etas <- latent(x) ys <- endogenous(x) M <- x$M for (e in etas) { ## Makes sure that at least one arrow from latent variable is fixed (identification) ys. 
<- names(which(M[e,ys]==1)) if (length(ys.)>0) { if (tolower(param)=="absolute") { if (is.na(intercept(x)[[e]])) intercept(x,e) <- 0 if (is.na(x$covfix[e,e]) & is.na(x$covpar[e,e])) covariance(x,e) <- 1 } else { if (param=="hybrid") { if (is.na(intercept(x)[[e]])) intercept(x,e) <- 0 if (all(is.na(x$fix[e, ]==1)) & is.na(x$covpar[e,e]) & is.na(x$covfix[e,e])) regfix(x,from=e,to=ys.[1]) <- 1 } else { ## relative if (all(is.na(x$fix[e, ]==1)) & is.na(x$covpar[e,e]) & is.na(x$covfix[e,e])) regfix(x,from=e,to=ys.[1]) <- 1 if (!any(unlist(lapply(intercept(x)[ys.],is.numeric))) & is.na(intercept(x)[[e]])) { if (tryCatch(any(idx <- !is.na(x$fix[e,ys.])),error=function(x) FALSE)) { intercept(x, ys.[which(idx)[1]]) <- 0 } else { intercept(x,ys.[1]) <- 0 } } } } } } } if (is.null(S)) x0 <- TRUE if (exo.fix) { if (x0) { S0 <- diag(nrow=length(index(x)$manifest)) mu0 <- rep(0,nrow(S0)) } else { S0 <- S S0[is.na(S0)] <- 0 mu0 <- mu e0 <- eigen(S0) thres <- lava.options()$itol^(1/2) if (any(e0$values0) { for (i in seq_along(exo.idx)) for (j in seq_along(exo.idx)) { i. <- exo_all.idx[i]; j. <- exo_all.idx[j] myval <- S0[exo.idx[i],exo.idx[j]]; if (i.==j. & myval==0) { ##warning("Overparametrized model. Problem with '"%++%index(x)$vars[j.]%++%"'") myval <- 1 } else if (is.na(myval) || is.nan(myval)) myval <- 0 x$covfix[i.,j.] <- x$covfix[j.,i.] <- myval } x$mean[exo_all.idx] <- mu0[exo.idx] } } index(x) <- reindex(x) return(x) } lava/R/parameter.R0000644000176200001440000000232013162174023013475 0ustar liggesusers##' @export "parameter<-" <- function(x,...,value) UseMethod("parameter<-") ##' @export "parameter<-.lvmfit" <- function(x,...,value) { parameter(Model(x),...) <- value return(x) } ##' @export "parameter<-.lvm" <- function(x,constrain,start,remove=FALSE,...,value) { if (inherits(value,"formula")) value <- all.vars(value) if (remove) { x$expar[value] <- NULL x$exfix[value] <- NULL x$attributes$parameter[value] <- NULL index(x) <- reindex(x) return(x) } if (!missing(start)) { if (length(start) != length(value)) stop("'start' and 'value' should be of the same lengths") start <- as.list(start) names(start) <- value } else { start <- as.list(rep(0,length(value))); names(start) <- value } if (!missing(constrain)) { newfix <- constrain if (!is.list(newfix)) newfix <- as.list(newfix) } else { newfix <- as.list(value); } names(newfix) <- value x$expar[value] <- start x$exfix[value] <- newfix index(x) <- reindex(x) x$attributes$parameter[value] <- TRUE return(x) } ##' @export parameter <- function(x,var,...) { if (missing(var)) return (names(unlist(x$attributes$parameter))) parameter(x,...) <- var } lava/R/measurement.R0000644000176200001440000000157713162174023014057 0ustar liggesusers##' @export `measurement` <- function(x, ...) 
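## measurement() splits a latent variable model into its measurement parts:
## one sub-model (lvm object) is returned per latent variable, containing the
## latent variable together with its manifest indicators. Minimal sketch of
## use (illustrative only, not taken from the package examples):
##   m <- lvm(c(y1,y2,y3) ~ eta1, c(z1,z2,z3) ~ eta2, eta1 ~ eta2)
##   latent(m) <- ~eta1 + eta2
##   measurement(m)  ## list with one measurement model per latent variable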
{ M <- x$M latent.idx <- match(latent(x),vars(x)) obs.idx <- match(manifest(x),vars(x)) if (length(latent.idx)==0) return(NULL) measurementmodels <- c() for (i in seq_along(latent.idx)) { ii <- latent.idx[i] relation <- M[ii,obs.idx]==1 byNodes <- names(relation)[relation] newnodes <- c(latent(x)[i],byNodes) lvm1 <- subset(x,newnodes) ## g0 <- graph::subGraph(newnodes, Graph(x,add=TRUE)) ## lvm1 <- latent(graph2lvm(g0, debug=TRUE), latent(x)[i]) ## g0fix<- x$fix[newnodes, newnodes]; lvm1$fix <- g0fix ## index(lvm1) <- reindex(lvm1) measurementmodels <- c(measurementmodels, list(lvm1)) } measurementmodels } lava/R/diagtest.R0000644000176200001440000002212613162174023013327 0ustar liggesusers##' @export logit <- function(p) log(p/(1-p)) ##' @export expit <- function(z) 1/(1+exp(-z)) ##' @export tigol <- expit ##' Calculate prevalence, sensitivity, specificity, and positive and ##' negative predictive values ##' ##' @title Calculate diagnostic tests for 2x2 table ##' @aliases diagtest odds riskcomp OR Ratio Diff ##' @param table Table or (matrix/data.frame with two columns) ##' @param positive Switch reference ##' @param exact If TRUE exact binomial proportions CI/test will be used ##' @param p0 Optional null hypothesis (test prevalenc, sensitivity, ...) ##' @param confint Type of confidence limits ##' @param ... Additional arguments to lower level functions ##' @author Klaus Holst ##' @details Table should be in the format with outcome in columns and ##' test in rows. Data.frame should be with test in the first ##' column and outcome in the second column. ##' @examples ##' M <- as.table(matrix(c(42,12, ##' 35,28),ncol=2,byrow=TRUE, ##' dimnames=list(rater=c("no","yes"),gold=c("no","yes")))) ##' diagtest(M,exact=TRUE) ##' @export diagtest <- function(table,positive=2,exact=FALSE,p0=NA,confint=c("logit","arcsin","pseudoscore","exact"),...) 
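## Two computational routes are taken in the body below: with exact=TRUE all
## measures are derived from exact binomial tests (stats::binom.test) on the
## 2x2 counts, whereas the default route works on the multinomial cell
## probabilities and obtains standard errors by the delta method through
## estimate(), optionally after a logit or arcsine transformation of the
## proportions (controlled by the 'confint' argument).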
{ if (!inherits(table,c("table","data.frame","matrix","multinomial"))) stop("Expecting a table or data.frame.") if (is.table(table)) { lev <- dimnames(table)[[2]] } if (inherits(table,"multinomial")) { lev <- dimnames(table$P)[[2]] } if (!is.table(table) & (is.matrix(table) || is.data.frame(table))) { if (is.factor(table[,2])) { lev <- levels(table[,2]) } else lev <- unique(table[,2]) } if (is.character(positive)) { positive <- match(positive,lev) } if (!(positive%in%c(1,2))) stop("Expecting and index of 1 or 2.") negative <- positive%%2+1L if (!is.null(confint) && confint[1]=="exact") exact <- TRUE if (exact) { if (!is.table(table) && (is.matrix(table) || is.data.frame(table))) { table <- base::table(table[,c(1,2),drop=FALSE]) ##names(dimnames(table)) <- colnames(table)[1:2] } if (!is.table(table) || nrow(table)!=2 || ncol(table)!=2) stop("2x2 table expected") n <- sum(table) nc <- colSums(table) nr <- rowSums(table) test <- TRUE if (is.na(p0)) { test <- FALSE p0 <- 0.5 } ## Prevalence p1 <- with(stats::binom.test(nc[positive],n,p=p0),c(estimate,conf.int,p.value)) ## Test marginal p2 <- with(stats::binom.test(nr[positive],n,p=p0),c(estimate,conf.int,p.value)) ## Sensitivity/Specificity sens <- with(stats::binom.test(table[positive,positive],nc[positive],p=p0),c(estimate,conf.int,p.value)) spec <- with(stats::binom.test(table[negative,negative],nc[negative],p=p0),c(estimate,conf.int,p.value)) ## PPV,NPV ppv <- with(stats::binom.test(table[positive,positive],nr[positive],p=p0),c(estimate,conf.int,p.value)) npv <- with(stats::binom.test(table[negative,negative],nr[negative],p=p0),c(estimate,conf.int,p.value)) ## Accuracy acc <- with(stats::binom.test(table[positive,positive]+table[negative,negative],n,p=p0),c(estimate,conf.int,p.value)) ## Symmetry (McNemar): ## number of discordant pairs under null: b~bin(b+c,0.5) sym <- with(stats::binom.test(table[positive,negative],table[positive,negative]+table[negative,positive],p=0.5),c(estimate,conf.int,p.value)) coefmat <- rbind(Prevalence=p1, Test=p2, Sensitivity=sens, Specificity=spec, PositivePredictiveValue=ppv, NegativePredictiveValue=npv, Accuracy=acc, Homogeneity=sym) if (!test) coefmat[seq(nrow(coefmat)-1),4] <- NA coefmat <- cbind(coefmat[,1,drop=FALSE],NA,coefmat[,-1,drop=FALSE]) colnames(coefmat) <- c("Estimate","Std.Err","2.5%","97.5%","P-value") res <- list(table=table, prop.table=table/sum(table), coefmat=coefmat) } else { if (inherits(table,"table")) M <- multinomial(table) else { if (inherits(table,"multinomial")) { M <- table table <- round(M$P*nrow(M$data)) } else { M <- multinomial(table[,1:2],...) table <- base::table(table) } } calc_diag <- function(p,...) 
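## calc_diag maps the vector of 2x2 cell probabilities (column-major; columns
## = outcome, rows = test) to the reported summaries: prevalence P(outcome+),
## P(test+), sensitivity, specificity, the predictive values, accuracy, and
## the difference P(outcome+) - P(test+) used for the homogeneity/symmetry
## test. All but the last element may be put on the logit or arcsine
## (square-root) scale, matching the back-transformation applied afterwards.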
{ P <- matrix(p[1:4],2) p1 <- sum(P[,positive]) p2 <- sum(P[positive,]) res <- c(Prevalence=p1, ##(p[1]+p[2]), Test=p2, ##(p[1]+p[3]), Sensitivity=P[positive,positive]/p1, ## p[1]/(p[1]+p[2]), # Prob test + | given (true) disease (True positive rate) Specificity=P[negative,negative]/(1-p1), ## p[4]/(1-p[1]-p[2]), # Prob test - | given no disease (True negative rate) PositivePredictiveValue=P[positive,positive]/p2, ## p[1]/(p[1]+p[3]), # Prob disease | test + NegativePredictiveValue=P[negative,negative]/(1-p2), ## p[4]/(1-p[1]-p[3]), # Prob disease free | test - Accuracy=(P[1,1]+P[2,2])/sum(P), Homogeneity=P[negative,positive]-P[positive,negative] ) if (!is.null(confint)) { if (tolower(confint[1])=="logit") { res[seq(length(res)-1)] <- logit(res[seq(length(res)-1)]) } else if (tolower(confint[1])=="arcsin") { res[seq(length(res)-1)] <- asin(sqrt(res[seq(length(res)-1)])) } } return(res) } names(dimnames(table)) <- paste0(c("Test:","Outcome:"),names(dimnames(table))) prfun <- function(x,...) { printCoefmat(x$coefmat[,c(-2)],na.print="",...) printline() cat("\n") cat("Prevalence: Prob( outcome+ )\n") cat("Test: Prob( test+ )\n") cat("Sensitivity (True positive rate): Prob( test+ | outcome+ )\n") cat("Specificity (True negative rate): Prob( test- | outcome- )\n") cat("Positive predictive value (Precision): Prob( outcome+ | test+ )\n") cat("Negative predictive value: Prob( outcome- | test- )\n") cat("Accuracy: Prob( correct classification )\n") cat("Homogeneity/Symmetry: Prob( outcome+ ) - Prob( test+ )\n") } btransform <- NULL if (!is.null(confint)) { if (tolower(confint[1])=="logit") { btransform <- function(x) { rbind(expit(x[seq(nrow(x)-1),,drop=FALSE]), x[nrow(x),,drop=FALSE]) } } else if (tolower(confint[1])=="pseudoscore") { ## TODO, agresti-ryu, biometrika 2010 } else if (tolower(confint[1])=="arcsin") { btransform <- function(x) { rbind(sin(x[seq(nrow(x)-1),,drop=FALSE])^2, x[nrow(x),,drop=FALSE]) } } } res <- estimate(M,calc_diag,print=prfun,null=c(rep(p0,7),0),back.transform=btransform,...) } CI <- confint[1] if (exact) CI <- "exact" if (is.null(CI)) CI <- "wald" res <- structure(c(res, list(table=table, prop.table=table/sum(table), confint=CI, positive=positive, negative=negative, levels=dimnames(table) )), class=c("diagtest","estimate")) res$call <- match.call() rownames(res$coefmat) <- gsub("\\[|\\]","",rownames(res$coefmat)) names(res$coef) <- rownames(res$coefmat) return(res) } print.diagtest <- function(x,...) { cat("Call: "); print(x$call) cat("Confidence limits: ", x$confint,"\n",sep="") printline() printmany(x$table,x$prop.table,nspace=2,...) cat("\nPositive outcome: '", x$levels[[2]][x$positive],"'\n",sep="") ##cat("\tNegative outcome: '", x$levels[[2]][x$positive%%2+1],"'\n",sep="") printline() printCoefmat(x$coefmat[,c(-2)],na.print="",...) printline() cat("\n") cat("Prevalence: Prob( outcome+ )\n") cat("Test: Prob( test+ )\n") cat("Sensitivity (True positive rate): Prob( test+ | outcome+ )\n") cat("Specificity (True negative rate): Prob( test- | outcome- )\n") cat("Positive predictive value (Precision): Prob( outcome+ | test+ )\n") cat("Negative predictive value: Prob( outcome- | test- )\n") cat("Accuracy: Prob( correct classification )\n") if (x$confint=="exact") { cat("Homogeneity/Symmetry: Prob( outcome+, test- | discordant ), H0: p=0.5 \n") } else { cat("Homogeneity/Symmetry: H0: Prob( outcome+ ) - Prob( test+ ), H0: p=0\n") } cat("\n") } summary.diagtest <- function(x,...) 
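## The summary method below simply drops the i.i.d. decomposition and other
## bulky components ('iid', 'print', 'id', 'compare') before returning the
## object.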
{ x[c("iid","print","id","compare")] <- NULL return(x) } lava/R/measurement.error.R0000644000176200001440000000570013162174023015177 0ustar liggesusers##' Two-stage (non-linear) measurement error ##' ##' Two-stage measurement error ##' @param model1 Stage 1 model ##' @param formula Formula specifying observed covariates in stage 2 model ##' @param data data.frame ##' @param predictfun Predictions to be used in stage 2 ##' @param id1 Optional id-vector of stage 1 ##' @param id2 Optional id-vector of stage 2 ##' @param ... Additional arguments to lower level functions ##' @seealso stack.estimate ##' @export ##' @examples ##' m <- lvm(c(y1,y2,y3)~u,c(y3,y4,y5)~v,u~~v,c(u,v)~x) ##' transform(m,u2~u) <- function(x) x^2 ##' transform(m,uv~u+v) <- prod ##' regression(m) <- z~u2+u+v+uv+x ##' set.seed(1) ##' d <- sim(m,1000,p=c("u,u"=1)) ##' ##' ## Stage 1 ##' m1 <- lvm(c(y1[0:s],y2[0:s],y3[0:s])~1*u,c(y3[0:s],y4[0:s],y5[0:s])~1*v,u~b*x,u~~v) ##' latent(m1) <- ~u+v ##' e1 <- estimate(m1,d) ##' ##' pp <- function(mu,var,data,...) { ##' cbind(u=mu[,"u"],u2=mu[,"u"]^2+var["u","u"],v=mu[,"v"],uv=mu[,"u"]*mu[,"v"]+var["u","v"]) ##' } ##' (e <- measurement.error(e1, z~1+x, data=d, predictfun=pp)) ##' ##' ## uu <- seq(-1,1,length.out=100) ##' ## pp <- estimate(e,function(p,...) p["(Intercept)"]+p["u"]*uu+p["u2"]*uu^2)$coefmat ##' if (interactive()) { ##' plot(e,intercept=TRUE,vline=0) ##' ##' f <- function(p) p[1]+p["u"]*u+p["u2"]*u^2 ##' u <- seq(-1,1,length.out=100) ##' plot(e, f, data=data.frame(u), ylim=c(-.5,2.5)) ##' } measurement.error <- function(model1, formula, data=parent.frame(), predictfun=function(mu,var,data,...) mu[,1]^2+var[1], id1, id2, ...) { if (!inherits(model1,c("lvmfit","lvm.mixture"))) stop("Expected lava object ('lvmfit','lvm.mixture',...)") if (missing(formula)) stop("formula needed for stage two (right-hand side additional covariates)") p1 <- coef(model1,full=TRUE) uhat <- function(p=p1) { P <- predict(model1,p=p,x=manifest(model1)) cbind(predictfun(P,attributes(P)$cond.var,data)) } if (missing(id1)) id1 <- seq(nrow(model.frame(model1))) if (missing(id2)) id2 <- seq(nrow(model.frame(model1))) if (!inherits(model1,"estimate")) e1 <- estimate(NULL,coef=p1,id=id1,iid=iid(model1)) u <- uhat() X0 <- model.matrix(formula, data) Y <- model.frame(formula,data)[,1] X <- cbind(X0,u) stage.two <- lm(Y~-1+X) names(stage.two$coefficients) <- colnames(X) if (!inherits(stage.two,"estimate")) e2 <- estimate(stage.two, id=id2) U <- function(alpha=p1,beta=coef(stage.two)) { X <- cbind(X0,uhat(alpha)) r <- (Y-X%*%beta)/summary(stage.two)$sigma^2 apply(X,2,function(x) sum(x*r)) } Ia <- -numDeriv::jacobian(function(p) U(p),p1) stacked <- stack(e1,e2,Ia) res <- c(stacked,list(naive=e2,lm=coef(summary(stage.two)),fun=predictfun)) ## res <- list(estimate=stacked, naive=e2, lm=coef(summary(stage.two)), ## fun=predictfun) structure(res,class=c("measurement.error","estimate")) } lava/R/information.R0000644000176200001440000001667213162174023014061 0ustar liggesusers##' @export `information` <- function(x,...) UseMethod("information") ###{{{ information.lvm ##' @export information.lvm <- function(x,p,n,type=ifelse(model=="gaussian", c("E","hessian","varS","outer","sandwich","robust","num"),"outer"), data,weights=NULL, data2=NULL, model="gaussian", method=lava.options()$Dmethod, inverse=FALSE, pinv=TRUE, score=TRUE,...) 
{ if (missing(n)) n <- NROW(data) if (type[1]%in%c("sandwich","robust")) { cl <- match.call() cl$inverse <- !inverse cl$type <- "outer" A <- eval.parent(cl) cl$inverse <- !(cl$inverse) cl$type <- ifelse(type[1]=="sandwich","E","hessian") B <- eval.parent(cl) return(B%*%A%*%B) } if (type[1]%in%c("num","hessian","obs","observed") | (type[1]%in%c("E","hessian") & model!="gaussian")) { ## requireNamespace("numDeriv") myf <- function(p0) score(x, p=p0, model=model,data=data, weights=weights,data2=data2,indiv=FALSE,n=n) ##...) ## I <- -hessian(function(p0) logLik(x,p0,dd),p) I <- -numDeriv::jacobian(myf,p,method=method) res <- (I+t(I))/2 # Symmetric result if (inverse) { if (pinv) iI <- Inverse(res) else iI <- solve(res) return(iI) } return(res) } if (type[1]=="varS" | type[1]=="outer") { S <- score(x,p=p,data=na.omit(data),model=model,weights=weights,data2=data2,indiv=TRUE,...) ## print("...") res <- t(S)%*%S if (inverse) { if (pinv) iI <- Inverse(res) else iI <- solve(res) return(iI) } attributes(res)$grad <- colSums(S) return(res) } if (n>1) { xfix <- colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))] xconstrain <- intersect(unlist(lapply(constrain(x),function(z) attributes(z)$args)),manifest(x)) if (length(xfix)>0 | length(xconstrain)>0) { ##### Random slopes! x0 <- x if (length(xfix)>0) { nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) for (i in seq_along(myfix$var)) for (j in seq_along(myfix$col[[i]])) regfix(x0, from=vars(x0)[myfix$row[[i]]][j],to=vars(x0)[myfix$col[[i]]][j]) <- data[1,myfix$var[[i]]] index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) } pp <- modelPar(x0,p) p0 <- with(pp, c(meanpar,p,p2)) k <- length(index(x)$manifest) myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { for (j in seq_along(myfix$col[[i]])) { index(x0)$A[cbind(myfix$row[[i]],myfix$col[[i]])] <- data[ii,myfix$var[[i]]] } } ww <- NULL if (!is.null(weights)) ww <- weights[ii,] return(information(x0,p=p,n=1,type=type,weights=ww,data=data[ii,])) } L <- lapply(seq_len(nrow(data)),function(y) myfun(y)) val <- apply(array(unlist(L),dim=c(length(p0),length(p0),nrow(data))),c(1,2),sum) if (inverse) { if (pinv) iI <- Inverse(val) else iI <- solve(val) return(iI) } return(val) } } if (!is.null(weights) && is.matrix(weights)) { L <- lapply(seq_len(nrow(weights)),function(y) information(x,p=p,n=1,type=type,weights=weights[y,])) val <- apply(array(unlist(L),dim=c(length(p),length(p),nrow(weights))),c(1,2),sum) if (inverse) { if (pinv) iI <- Inverse(val) else iI <- solve(val) return(iI) } return(val) } mp <- moments(x,p,data=data) pp <- modelPar(x,p) D <- deriv.lvm(x, meanpar=pp$meanpar, mom=mp, p=p)##, all=length(constrain(x))>0) C <- mp$C iC <- Inverse(C,det=FALSE, symmetric = TRUE) if (is.null(weights)) { ## W <- diag(ncol(iC)) } else { if (length(weights)0)) { ##is.null(pp$meanpar) && is.null(pp$p2)) { if (inverse) { if (pinv) iI <- Inverse(information_Sigma) else iI <- solve(information_Sigma) return(iI) } return(information_Sigma) } ii <- index(x) if (is.null(weights)) { information_mu <- n*t(dxi) %*% (iC) %*% (dxi) } else { information_mu <- n*t(dxi) %*% (iC%*%W) %*% (dxi) } if (!(lava.options()$devel)) { information <- information_Sigma+information_mu } else { mparidx <- with(ii$parBelongsTo,c(mean,reg)) information <- information_Sigma information[mparidx,mparidx] <- information[mparidx,mparidx] + 
information_mu } if (inverse) { if (pinv) iI <- Inverse(information, symmetric = TRUE) else iI <- solve(information) return(iI) } return(information) } ###}}} information.lvm ###{{{ information.lvmfit ##' @export information.lvmfit <- function(x,p=pars(x),n=x$data$n,data=model.frame(x),model=x$estimator,weights=Weights(x), data2=x$data$data2, ...) { I <- information(x$model0,p=p,n=n,data=data,model=model, weights=weights,data2=data2,...) if (ncol(I) 1) for (i in 2:length(x)) { xst <- paste(xst, "+", x[i]) } yst <- y[1] yn <- length(y) if (yn > 1) { yst <- paste0("c(", yst) for (i in 2:length(y)) { yst <- paste0(yst, ", ", y[i]) } yst <- paste0(yst, ")") } ff <- paste(yst, "~", xst) return(as.formula(ff)) } lava/R/correlation.R0000644000176200001440000000637113162174023014050 0ustar liggesusers##' Generic correlation method ##' ##' @title Generic method for extracting correlation coefficients of model object ##' @param x Object ##' @param \dots Additional arguments ##' @author Klaus K. Holst ##' @export "correlation" <- function(x,...) UseMethod("correlation") ##' @export correlation.lvmfit <- function(x,z=TRUE,iid=FALSE,back.transform=TRUE,...) { pp <- matrices2(Model(x), with(index(x),seq_len(npar+npar.mean+npar.ex)))$P pos <- pp[lower.tri(pp)][(index(x)$P0)[lower.tri(pp)]==1] if (length(pos)<1) return(NULL) pp0 <- pp pp0[index(x)$P0!=1] <- 0; pp0[lower.tri(pp0)] <- 0 coords <- c() mynames <- vars(x) n <- nrow(pp0) ff <- function(p) { res <- numeric(length(pos)) nn <- character(length(pos)) for (i in seq_along(pos)) { p0 <- pos[i] idx <- which(pp0==p0) rowpos <- (idx-1)%%n + 1 colpos <- ceiling(idx/n) coefpos <- c(p0,pp0[rbind(c(rowpos,rowpos),c(colpos,colpos))]) pval <- pp[rbind(c(rowpos,rowpos),c(colpos,colpos))] phi.v1.v2 <- numeric(3); newval <- p[coefpos] phi.v1.v2[coefpos!=0] <- newval phi.v1.v2[coefpos==0] <- pval[tail(coefpos==0,2)] rho <- atanh(phi.v1.v2[1]/sqrt(prod(phi.v1.v2[-1]))) res[i] <- rho nn[i] <- paste(mynames[c(rowpos,colpos)],collapse="~") } structure(res,names=nn) } V <- NULL if (!iid) V <- vcov(x) if (back.transform) { back.transform <- tanh } else { back.transform <- NULL } estimate(x,coef=coef(x),vcov=V,f=ff,back.transform=back.transform,iid=iid,...) } ##' @export correlation.matrix <- function(x,z=TRUE,back.transform=TRUE,mreg=FALSE,return.all=FALSE,...) { if (mreg) { m <- lvm() covariance(m,pairwise=TRUE) <- colnames(x) try(e <- estimate(m,as.data.frame(x),...),silent=TRUE) res <- correlation(e,...) if (return.all) { return(list(model=m,estimate=e,correlation=res)) } return(res) } if (ncol(x)==2) { ii <- iid(x) ee <- estimate(coef=attributes(ii)$coef[3:5], iid=ii[,3:5]) if (z) { if (back.transform) { ee <- estimate(ee, function(x) atanh(x[2]/sqrt(x[1]*x[3])), back.transform=tanh) } else { ee <- estimate(ee, function(x) atanh(x[2]/sqrt(x[1]*x[3]))) } } else { ee <- estimate(ee, function(x) x[2]/sqrt(x[1]*x[3])) } return(ee) } e <- c() R <- diag(nrow=ncol(x)) dimnames(R) <- list(colnames(x),colnames(x)) for (i in seq(ncol(x)-1)) for (j in seq(i+1,ncol(x))) { e <- c(e,list(correlation(x[,c(i,j)],z=z,back.transform=FALSE,...))) R[j,i] <- coef(e[[length(e)]]) if (z) R[j,i] <- tanh(R[j,i]) } R <- R[-1,-ncol(R),drop=FALSE] res <- do.call(merge, c(e, paired=TRUE)) if (z && back.transform) { res <- estimate(res,back.transform=tanh, print=function(x,digits=1,...) { print(x$coefmat[,-2,drop=FALSE],...) cat("\n") print(offdiag(R,type=4),digits=digits,...) }) } return(res) } ##' @export correlation.data.frame <- function(x,...) { correlation(as.matrix(x),...) 
} lava/R/kill.R0000644000176200001440000000424613162174023012461 0ustar liggesusers##' Generic method for removing elements of object ##' ##' @title Remove variables from (model) object. ##' @aliases rmvar rmvar<- kill kill<- ##' @param x Model object ##' @param value Vector of variables or formula specifying which nodes to ##' remove ##' @param \dots additional arguments to lower level functions ##' @usage ##' kill(x, ...) <- value ##' @seealso \code{cancel} ##' @author Klaus K. Holst ##' @keywords models regression ##' @export ##' @examples ##' ##' m <- lvm() ##' addvar(m) <- ~y1+y2+x ##' covariance(m) <- y1~y2 ##' regression(m) <- c(y1,y2) ~ x ##' ### Cancel the covariance between the residuals of y1 and y2 ##' cancel(m) <- y1~y2 ##' ### Remove y2 from the model ##' rmvar(m) <- ~y2 ##' "rmvar" <- function(x, ...) UseMethod("rmvar") ##' @export "kill" <- function(x, ...) UseMethod("kill") ##' @export "kill<-" <- function(x, ..., value) UseMethod("kill<-") ##' @export "rmvar<-" <- function(x, ..., value) UseMethod("rmvar<-") ##' @export "kill<-.lvm" <- function(x, ..., value) { kill(x,value) } ##' @export "rmvar<-.lvm" <- get("kill<-.lvm") ##' @export "kill.lvm" <- function(x, value, ...) { if (inherits(value,"formula")) value <- all.vars(value) idx <- which(names(x$exfix)%in%value) if (length(idx)>0) { x$attributes$parameter[idx] <- x$expar[idx] <- x$exfix[idx] <- NULL if (length(x$exfix)==0) { x$exfix <- x$expar <- x$attributes$parameter <- NULL } index(x) <- reindex(x) } idx <- which(vars(x)%in%value) if (length(idx)!=0){ vv <- vars(x)[idx] keep <- setdiff(seq_along(vars(x)),idx) x$M <- x$M[keep,keep,drop=FALSE] x$par <- x$par[keep,keep,drop=FALSE] x$fix <- x$fix[keep,keep,drop=FALSE] x$covpar <- x$covpar[keep,keep,drop=FALSE] x$covfix <- x$covfix[keep,keep,drop=FALSE] x$cov <- x$cov[keep,keep,drop=FALSE] x$mean <- (x$mean)[-idx] x$exogenous <- setdiff(exogenous(x),vv) x$latent[vv] <- NULL }else{ ## remove variables that cannot be accessed by vars in the hook vv <- value } myhooks <- gethook("remove.hooks") for (f in myhooks) { x <- do.call(f, list(x=x,var=vv,...)) } index(x) <- reindex(x) return(x) } ##' @export "rmvar.lvm" <- get("kill.lvm") lava/R/cancel.R0000644000176200001440000000236313162174023012751 0ustar liggesusers##' Generic cancel method ##' ##' @title Generic cancel method ##' @param x Object ##' @param \dots Additioal arguments ##' @author Klaus K. Holst ##' @aliases cancel<- ##' @export "cancel" <- function(x,...) UseMethod("cancel") ##' @export "cancel<-" <- function(x,...,value) UseMethod("cancel<-") ##' @export "cancel<-.lvm" <- function(x, ..., value) { cancel(x,value,...) } ##' @export cancel.lvm <- function(x,value,...) 
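## cancel() removes the association (regression arrow and/or residual
## covariance) between the variables given in 'value' while keeping the
## variables themselves in the model; in contrast, kill()/rmvar() above
## removes the variables entirely. Sketch: cancel(m) <- y1 ~ y2 clears the
## y1-y2 association from 'm'.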
{ if (inherits(value,"formula")) { ## yx <- all.vars(value) lhs <- getoutcome(value) if (length(lhs)==0) yy <- NULL else yy <- decomp.specials(lhs) xf <- attributes(terms(value))$term.labels if(identical(all.vars(value),xf)) return(cancel(x,xf)) res <- lapply(xf,decomp.specials) xx <- unlist(lapply(res, function(z) z[1])) for (i in yy) { for (j in xx) cancel(x) <- c(i,j) } index(x) <- reindex(x) return(x) } for (v1 in value) for (v2 in value) if (v1!=v2) { if (all(c(v1,v2)%in%vars(x))) { x$M[v1,v2] <- 0 x$par[v1,v2] <- x$fix[v1,v2] <- x$covpar[v1,v2] <- x$covfix[v1,v2] <- NA x$cov[v1,v2] <- 0 } } x$parpos <- NULL index(x) <- reindex(x) return(x) } lava/R/residuals.R0000644000176200001440000000224513162174023013516 0ustar liggesusersIsqrt <- function(X) { eX <- eigen(X); with(eX, vectors %*% diag(1/sqrt(values),nrow=length(values)) %*% t(vectors)) } ##' @export residuals.multigroupfit <- function(object,data=model.frame(object),p=coef(object), k, ...) { pp <- modelPar(object,p,...) if (!missing(k)) return(residuals(object$model$lvm[[k]],data=data[[k]],p=pp$p[[k]],...)) res <- c() for (i in seq(length(pp$p))) { res <- c(res, list(residuals(object$model$lvm[[i]],data=data[[i]],p=pp$p[[i]],...))) } return(res) } ##' @export residuals.lvmfit <- function(object,data=model.frame(object),p=coef(object),...) { residuals(Model(object), data=data, p=p, ...) } ##' @export residuals.lvm <- function(object,data=model.frame(object),std=FALSE,p=coef(object),...) { Y <- setdiff(manifest(object), X <- exogenous(object)) Pr <- predict(object,p=p,data=data) PrY <- Pr[,Y,drop=FALSE] ## y <- endogenous(object)[match(endogenous(object),manifest(object))] r <- as.matrix(data[,Y,drop=FALSE]-(PrY)) res <- r if (std) { S <- attributes(Pr)$cond.var; if (length(Y)>1) { res <- r%*%Isqrt(S) } else res <- 1/sqrt(S[1,1])*r } colnames(res) <- colnames(r) res } lava/R/gkgamma.R0000644000176200001440000001044113162174023013124 0ustar liggesusersgoodmankruskal_gamma <- function(P,...) { nr <- nrow(P); nc <- ncol(P) Pconc <- 0 for (i in seq_len(nr-1)) { h <- seq(i+1,nr) for (j in seq_len(nc-1)) { k <- seq(j+1,nc) Pconc <- Pconc+2*P[i,j]*sum(P[h,k]) } } Pdisc <- 0 for (i in seq_len(nr-1)) { h <- seq(i+1,nr) for (j in (seq_len(nc-1)+1)) { k <- seq(1,j-1) Pdisc <- Pdisc+2*P[i,j]*sum(P[h,k]) } } list(C=Pconc,D=Pdisc,gamma=(Pconc-Pdisc)/(Pconc+Pdisc)) } ##' @export gkgamma <- function(x,data=parent.frame(),strata=NULL,all=FALSE,iid=TRUE,...) { if (inherits(x,"formula")) { xf <- getoutcome(x,sep="|") xx <- attr(xf,"x") if (length(xx)==0) stop("Not a valid formula") yx <- update(as.formula(paste0(xf,"~.")),xx[[1]]) if (length(xx)>1) { strata <- interaction(model.frame(xx[[2]],data=data)) x <- yx } else { x <- model.frame(yx,data=data) } } if (!is.null(strata)) { dd <- split(data,strata) gam <- lapply(dd,function(d,...) gkgamma(x,data=d,...), ..., iid=TRUE, keep=1:2) mgam <- Reduce(function(x,y,...) merge(x,y,...),gam) ps <- estimate(multinomial(strata),data=data,...) mgam <- merge(mgam,ps) psi <- 2*length(gam)+seq(length(coef(ps))) res <- estimate(mgam,function(p,...) 
{ k <- length(p)/3 cd <- lapply(seq(k),function(x) p[(1:2)+2*(x-1)]) dif <- unlist(lapply(cd,function(x) x[1]-x[2])) tot <- unlist(lapply(cd,function(x) x[1]+x[2])) gam <- dif/tot ## Conditional gammas given Z=z px2 <- p[psi]^2 pgamma <- sum(dif*px2)/sum(tot*px2) c(gam,pgamma=pgamma) },labels=c(paste0("\u03b3:",names(dd)),"pgamma"), iid=iid) if (!iid) { for (i in seq_along(gam)) gam[[i]][c("iid","id")] <- NULL } homtest <- estimate(res,lava::contr(seq_along(gam),length(gam)+1),iid=FALSE) attributes(res) <- c(attributes(res), list(class=c("gkgamma","estimate"), cl=match.call(), strata=gam, homtest=homtest)) return(res) } if (is.table(x) || is.data.frame(x) || is.matrix(x)) { x <- multinomial(x) } if (!inherits(x,"multinomial")) stop("Expected table, data.frame or multinomial object") structure(estimate(x,function(p) { P <- x$position; P[] <- p[x$position] goodmankruskal_gamma(P) },iid=iid,data=data,...), cl=match.call(), class=c("gkgamma","estimate")) } ##' @export print.gkgamma <- function(x,call=TRUE,...) { if (call) { cat("Call: ") print(attr(x,"cl")) printline(50) } n <- x$n if (!is.null(attr(x,"strata"))) { cat("Strata:\n\n") for (i in seq_along(attr(x,"strata"))) { with(attributes(x), cat(paste0(names(strata)[i], " (n=",strata[[i]]$n, if (strata[[i]]$ncluster Description: A general implementation of Structural Equation Models with latent variables (MLE, 2SLS, and composite likelihood estimators) with both continuous, censored, and ordinal outcomes (Holst and Budtz-Joergensen (2013) ). The package also provides methods for graph exploration (d-separation, back-door criterion), simulation of general non-linear latent variable models, and estimation of influence functions for a broad range of statistical models. URL: https://github.com/kkholst/lava BugReports: https://github.com/kkholst/lava/issues License: GPL-3 LazyLoad: yes Depends: R (>= 3.0) Imports: grDevices, graphics, methods, numDeriv, stats, survival, utils Suggests: KernSmooth, Matrix, Rgraphviz, ascii, data.table, fields, foreach, geepack, gof (>= 0.9), graph, igraph (>= 0.6), lava.tobit, lme4, mets (>= 1.1), optimx, quantreg, rgl, testthat (>= 0.11), visNetwork, zoo ByteCompile: yes RoxygenNote: 6.0.1 NeedsCompilation: no Packaged: 2017-09-27 20:46:26 UTC; klaus Repository: CRAN Date/Publication: 2017-09-27 21:25:09 UTC lava/man/0000755000176200001440000000000013162174023011747 5ustar liggesuserslava/man/estimate.default.Rd0000644000176200001440000001325413162174023015501 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate.default.R \name{estimate.default} \alias{estimate.default} \alias{estimate} \alias{estimate.estimate} \alias{merge.estimate} \title{Estimation of functional of parameters} \usage{ \method{estimate}{default}(x = NULL, f = NULL, ..., data, id, iddata, stack = TRUE, average = FALSE, subset, score.deriv, level = 0.95, iid = TRUE, type = c("robust", "df", "mbn"), keep, use, contrast, null, vcov, coef, robust = TRUE, df = NULL, print = NULL, labels, label.width, only.coef = FALSE, back.transform = NULL, folds = 0, cluster, R = 0, null.sim) } \arguments{ \item{x}{model object (\code{glm}, \code{lvmfit}, ...)} \item{f}{transformation of model parameters and (optionally) data, or contrast matrix (or vector)} \item{...}{additional arguments to lower level functions} \item{data}{\code{data.frame}} \item{id}{(optional) id-variable corresponding to iid decomposition of model parameters.} \item{iddata}{(optional) id-variable for 'data'} \item{stack}{if TRUE (default) 
the i.i.d. decomposition is automatically stacked according to 'id'} \item{average}{if TRUE averages are calculated} \item{subset}{(optional) subset of data.frame on which to condition (logical expression or variable name)} \item{score.deriv}{(optional) derivative of mean score function} \item{level}{level of confidence limits} \item{iid}{if TRUE (default) the iid decompositions are also returned (extract with \code{iid} method)} \item{type}{type of small-sample correction} \item{keep}{(optional) index of parameters to keep from final result} \item{use}{(optional) index of parameters to use in calculations} \item{contrast}{(optional) Contrast matrix for final Wald test} \item{null}{(optional) null hypothesis to test} \item{vcov}{(optional) covariance matrix of parameter estimates (e.g. Wald-test)} \item{coef}{(optional) parameter coefficient} \item{robust}{if TRUE robust standard errors are calculated. If FALSE p-values for linear models are calculated from the t-distribution} \item{df}{degrees of freedom (default obtained from 'df.residual')} \item{print}{(optional) print function} \item{labels}{(optional) names of coefficients} \item{label.width}{(optional) max width of labels} \item{only.coef}{if TRUE only the coefficient matrix is returned} \item{back.transform}{(optional) transform of parameters and confidence intervals} \item{folds}{(optional) aggregate influence functions (divide and conquer)} \item{cluster}{(obsolete) alias for 'id'.} \item{R}{Number of simulations (simulated p-values)} \item{null.sim}{Mean under the null for simulations} } \description{ Estimation of functional of parameters. Wald tests, robust standard errors, cluster robust standard errors, LRT (when \code{f} is not a function)... } \details{ The i.i.d. decomposition \deqn{\sqrt{n}(\widehat{\theta}-\theta) = \sum_{i=1}^n\epsilon_i + o_p(1)} can be extracted with the \code{iid} method.
} \examples{ ## Simulation from logistic regression model m <- lvm(y~x+z); distribution(m,y~x) <- binomial.lvm("logit") d <- sim(m,1000) g <- glm(y~z+x,data=d,family=binomial()) g0 <- glm(y~1,data=d,family=binomial()) ## LRT estimate(g,g0) ## Plain estimates (robust standard errors) estimate(g) ## Testing contrasts estimate(g,null=0) estimate(g,rbind(c(1,1,0),c(1,0,2))) estimate(g,rbind(c(1,1,0),c(1,0,2)),null=c(1,2)) estimate(g,2:3) ## same as cbind(0,1,-1) estimate(g,as.list(2:3)) ## same as rbind(c(0,1,0),c(0,0,1)) ## Alternative syntax estimate(g,"z","z"-"x",2*"z"-3*"x") estimate(g,z,z-x,2*z-3*x) estimate(g,"?") ## Wilcards estimate(g,"*Int*","z") estimate(g,"1","2"-"3",null=c(0,1)) estimate(g,2,3) ## Usual (non-robust) confidence intervals estimate(g,robust=FALSE) ## Transformations estimate(g,function(p) p[1]+p[2]) ## Multiple parameters e <- estimate(g,function(p) c(p[1]+p[2],p[1]*p[2])) e vcov(e) ## Label new parameters estimate(g,function(p) list("a1"=p[1]+p[2],"b1"=p[1]*p[2])) ##' ## Multiple group m <- lvm(y~x) m <- baptize(m) d2 <- d1 <- sim(m,50) e <- estimate(list(m,m),list(d1,d2)) estimate(e) ## Wrong estimate(e,id=rep(seq(nrow(d1)),2)) estimate(lm(y~x,d1)) ## Marginalize f <- function(p,data) list(p0=lava:::expit(p["(Intercept)"] + p["z"]*data[,"z"]), p1=lava:::expit(p["(Intercept)"] + p["x"] + p["z"]*data[,"z"])) e <- estimate(g, f, average=TRUE) e estimate(e,diff) estimate(e,cbind(1,1)) ## Clusters and subset (conditional marginal effects) d$id <- rep(seq(nrow(d)/4),each=4) estimate(g,function(p,data) list(p0=lava:::expit(p[1] + p["z"]*data[,"z"])), subset=d$z>0, id=d$id, average=TRUE) ## More examples with clusters: m <- lvm(c(y1,y2,y3)~u+x) d <- sim(m,10) l1 <- glm(y1~x,data=d) l2 <- glm(y2~x,data=d) l3 <- glm(y3~x,data=d) ## Some random id-numbers id1 <- c(1,1,4,1,3,1,2,3,4,5) id2 <- c(1,2,3,4,5,6,7,8,1,1) id3 <- seq(10) ## Un-stacked and stacked i.i.d. decomposition iid(estimate(l1,id=id1,stack=FALSE)) iid(estimate(l1,id=id1)) ## Combined i.i.d. decomposition e1 <- estimate(l1,id=id1) e2 <- estimate(l2,id=id2) e3 <- estimate(l3,id=id3) (a2 <- merge(e1,e2,e3)) ## If all models were estimated on the same data we could use the ## syntax: ## Reduce(merge,estimate(list(l1,l2,l3))) ## Same: iid(a1 <- merge(l1,l2,l3,id=list(id1,id2,id3))) iid(merge(l1,l2,l3,id=TRUE)) # one-to-one (same clusters) iid(merge(l1,l2,l3,id=FALSE)) # independence ## Monte Carlo approach, simple trend test example m <- categorical(lvm(),~x,K=5) regression(m,additive=TRUE) <- y~x d <- simulate(m,100,seed=1,'y~x'=0.1) l <- lm(y~-1+factor(x),data=d) f <- function(x) coef(lm(x~seq_along(x)))[2] null <- rep(mean(coef(l)),length(coef(l))) ## just need to make sure we simulate under H0: slope=0 estimate(l,f,R=1e2,null.sim=null) estimate(l,f) } lava/man/addvar.Rd0000644000176200001440000000055413162174023013503 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/addvar.R \name{addvar} \alias{addvar} \alias{addvar<-} \title{Add variable to (model) object} \usage{ addvar(x, ...) } \arguments{ \item{x}{Model object} \item{\dots}{Additional arguments} } \description{ Generic method for adding variables to model object } \author{ Klaus K. Holst } lava/man/vec.Rd0000644000176200001440000000063413162174023013016 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vec.R \name{vec} \alias{vec} \title{vec operator} \usage{ vec(x, matrix = FALSE, sep = ".", ...) 
} \arguments{ \item{x}{Array} \item{matrix}{If TRUE a row vector (matrix) is returned} \item{sep}{Seperator} \item{...}{Additional arguments} } \description{ vec operator } \details{ Convert array into vector } \author{ Klaus Holst } lava/man/bmd.Rd0000644000176200001440000000102513162174023012776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{bmd} \alias{bmd} \title{Longitudinal Bone Mineral Density Data (Wide format)} \format{data.frame} \source{ Vonesh & Chinchilli (1997), Table 5.4.1 on page 228. } \description{ Bone Mineral Density Data consisting of 112 girls randomized to receive calcium og placebo. Longitudinal measurements of bone mineral density (g/cm^2) measured approximately every 6th month in 3 years. } \seealso{ calcium } \keyword{datasets} lava/man/revdiag.Rd0000644000176200001440000000140713162174023013661 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/revdiag.R \name{revdiag} \alias{revdiag} \alias{revdiag<-} \alias{offdiag} \alias{offdiag<-} \title{Create/extract 'reverse'-diagonal matrix or off-diagonal elements} \usage{ revdiag(x,...) offdiag(x,type=0,...) revdiag(x,...) <- value offdiag(x,type=0,...) <- value } \arguments{ \item{x}{vector} \item{\dots}{additional arguments to lower level functions} \item{value}{For the assignment function the values to put in the diagonal} \item{type}{0: upper and lower triangular, 1: upper triangular, 2: lower triangular, 3: upper triangular + diagonal, 4: lower triangular + diagonal} } \description{ Create/extract 'reverse'-diagonal matrix or off-diagonal elements } \author{ Klaus K. Holst } lava/man/eventTime.Rd0000644000176200001440000001066613162174023014207 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/eventTime.R \name{eventTime} \alias{eventTime} \alias{eventTime<-} \title{Add an observed event time outcome to a latent variable model.} \usage{ eventTime(object, formula, eventName = "status", ...) } \arguments{ \item{object}{Model object} \item{formula}{Formula (see details)} \item{eventName}{Event names} \item{\dots}{Additional arguments to lower levels functions} } \description{ For example, if the model 'm' includes latent event time variables are called 'T1' and 'T2' and 'C' is the end of follow-up (right censored), then one can specify } \details{ \code{eventTime(object=m,formula=ObsTime~min(T1=a,T2=b,C=0,"ObsEvent"))} when data are simulated from the model one gets 2 new columns: - "ObsTime": the smallest of T1, T2 and C - "ObsEvent": 'a' if T1 is smallest, 'b' if T2 is smallest and '0' if C is smallest Note that "ObsEvent" and "ObsTime" are names specified by the user. } \examples{ # Right censored survival data without covariates m0 <- lvm() distribution(m0,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) distribution(m0,"censtime") <- coxExponential.lvm(rate=10) m0 <- eventTime(m0,time~min(eventtime=1,censtime=0),"status") sim(m0,10) # Alternative specification of the right censored survival outcome ## eventTime(m,"Status") <- ~min(eventtime=1,censtime=0) # Cox regression: # lava implements two different parametrizations of the same # Weibull regression model. 
The first specifies # the effects of covariates as proportional hazard ratios # and works as follows: m <- lvm() distribution(m,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) distribution(m,"censtime") <- coxWeibull.lvm(scale=1/100,shape=2) m <- eventTime(m,time~min(eventtime=1,censtime=0),"status") distribution(m,"sex") <- binomial.lvm(p=0.4) distribution(m,"sbp") <- normal.lvm(mean=120,sd=20) regression(m,from="sex",to="eventtime") <- 0.4 regression(m,from="sbp",to="eventtime") <- -0.01 sim(m,6) # The parameters can be recovered using a Cox regression # routine or a Weibull regression model. E.g., \dontrun{ set.seed(18) d <- sim(m,1000) library(survival) coxph(Surv(time,status)~sex+sbp,data=d) sr <- survreg(Surv(time,status)~sex+sbp,data=d) library(SurvRegCensCov) ConvertWeibull(sr) } # The second parametrization is an accelerated failure time # regression model and uses the function weibull.lvm instead # of coxWeibull.lvm to specify the event time distributions. # Here is an example: ma <- lvm() distribution(ma,"eventtime") <- weibull.lvm(scale=3,shape=0.7) distribution(ma,"censtime") <- weibull.lvm(scale=2,shape=0.7) ma <- eventTime(ma,time~min(eventtime=1,censtime=0),"status") distribution(ma,"sex") <- binomial.lvm(p=0.4) distribution(ma,"sbp") <- normal.lvm(mean=120,sd=20) regression(ma,from="sex",to="eventtime") <- 0.7 regression(ma,from="sbp",to="eventtime") <- -0.008 set.seed(17) sim(ma,6) # The regression coefficients of the AFT model # can be tranformed into log(hazard ratios): # coef.coxWeibull = - coef.weibull / shape.weibull \dontrun{ set.seed(17) da <- sim(ma,1000) library(survival) fa <- coxph(Surv(time,status)~sex+sbp,data=da) coef(fa) c(0.7,-0.008)/0.7 } # The Weibull parameters are related as follows: # shape.coxWeibull = 1/shape.weibull # scale.coxWeibull = exp(-scale.weibull/shape.weibull) # scale.AFT = log(scale.coxWeibull) / shape.coxWeibull # Thus, the following are equivalent parametrizations # which produce exactly the same random numbers: model.aft <- lvm() distribution(model.aft,"eventtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) distribution(model.aft,"censtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) set.seed(17) sim(model.aft,6) model.cox <- lvm() distribution(model.cox,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) distribution(model.cox,"censtime") <- coxWeibull.lvm(scale=1/100,shape=2) set.seed(17) sim(model.cox,6) # The minimum of multiple latent times one of them still # being a censoring time, yield # right censored competing risks data mc <- lvm() distribution(mc,~X2) <- binomial.lvm() regression(mc) <- T1~f(X1,-.5)+f(X2,0.3) regression(mc) <- T2~f(X2,0.6) distribution(mc,~T1) <- coxWeibull.lvm(scale=1/100) distribution(mc,~T2) <- coxWeibull.lvm(scale=1/100) distribution(mc,~C) <- coxWeibull.lvm(scale=1/100) mc <- eventTime(mc,time~min(T1=1,T2=2,C=0),"event") sim(mc,6) } \author{ Thomas A. Gerds, Klaus K. Holst } \keyword{models} \keyword{regression} \keyword{survival} lava/man/children.Rd0000644000176200001440000000075613162174023014036 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/children.R \name{children} \alias{children} \alias{parents} \alias{ancestors} \alias{descendants} \alias{roots} \alias{sinks} \alias{adjMat} \alias{edgeList} \title{Extract children or parent elements of object} \usage{ children(object, ...) } \arguments{ \item{object}{Object} \item{\dots}{Additional arguments} } \description{ Generic method for memberships from object (e.g. 
a graph) } \author{ Klaus K. Holst } lava/man/bootstrap.Rd0000644000176200001440000000063213162174023014254 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap.R \name{bootstrap} \alias{bootstrap} \title{Generic bootstrap method} \usage{ bootstrap(x, ...) } \arguments{ \item{x}{Model object} \item{\dots}{Additional arguments} } \description{ Generic method for calculating bootstrap statistics } \seealso{ \code{bootstrap.lvm} \code{bootstrap.lvmfit} } \author{ Klaus K. Holst } lava/man/iid.Rd0000644000176200001440000000156613162174023013013 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iid.R \name{iid} \alias{iid} \alias{iid.default} \title{Extract i.i.d. decomposition (influence function) from model object} \usage{ iid(x,...) \method{iid}{default}(x,bread,id=NULL,folds=0,maxsize=(folds>0)*1e6,...) } \arguments{ \item{x}{model object} \item{...}{additional arguments} \item{id}{(optional) id/cluster variable} \item{bread}{(optional) Inverse of derivative of mean score function} \item{folds}{(optional) Calculate aggregated iid decomposition (0:=disabled)} \item{maxsize}{(optional) Data is split in groups of size up to 'maxsize' (0:=disabled)} } \description{ Extract i.i.d. decomposition (influence function) from model object } \examples{ m <- lvm(y~x+z) distribution(m, ~y+z) <- binomial.lvm("logit") d <- sim(m,1e3) g <- glm(y~x+z,data=d,family=binomial) crossprod(iid(g)) } lava/man/path.Rd0000644000176200001440000000466413162174023013204 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/path.R \name{path} \alias{path} \alias{effects} \alias{path.lvm} \alias{effects.lvmfit} \alias{totaleffects} \title{Extract pathways in model graph} \usage{ \method{path}{lvm} (object, to = NULL, from, all=FALSE, ...) \method{effects}{lvmfit} (object, to, from, silent=FALSE, ...) } \arguments{ \item{object}{Model object (\code{lvm})} \item{\dots}{Additional arguments to be passed to the low level functions} \item{to}{Outcome variable (string). Alternatively a formula specifying response and predictor in which case the argument \code{from} is ignored.} \item{from}{Response variable (string), not necessarily directly affected by \code{to}.} \item{all}{If TRUE all simple paths (in undirected graph) is returned} \item{silent}{Logical variable which indicates whether messages are turned on/off.} } \value{ If \code{object} is of class \code{lvmfit} a list with the following elements is returned \item{idx}{ A list where each element defines a possible pathway via a integer vector indicating the index of the visited nodes. } \item{V }{ A List of covariance matrices for each path. } \item{coef }{A list of parameters estimates for each path} \item{path }{A list where each element defines a possible pathway via a character vector naming the visited nodes in order. } \item{edges }{Description of 'comp2'} If \code{object} is of class \code{lvm} only the \code{path} element will be returned. The \code{effects} method returns an object of class \code{effects}. } \description{ Extract all possible paths from one variable to another connected component in a latent variable model. In an estimated model the effect size is decomposed into direct, indirect and total effects including approximate standard errors. } \note{ For a \code{lvmfit}-object the parameters estimates and their corresponding covariance matrix are also returned. 
The \code{effects}-function additionally calculates the total and indirect effects with approximate standard errors } \examples{ m <- lvm(c(y1,y2,y3)~eta) regression(m) <- y2~x1 latent(m) <- ~eta regression(m) <- eta~x1+x2 d <- sim(m,500) e <- estimate(m,d) path(Model(e),y2~x1) parents(Model(e), ~y2) children(Model(e), ~x2) children(Model(e), ~x2+eta) effects(e,y2~x1) ## All simple paths (undirected) path(m,y1~x1,all=TRUE) } \seealso{ \code{children}, \code{parents} } \author{ Klaus K. Holst } \keyword{graphs} \keyword{methods} \keyword{models} lava/man/cv.Rd0000644000176200001440000000200713162174023012645 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cv.R \name{cv} \alias{cv} \title{Cross-validation} \usage{ cv(modelList, data, K = 5, rep = 1, perf, seed = NULL, mc.cores = 1, ...) } \arguments{ \item{modelList}{List of fitting functions or models} \item{data}{data.frame} \item{K}{Number of folds (default 5)} \item{rep}{Number of repetitions (default 1)} \item{perf}{Performance measure (default RMSE)} \item{seed}{Optional random seed} \item{mc.cores}{Number of cores used for parallel computations} \item{...}{Additional arguments parsed to models in modelList and perf} } \description{ Cross-validation } \details{ Generic cross-validation function } \examples{ f0 <- function(data,...) lm(...,data) f1 <- function(data,...) lm(Sepal.Length~Species,data) f2 <- function(data,...) lm(Sepal.Length~Species+Petal.Length,data) x <- cv(list(m0=f0,m1=f1,m2=f2),rep=10, data=iris, formula=Sepal.Length~.) x2 <- cv(list(f0(iris),f1(iris),f2(iris)),rep=10, data=iris) } \author{ Klaus K. Holst } lava/man/regression-set.Rd0000644000176200001440000001057113162174023015213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/regression.R \name{regression<-} \alias{regression<-} \alias{regression} \alias{regression<-.lvm} \alias{regression.lvm} \alias{regfix} \alias{regfix<-} \alias{regfix.lvm} \alias{regfix<-.lvm} \title{Add regression association to latent variable model} \usage{ \method{regression}{lvm}(object = lvm(), to, from, fn = NA, silent = lava.options()$silent, additive=TRUE, y, x, value, ...) \method{regression}{lvm}(object, to=NULL, quick=FALSE, ...) <- value } \arguments{ \item{object}{\code{lvm}-object.} \item{\dots}{Additional arguments to be passed to the low level functions} \item{value}{A formula specifying the linear constraints or if \code{to=NULL} a \code{list} of parameter values.} \item{to}{Character vector of outcome(s) or formula object.} \item{from}{Character vector of predictor(s).} \item{fn}{Real function defining the functional form of predictors (for simulation only).} \item{silent}{Logical variable which indicates whether messages are turned on/off.} \item{additive}{If FALSE and predictor is categorical a non-additive effect is assumed} \item{y}{Alias for 'to'} \item{x}{Alias for 'from'} \item{quick}{Faster implementation without parameter constraints} } \value{ A \code{lvm}-object } \description{ Define regression association between variables in a \code{lvm}-object and define linear constraints between model equations. } \details{ The \code{regression} function is used to specify linear associations between variables of a latent variable model, and offers formula syntax resembling the model specification of e.g. \code{lm}. 
For instance, to add the following linear regression model to the \code{lvm}-object, \code{m}: \deqn{ E(Y|X_1,X_2) = \beta_1 X_1 + \beta_2 X_2} We can write \code{regression(m) <- y ~ x1 + x2} Multivariate models can be specified by successive calls with \code{regression}, but multivariate formulas are also supported, e.g. \code{regression(m) <- c(y1,y2) ~ x1 + x2} defines \deqn{ E(Y_i|X_1,X_2) = \beta_{1i} X_1 + \beta_{2i} X_2 } The special function, \code{f}, can be used in the model specification to specify linear constraints. E.g. to fix \eqn{\beta_1=\beta_2}, we could write \code{regression(m) <- y ~ f(x1,beta) + f(x2,beta)} The second argument of \code{f} can also be a number (e.g. defining an offset) or be set to \code{NA} in order to clear any previously defined linear constraints. Alternatively, a more straightforward notation can be used: \code{regression(m) <- y ~ beta*x1 + beta*x2} All the parameter values of the linear constraints can be given as the right-hand side expression of the assignment function \code{regression<-} (or \code{regfix<-}) if the first (and possibly second) argument is defined as well. E.g.: \code{regression(m,y1~x1+x2) <- list("a1","b1")} defines \eqn{E(Y_1|X_1,X_2) = a1 X_1 + b1 X_2}. The right-hand side argument can be a mixture of character and numeric values (and NA's to remove constraints). The function \code{regression} (called without additional arguments) can be used to inspect the linear constraints of a \code{lvm}-object. For backward compatibility the "$"-symbol can be used to fix parameters at a given value. E.g. to add a linear relationship between \code{y} and \code{x} with slope 2 to the model \code{m}, we can write \code{regression(m,"y") <- "x$2"}. Similarly we can use the "@"-symbol to name parameters. E.g. in a multiple regression we can force the parameters to be equal: \code{regression(m,"y") <- c("x1@b","x2@b")}. Fixed parameters can be reset by fixing them (with \$) to \code{NA}. } \note{ Variables will be added to the model if not already present. } \examples{ m <- lvm() ## Initialize empty lvm-object ### E(y1|z,v) = beta1*z + beta2*v regression(m) <- y1 ~ z + v ### E(y2|x,z,v) = beta*x + beta*z + 2*v + beta3*u regression(m) <- y2 ~ f(x,beta) + f(z,beta) + f(v,2) + u ### Clear restriction on association between y and ### fix slope coefficient of u to beta regression(m, y2 ~ v+u) <- list(NA,"beta") regression(m) ## Examine current linear parameter constraints ## ## A multivariate model, E(yi|x1,x2) = beta[1i]*x1 + beta[2i]*x2: m2 <- lvm(c(y1,y2) ~ x1+x2) } \seealso{ \code{\link{intercept<-}}, \code{\link{covariance<-}}, \code{\link{constrain<-}}, \code{\link{parameter<-}}, \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/devcoords.Rd0000644000176200001440000000123313162174023014225 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/devcoords.R \name{devcoords} \alias{devcoords} \title{Returns device-coordinates and plot-region} \usage{ devcoords() } \value{ A \code{list} with elements \item{dev.x1}{Device: Left x-coordinate} \item{dev.x2}{Device: Right x-coordinate} \item{dev.y1}{Device: Bottom y-coordinate} \item{dev.y2}{Device: Top y-coordinate} \item{fig.x1}{Plot: Left x-coordinate} \item{fig.x2}{Plot: Right x-coordinate} \item{fig.y1}{Plot: Bottom y-coordinate} \item{fig.y2}{Plot: Top y-coordinate} } \description{ Returns device-coordinates and plot-region } \author{ Klaus K.
Holst } \keyword{hplot} lava/man/Model.Rd0000644000176200001440000000120513162174023013274 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model.R \name{Model} \alias{Model} \alias{Model<-} \title{Extract model} \usage{ Model(x, ...) Model(x, ...) <- value } \arguments{ \item{x}{Fitted model} \item{\dots}{Additional arguments to be passed to the low level functions} \item{value}{New model object (e.g. \code{lvm} or \code{multigroup})} } \value{ Returns a model object (e.g. \code{lvm} or \code{multigroup}) } \description{ Extract or replace model object } \examples{ m <- lvm(y~x) e <- estimate(m, sim(m,100)) Model(e) } \seealso{ \code{\link{Graph}} } \author{ Klaus K. Holst } \keyword{models} lava/man/op_concat.Rd0000644000176200001440000000131513162174023014203 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/operators.R \name{\%++\%} \alias{\%++\%} \title{Concatenation operator} \usage{ x \%++\% y } \arguments{ \item{x}{First object} \item{y}{Second object of same class} } \description{ For matrices a block-diagonal matrix is created. For all other data types he operator is a wrapper of \code{paste}. } \details{ Concatenation operator } \examples{ ## Block diagonal matrix(rnorm(25),5)\%++\%matrix(rnorm(25),5) ## String concatenation "Hello "\%++\%" World" ## Function composition f <- log \%++\% exp f(2) } \seealso{ \code{blockdiag}, \code{\link{paste}}, \code{\link{cat}}, } \author{ Klaus K. Holst } \keyword{misc} \keyword{utilities} lava/man/predictlvm.Rd0000644000176200001440000000171213162174023014410 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predictlvm} \alias{predictlvm} \title{Predict function for latent variable models} \usage{ predictlvm(object, formula, p = coef(object), data = model.frame(object), ...) } \arguments{ \item{object}{Model object} \item{formula}{Formula specifying which variables to predict and which to condition on} \item{p}{Parameter vector} \item{data}{Data.frame} \item{...}{Additional arguments to lower level functions} } \description{ Predictions of conditinoal mean and variance and calculation of jacobian with respect to parameter vector. } \examples{ m <- lvm(c(x1,x2,x3)~u1,u1~z, c(y1,y2,y3)~u2,u2~u1+z) latent(m) <- ~u1+u2 d <- simulate(m,10,"u2,u2"=2,"u1,u1"=0.5,seed=123) e <- estimate(m,d) ## Conditional mean given covariates predictlvm(e,c(x1,x2)~1)$mean ## Conditional variance of u1,y1 given x1,x2 predictlvm(e,c(u1,y1)~x1+x2)$var } \seealso{ predict.lvm } lava/man/plot.lvm.Rd0000644000176200001440000000567313162174023014024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.lvm} \alias{plot.lvm} \alias{plot.lvmfit} \title{Plot path diagram} \usage{ \method{plot}{lvm}(x, diag = FALSE, cor = TRUE, labels = FALSE, intercept = FALSE, addcolor = TRUE, plain = FALSE, cex, fontsize1 = 10, noplot = FALSE, graph = list(rankdir = "BT"), attrs = list(graph = graph), unexpr = FALSE, addstyle = TRUE, plot.engine = lava.options()$plot.engine, init = TRUE, layout = lava.options()$layout, edgecolor = lava.options()$edgecolor, graph.proc = lava.options()$graph.proc, ...) } \arguments{ \item{x}{Model object} \item{diag}{Logical argument indicating whether to visualize variance parameters (i.e. 
diagonal of variance matrix)} \item{cor}{Logical argument indicating whether to visualize correlation parameters} \item{labels}{Logical argument indiciating whether to add labels to plot (Unnamed parameters will be labeled p1,p2,...)} \item{intercept}{Logical argument indiciating whether to add intercept labels} \item{addcolor}{Logical argument indiciating whether to add colors to plot (overrides \code{nodecolor} calls)} \item{plain}{if TRUE strip plot of colors and boxes} \item{cex}{Fontsize of node labels} \item{fontsize1}{Fontsize of edge labels} \item{noplot}{if TRUE then return \code{graphNEL} object only} \item{graph}{Graph attributes (Rgraphviz)} \item{attrs}{Attributes (Rgraphviz)} \item{unexpr}{if TRUE remove expressions from labels} \item{addstyle}{Logical argument indicating whether additional style should automatically be added to the plot (e.g. dashed lines to double-headed arrows)} \item{plot.engine}{default 'Rgraphviz' if available, otherwise visNetwork,igraph} \item{init}{Reinitialize graph (for internal use)} \item{layout}{Graph layout (see Rgraphviz or igraph manual)} \item{edgecolor}{if TRUE plot style with colored edges} \item{graph.proc}{Function that post-process the graph object (default: subscripts are automatically added to labels of the nodes)} \item{...}{Additional arguments to be passed to the low level functions} } \description{ Plot the path diagram of a SEM } \examples{ if (interactive()) { m <- lvm(c(y1,y2) ~ eta) regression(m) <- eta ~ z+x2 regression(m) <- c(eta,z) ~ x1 latent(m) <- ~eta labels(m) <- c(y1=expression(y[scriptscriptstyle(1)]), y2=expression(y[scriptscriptstyle(2)]), x1=expression(x[scriptscriptstyle(1)]), x2=expression(x[scriptscriptstyle(2)]), eta=expression(eta)) edgelabels(m, eta ~ z+x1+x2, cex=2, lwd=3, col=c("orange","lightblue","lightblue")) <- expression(rho,phi,psi) nodecolor(m, vars(m), border="white", labcol="darkblue") <- NA nodecolor(m, ~y1+y2+z, labcol=c("white","white","black")) <- NA plot(m,cex=1.5) d <- sim(m,100) e <- estimate(m,d) plot(e) m <- lvm(c(y1,y2) ~ eta) regression(m) <- eta ~ z+x2 regression(m) <- c(eta,z) ~ x1 latent(m) <- ~eta plot(lava:::beautify(m,edgecol=FALSE)) } } \author{ Klaus K. Holst } \keyword{hplot} \keyword{regression} lava/man/twindata.Rd0000644000176200001440000000132413162174023014051 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{twindata} \alias{twindata} \title{Twin menarche data} \format{data.frame} \source{ Simulated } \description{ Simulated data \tabular{rll}{ id \tab numeric \tab Twin-pair id \cr zyg \tab character \tab Zygosity (MZ or DZ) \cr twinnum \tab numeric \tab Twin number (1 or 2) \cr agemena \tab numeric \tab Age at menarche (or censoring) \cr status \tab logical \tab Censoring status (observed:=T,censored:=F) \cr bw \tab numeric \tab Birth weight \cr msmoke \tab numeric \tab Did mother smoke? (yes:=1,no:=0) \cr } } \keyword{datasets} lava/man/csplit.Rd0000644000176200001440000000157713162174023013546 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/csplit.R \name{csplit} \alias{csplit} \alias{foldr} \title{Split data into folds} \usage{ csplit(x, p = NULL, replace = FALSE, return.index = FALSE, k = 2, ...) 
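## A hypothetical follow-up sketch (not part of the original file) illustrating
## the 'return.index' argument above; it is assumed here that a list of row
## indices, one element per fold, is returned:
## idx <- csplit(nrow(iris), k=5, return.index=TRUE)
## train <- iris[-idx[[1]], ]   ## first fold held out for testing
## test  <- iris[ idx[[1]], ]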
} \arguments{ \item{x}{Data or integer (size)} \item{p}{Number of folds, or if a number between 0 and 1 is given two folds of size p and (1-p) will be returned} \item{replace}{With or with-out replacement} \item{return.index}{If TRUE index of folds are returned otherwise the actual data splits are returned (default)} \item{k}{(Optional, only used when p=NULL) number of folds without shuffling} \item{...}{additional arguments to lower level functions} } \description{ Split data into folds } \examples{ foldr(5,2,rep=2) csplit(10,3) csplit(iris[1:10,]) ## Split in two sets 1:(n/2) and (n/2+1):n csplit(iris[1:10,],0.5) } \author{ Klaus K. Holst } lava/man/internal.Rd0000644000176200001440000000265013162174023014055 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \name{startvalues} \alias{startvalues} \alias{startvalues0} \alias{startvalues1} \alias{startvalues2} \alias{startvalues3} \alias{starter.multigroup} \alias{addattr} \alias{modelPar} \alias{modelVar} \alias{matrices} \alias{pars} \alias{pars.lvm} \alias{pars.lvmfit} \alias{pars.glm} \alias{score.glm} \alias{procdata.lvmfit} \alias{reorderdata} \alias{graph2lvm} \alias{igraph.lvm} \alias{subgraph} \alias{finalize} \alias{index.lvm} \alias{index.lvmfit} \alias{index} \alias{reindex} \alias{index<-} \alias{survival} \alias{survival<-} \alias{ordinal} \alias{ordinal<-} \alias{rmvn} \alias{dmvn} \alias{NR} \alias{logit} \alias{expit} \alias{tigol} \alias{randomslope} \alias{randomslope<-} \alias{lisrel} \alias{variances} \alias{offdiags} \alias{describecoef} \alias{parlabels} \alias{rsq} \alias{stdcoef} \alias{CoefMat} \alias{CoefMat.multigroupfit} \alias{deriv} \alias{updatelvm} \alias{checkmultigroup} \alias{profci} \alias{estimate.MAR} \alias{missingModel} \alias{Inverse} \alias{gaussian_logLik.lvm} \alias{addhook} \alias{gethook} \alias{multigroup} \alias{Weights} \alias{fixsome} \alias{parfix} \alias{parfix<-} \alias{merge} \alias{IV} \alias{parameter} \alias{Specials} \alias{procformula} \alias{getoutcome} \alias{decomp.specials} \title{For internal use} \description{ For internal use } \author{ Klaus K. Holst } \keyword{utilities} lava/man/confpred.Rd0000644000176200001440000000236113162174023014040 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/confpred.R \name{confpred} \alias{confpred} \title{Conformal prediction} \usage{ confpred(object, data, newdata = data, alpha = 0.05, mad, ...) } \arguments{ \item{object}{Model object (lm, glm or similar with predict method) or formula (lm)} \item{data}{data.frame} \item{newdata}{New data.frame to make predictions for} \item{alpha}{Level of prediction interval} \item{mad}{Conditional model (formula) for the MAD (locally-weighted CP)} \item{...}{Additional arguments to lower level functions} } \value{ data.frame with fitted (fit), lower (lwr) and upper (upr) predictions bands. 
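A brief hedged illustration (not part of the original file): with the object \code{cc} constructed in the example further below, the returned columns can be inspected or summarised directly, e.g.

head(cc[, c("fit", "lwr", "upr")])   ## conformal point predictions and bands
mean(cc$upr - cc$lwr)                ## average width of the prediction bands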
} \description{ Conformal predicions } \examples{ set.seed(123) n <- 200 x <- seq(0,6,length.out=n) delta <- 3 ss <- exp(-1+1.5*cos((x-delta))) ee <- rnorm(n,sd=ss) y <- (x-delta)+3*cos(x+4.5-delta)+ee d <- data.frame(y=y,x=x) newd <- data.frame(x=seq(0,6,length.out=50)) ## cc <- confpred(lm(y~ns(x,knots=c(1,3,5)),d),newdata=newd) cc <- confpred(lm(y~poly(x,3),d),data=d,newdata=newd) if (interactive()) { ##' plot(y~x,pch=16,col=lava::Col("black"),ylim=c(-10,15),xlab="X",ylab="Y") with(cc, lava::confband(newd$x,lwr,upr,fit, lwd=3,polygon=TRUE,col=Col("blue"),border=FALSE)) } } lava/man/subset.lvm.Rd0000644000176200001440000000123613162174023014342 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/subset.R \name{subset.lvm} \alias{subset.lvm} \alias{measurement} \title{Extract subset of latent variable model} \usage{ \method{subset}{lvm}(x, vars, ...) } \arguments{ \item{x}{\code{lvm}-object.} \item{vars}{Character vector or formula specifying variables to include in subset.} \item{\dots}{Additional arguments to be passed to the low level functions} } \value{ A \code{lvm}-object. } \description{ Extract measurement models or user-specified subset of model } \examples{ m <- lvm(c(y1,y2)~x1+x2) subset(m,~y1+x1) } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/labels-set.Rd0000644000176200001440000000365713162174023014304 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/labels.R \name{labels<-} \alias{labels<-} \alias{labels} \alias{labels<-.default} \alias{labels.lvm} \alias{labels.lvmfit} \alias{labels.graphNEL} \alias{edgelabels} \alias{edgelabels<-} \alias{edgelabels<-.lvm} \alias{nodecolor} \alias{nodecolor<-} \alias{nodecolor<-.default} \title{Define labels of graph} \usage{ \method{labels}{default}(object, ...) <- value \method{edgelabels}{lvm}(object, to, ...) <- value \method{nodecolor}{default}(object, var=vars(object), border, labcol, shape, lwd, ...) <- value } \arguments{ \item{object}{\code{lvm}-object.} \item{\dots}{Additional arguments (\code{lwd}, \code{cex}, \code{col}, \code{labcol}), \code{border}.} \item{value}{node label/edge label/color} \item{to}{Formula specifying outcomes and predictors defining relevant edges.} \item{var}{Formula or character vector specifying the nodes/variables to alter.} \item{border}{Colors of borders} \item{labcol}{Text label colors} \item{shape}{Shape of node} \item{lwd}{Line width of border} } \description{ Alters labels of nodes and edges in the graph of a latent variable model } \examples{ m <- lvm(c(y,v)~x+z) regression(m) <- c(v,x)~z labels(m) <- c(y=expression(psi), z=expression(zeta)) nodecolor(m,~y+z+x,border=c("white","white","black"), labcol="white", lwd=c(1,1,5), lty=c(1,2)) <- c("orange","indianred","lightgreen") edgelabels(m,y~z+x, cex=c(2,1.5), col=c("orange","black"),labcol="darkblue", arrowhead=c("tee","dot"), lwd=c(3,1)) <- expression(phi,rho) edgelabels(m,c(v,x)~z, labcol="red", cex=0.8,arrowhead="none") <- 2 if (interactive()) { plot(m,addstyle=FALSE) } m <- lvm(y~x) labels(m) <- list(x="multiple\\nlines") if (interactive()) { op <- par(mfrow=c(1,2)) plot(m,plain=TRUE) plot(m) par(op) d <- sim(m,100) e <- estimate(m,d) plot(e,type="sd") } } \author{ Klaus K. 
Holst } \keyword{aplot} \keyword{graphs} lava/man/hubble.Rd0000644000176200001440000000057513162174023013506 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{hubble} \alias{hubble} \title{Hubble data} \format{data.frame} \source{ Freedman, W. L., et al. 2001, AstroPhysicalJournal, 553, 47. } \description{ Velocity (v) and distance (D) measures of 36 Type Ia super-novae from the Hubble Space Telescope } \keyword{datasets} lava/man/op_match.Rd0000644000176200001440000000075313162174023014035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/operators.R \name{\%ni\%} \alias{\%ni\%} \title{Matching operator (x not in y) oposed to the \code{\%in\%}-operator (x in y)} \usage{ x \%ni\% y } \arguments{ \item{x}{vector} \item{y}{vector of same type as \code{x}} } \value{ A logical vector. } \description{ Matching operator } \examples{ 1:10 \%ni\% c(1,5,10) } \seealso{ \code{\link{match}} } \author{ Klaus K. Holst } \keyword{misc} \keyword{utilities} lava/man/complik.Rd0000644000176200001440000000243113162174023013674 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/complik.R \name{complik} \alias{complik} \title{Composite Likelihood for probit latent variable models} \usage{ complik(x, data, k = 2, type = c("nearest", "all"), pairlist, silent = TRUE, estimator = "normal", ...) } \arguments{ \item{x}{\code{lvm}-object} \item{data}{data.frame} \item{k}{Size of composite groups} \item{type}{Determines number of groups. With \code{type="nearest"} (default) only neighboring items will be grouped, e.g. for \code{k=2} (y1,y2),(y2,y3),... With \code{type="all"} all combinations of size \code{k} are included} \item{pairlist}{A list of indices specifying the composite groups. Optional argument which overrides \code{k} and \code{type} but gives complete flexibility in the specification of the composite likelihood} \item{silent}{Turn output messsages on/off} \item{estimator}{Model (pseudo-likelihood) to use for the pairs/groups} \item{\dots}{Additional arguments parsed on to lower-level functions} } \value{ An object of class \code{clprobit} inheriting methods from \code{lvm} } \description{ Estimate parameters in a probit latent variable model via a composite likelihood decomposition. } \seealso{ estimate } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/images.Rd0000644000176200001440000000370413162174023013507 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/img.R \name{images} \alias{images} \title{Organize several image calls (for visualizing categorical data)} \usage{ images(x, group, ncol = 2, byrow = TRUE, colorbar = 1, colorbar.space = 0.1, label.offset = 0.02, order = TRUE, colorbar.border = 0, main, rowcol = FALSE, plotfun = NULL, axis1, axis2, mar, col = list(c("#EFF3FF", "#BDD7E7", "#6BAED6", "#2171B5"), c("#FEE5D9", "#FCAE91", "#FB6A4A", "#CB181D"), c("#EDF8E9", "#BAE4B3", "#74C476", "#238B45"), c("#FEEDDE", "#FDBE85", "#FD8D3C", "#D94701")), ...) 
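## Referring back to the complik() function documented above, which ships
## without an example in this file: a purely hypothetical sketch, assuming
## binary items generated via ordinal() and using the documented 'k' and
## 'type' arguments, could look like
## m <- lvm(c(y1,y2,y3) ~ eta); latent(m) <- ~eta
## ordinal(m, K=2) <- ~y1+y2+y3
## d <- sim(m, 200)
## e <- complik(m, d, k=2, type="nearest")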
} \arguments{ \item{x}{data.frame or matrix} \item{group}{group variable} \item{ncol}{number of columns in layout} \item{byrow}{organize by row if TRUE} \item{colorbar}{Add color bar} \item{colorbar.space}{Space around color bar} \item{label.offset}{label offset} \item{order}{order} \item{colorbar.border}{Add border around color bar} \item{main}{Main title} \item{rowcol}{switch rows and columns} \item{plotfun}{Alternative plot function (instead of 'image')} \item{axis1}{Axis 1} \item{axis2}{Axis 2} \item{mar}{Margins} \item{col}{Colours} \item{...}{Additional arguments to lower level graphics functions} } \description{ Visualize categorical by group variable } \examples{ X <- matrix(rbinom(400,3,0.5),20) group <- rep(1:4,each=5) images(X,colorbar=0,zlim=c(0,3)) images(X,group=group,zlim=c(0,3)) \dontrun{ images(X,group=group,col=list(RColorBrewer::brewer.pal(4,"Purples"), RColorBrewer::brewer.pal(4,"Greys"), RColorBrewer::brewer.pal(4,"YlGn"), RColorBrewer::brewer.pal(4,"PuBuGn")),colorbar=2,zlim=c(0,3)) } images(list(X,X,X,X),group=group,zlim=c(0,3)) images(list(X,X,X,X),ncol=1,group=group,zlim=c(0,3)) images(list(X,X),group,axis2=c(FALSE,FALSE),axis1=c(FALSE,FALSE), mar=list(c(0,0,0,0),c(0,0,0,0)),yaxs="i",xaxs="i",zlim=c(0,3)) } \author{ Klaus Holst } lava/man/backdoor.Rd0000644000176200001440000000133013162174023014017 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backdoor.R \name{backdoor} \alias{backdoor} \title{Backdoor criterion} \usage{ backdoor(object, f, cond, ..., return.graph = FALSE) } \arguments{ \item{object}{lvm object} \item{f}{formula. Conditioning, z, set can be given as y~x|z} \item{cond}{Vector of variables to conditon on} \item{...}{Additional arguments to lower level functions} \item{return.graph}{Return moral ancestral graph with z and effects from x removed} } \description{ Check backdoor criterion of a lvm object } \examples{ m <- lvm(y~c2,c2~c1,x~c1,m1~x,y~m1, v1~c3, x~c3,v1~y, x~z1, z2~z1, z2~z3, y~z3+z2+g1+g2+g3) ll <- backdoor(m, y~x) backdoor(m, y~x|c1+z1+g1) } lava/man/serotonin2.Rd0000644000176200001440000000041413162174023014337 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{serotonin2} \alias{serotonin2} \title{Data} \format{data.frame} \source{ Simulated } \description{ Description } \seealso{ serotonin } \keyword{datasets} lava/man/Combine.Rd0000644000176200001440000000123013162174023013606 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine.R \name{Combine} \alias{Combine} \title{Report estimates across different models} \usage{ Combine(x, ...) } \arguments{ \item{x}{list of model objects} \item{...}{additional arguments to lower level functions} } \description{ Report estimates across different models } \examples{ data(serotonin) m1 <- lm(cau ~ age*gene1 + age*gene2,data=serotonin) m2 <- lm(cau ~ age + gene1,data=serotonin) m3 <- lm(cau ~ age*gene2,data=serotonin) Combine(list(A=m1,B=m2,C=m3),fun=function(x) c("_____"="",R2=" "\%++\%format(summary(x)$r.squared,digits=2))) } \author{ Klaus K. Holst } lava/man/partialcor.Rd0000644000176200001440000000143213162174023014376 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/partialcor.R \name{partialcor} \alias{partialcor} \title{Calculate partial correlations} \usage{ partialcor(formula, data, level = 0.95, ...) 
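## Additional hypothetical call (not in the original examples), using 'd' from
## the example further below: the 'level' argument widens or narrows the
## Fisher z-based confidence limits,
## partialcor(~x1+x2, d, level=0.99)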
} \arguments{ \item{formula}{formula speciying the covariates and optionally the outcomes to calculate partial correlation for} \item{data}{data.frame} \item{level}{Level of confidence limits} \item{...}{Additional arguments to lower level functions} } \value{ A coefficient matrix } \description{ Calculate partial correlation coefficients and confidence limits via Fishers z-transform } \examples{ m <- lvm(c(y1,y2,y3)~x1+x2) covariance(m) <- c(y1,y2,y3)~y1+y2+y3 d <- sim(m,500) partialcor(~x1+x2,d) } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/makemissing.Rd0000644000176200001440000000140313162174023014543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/makemissing.R \name{makemissing} \alias{makemissing} \title{Create random missing data} \usage{ makemissing(data, p = 0.2, cols = seq_len(ncol(data)), rowwise = FALSE, nafun = function(x) x) } \arguments{ \item{data}{data.frame} \item{p}{Fraction of missing data in each column} \item{cols}{Which columns (name or index) to alter} \item{rowwise}{Should missing occur row-wise (either none or all selected columns are missing)} \item{nafun}{(Optional) function to be applied on data.frame before return (e.g. \code{na.omit} to return complete-cases only)} } \value{ data.frame } \description{ Generates missing entries in data.frame/matrix } \author{ Klaus K. Holst } \keyword{utilities} lava/man/correlation.Rd0000644000176200001440000000057013162174023014561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/correlation.R \name{correlation} \alias{correlation} \title{Generic method for extracting correlation coefficients of model object} \usage{ correlation(x, ...) } \arguments{ \item{x}{Object} \item{\dots}{Additional arguments} } \description{ Generic correlation method } \author{ Klaus K. Holst } lava/man/Missing.Rd0000644000176200001440000000355313162174023013655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Missing.R \name{Missing} \alias{Missing} \alias{Missing,} \alias{Missing<-} \title{Missing value generator} \usage{ Missing(object, formula, Rformula, missing.name, suffix = "0", ...) } \arguments{ \item{object}{\code{lvm}-object.} \item{formula}{The right hand side specifies the name of a latent variable which is not always observed. The left hand side specifies the name of a new variable which is equal to the latent variable but has missing values. 
If given as a string then this is used as the name of the latent (full-data) name, and the observed data name is 'missing.data'} \item{Rformula}{Missing data mechanism with left hand side specifying the name of the observed data indicator (may also just be given as a character instead of a formula)} \item{missing.name}{Name of observed data variable (only used if 'formula' was given as a character specifying the name of the full-data variable)} \item{suffix}{If missing.name is missing, then the name of the oberved data variable will be the name of the full-data variable + the suffix} \item{...}{Passed to binomial.lvm.} } \value{ lvm object } \description{ Missing value generator } \details{ This function adds a binary variable to a given \code{lvm} model and also a variable which is equal to the original variable where the binary variable is equal to zero } \examples{ library(lava) set.seed(17) m <- lvm(y0~x01+x02+x03) m <- Missing(m,formula=x1~x01,Rformula=R1~0.3*x02+-0.7*x01,p=0.4) sim(m,10) m <- lvm(y~1) m <- Missing(m,"y","r") ## same as ## m <- Missing(m,y~1,r~1) sim(m,10) ## same as m <- lvm(y~1) Missing(m,"y") <- r~x sim(m,10) m <- lvm(y~1) m <- Missing(m,"y","r",suffix=".") ## same as ## m <- Missing(m,"y","r",missing.name="y.") ## same as ## m <- Missing(m,y.~y,"r") sim(m,10) } \author{ Thomas A. Gerds } lava/man/twostage.lvmfit.Rd0000644000176200001440000000716613162174023015405 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/twostage.R \name{twostage.lvmfit} \alias{twostage.lvmfit} \alias{twostage.lvm} \alias{twostage.lvm.mixture} \alias{twostage.estimate} \alias{nonlinear} \alias{nonlinear<-} \title{Two-stage estimator (non-linear SEM)} \usage{ \method{twostage}{lvmfit}(object, model2, data = NULL, predict.fun = function(mu, var, data, ...) cbind(u1 = mu[, 1], u2 = mu[, 1]^2 + var[1]), id1 = NULL, id2 = NULL, all = FALSE, formula = NULL, std.err = TRUE, ...) } \arguments{ \item{object}{Stage 1 measurement model} \item{model2}{Stage 2 SEM} \item{data}{data.frame} \item{predict.fun}{Prediction of latent variable} \item{id1}{Optional id-variable (stage 1 model)} \item{id2}{Optional id-variable (stage 2 model)} \item{all}{If TRUE return additional output (naive estimates)} \item{formula}{optional formula specifying non-linear relation} \item{std.err}{If FALSE calculations of standard errors will be skipped} \item{...}{Additional arguments to lower level functions} } \description{ Two-stage estimator for non-linear structural equation models } \examples{ m <- lvm(c(x1,x2,x3)~f1,f1~z, c(y1,y2,y3)~f2,f2~f1+z) latent(m) <- ~f1+f2 d <- simulate(m,100,p=c("f2,f2"=2,"f1,f1"=0.5),seed=1) ## Full MLE ee <- estimate(m,d) ## Manual two-stage \dontrun{ m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 e1 <- estimate(m1,d) pp1 <- predict(e1,f1~x1+x2+x3) d$u1 <- pp1[,] d$u2 <- pp1[,]^2+attr(pp1,"cond.var")[1] m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta e2 <- estimate(m2,d) } ## Two-stage m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta pred <- function(mu,var,data,...) 
cbind("u1"=mu[,1],"u2"=mu[,1]^2+var[1]) (mm <- twostage(m1,model2=m2,data=d,predict.fun=pred)) if (interactive()) { pf <- function(p) p["eta"]+p["eta~u1"]*u + p["eta~u2"]*u^2 plot(mm,f=pf,data=data.frame(u=seq(-2,2,length.out=100)),lwd=2) } ## Splines f <- function(x) cos(2*x)+x+-0.25*x^2 m <- lvm(x1+x2+x3~eta1, y1+y2+y3~eta2, latent=~eta1+eta2) functional(m, eta2~eta1) <- f d <- sim(m,500,seed=1,latent=TRUE) m1 <- lvm(x1+x2+x3~eta1,latent=~eta1) m2 <- lvm(y1+y2+y3~eta2,latent=~eta2) mm <- twostage(m1,m2,formula=eta2~eta1,type="spline") if (interactive()) plot(mm) nonlinear(m2,type="quadratic") <- eta2~eta1 a <- twostage(m1,m2,data=d) if (interactive()) plot(a) kn <- c(-1,0,1) nonlinear(m2,type="spline",knots=kn) <- eta2~eta1 a <- twostage(m1,m2,data=d) x <- seq(-3,3,by=0.1) y <- predict(a, newdata=data.frame(eta1=x)) if (interactive()) { plot(eta2~eta1, data=d) lines(x,y, col="red", lwd=5) p <- estimate(a,f=function(p) predict(a,p=p,newdata=x))$coefmat plot(eta2~eta1, data=d) lines(x,p[,1], col="red", lwd=5) confband(x,lower=p[,3],upper=p[,4],center=p[,1], polygon=TRUE, col=Col(2,0.2)) l1 <- lm(eta2~splines::ns(eta1,knots=kn),data=d) p1 <- predict(l1,newdata=data.frame(eta1=x),interval="confidence") lines(x,p1[,1],col="green",lwd=5) confband(x,lower=p1[,2],upper=p1[,3],center=p1[,1], polygon=TRUE, col=Col(3,0.2)) } \dontrun{ ## Reduce timing ## Cross-validation example ma <- lvm(c(x1,x2,x3)~u,latent=~u) ms <- functional(ma, y~u, f=function(x) -.4*x^2) d <- sim(ms,500)#,seed=1) ea <- estimate(ma,d) mb <- lvm() mb1 <- nonlinear(mb,type="linear",y~u) mb2 <- nonlinear(mb,type="quadratic",y~u) mb3 <- nonlinear(mb,type="spline",knots=c(-3,-1,0,1,3),y~u) mb4 <- nonlinear(mb,type="spline",knots=c(-3,-2,-1,0,1,2,3),y~u) ff <- lapply(list(mb1,mb2,mb3,mb4), function(m) function(data,...) twostage(ma,m,data=data,st.derr=FALSE)) a <- cv(ff,data=d,rep=1,mc.cores=1) a } } lava/man/estimate.lvm.Rd0000644000176200001440000001127213162174023014651 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate.lvm.R \name{estimate.lvm} \alias{estimate.lvm} \title{Estimation of parameters in a Latent Variable Model (lvm)} \usage{ \method{estimate}{lvm}(x, data = parent.frame(), estimator = NULL, control = list(), missing = FALSE, weights, weightsname, data2, id, fix, index = !quick, graph = FALSE, silent = lava.options()$silent, quick = FALSE, method, param, cluster, p, ...) } \arguments{ \item{x}{\code{lvm}-object} \item{data}{\code{data.frame}} \item{estimator}{String defining the estimator (see details below)} \item{control}{control/optimization parameters (see details below)} \item{missing}{Logical variable indiciating how to treat missing data. Setting to FALSE leads to complete case analysis. In the other case likelihood based inference is obtained by integrating out the missing data under assumption the assumption that data is missing at random (MAR).} \item{weights}{Optional weights to used by the chosen estimator.} \item{weightsname}{Weights names (variable names of the model) in case \code{weights} was given as a vector of column names of \code{data}} \item{data2}{Optional additional dataset used by the chosen estimator.} \item{id}{Vector (or name of column in \code{data}) that identifies correlated groups of observations in the data leading to variance estimates based on a sandwich estimator} \item{fix}{Logical variable indicating whether parameter restriction automatically should be imposed (e.g. 
intercepts of latent variables set to 0 and at least one regression parameter of each measurement model fixed to ensure identifiability.)} \item{index}{For internal use only} \item{graph}{For internal use only} \item{silent}{Logical argument indicating whether information should be printed during estimation} \item{quick}{If TRUE the parameter estimates are calculated but all additional information such as standard errors are skipped} \item{method}{Optimization method} \item{param}{set parametrization (see \code{help(lava.options)})} \item{cluster}{Obsolete. Alias for 'id'.} \item{p}{Evaluate model in parameter 'p' (no optimization)} \item{...}{Additional arguments to be passed to the low level functions} } \value{ A \code{lvmfit}-object. } \description{ Estimate parameters. MLE, IV or user-defined estimator. } \details{ A list of parameters controlling the estimation and optimization procedures is parsed via the \code{control} argument. By default Maximum Likelihood is used assuming multivariate normal distributed measurement errors. A list with one or more of the following elements is expected: \describe{ \item{start:}{Starting value. The order of the parameters can be shown by calling \code{coef} (with \code{mean=TRUE}) on the \code{lvm}-object or with \code{plot(..., labels=TRUE)}. Note that this requires a check that it is actual the model being estimated, as \code{estimate} might add additional restriction to the model, e.g. through the \code{fix} and \code{exo.fix} arguments. The \code{lvm}-object of a fitted model can be extracted with the \code{Model}-function.} \item{starterfun:}{Starter-function with syntax \code{function(lvm, S, mu)}. Three builtin functions are available: \code{startvalues}, \code{startvalues0}, \code{startvalues1}, ...} \item{estimator:}{ String defining which estimator to use (Defaults to ``\code{gaussian}'')} \item{meanstructure}{Logical variable indicating whether to fit model with meanstructure.} \item{method:}{ String pointing to alternative optimizer (e.g. \code{optim} to use simulated annealing).} \item{control:}{ Parameters passed to the optimizer (default \code{stats::nlminb}).} \item{tol:}{ Tolerance of optimization constraints on lower limit of variance parameters. } } } \examples{ dd <- read.table(header=TRUE, text="x1 x2 x3 0.0 -0.5 -2.5 -0.5 -2.0 0.0 1.0 1.5 1.0 0.0 0.5 0.0 -2.5 -1.5 -1.0") e <- estimate(lvm(c(x1,x2,x3)~u),dd) ## Simulation example m <- lvm(list(y~v1+v2+v3+v4,c(v1,v2,v3,v4)~x)) covariance(m) <- v1~v2+v3+v4 dd <- sim(m,10000) ## Simulate 10000 observations from model e <- estimate(m, dd) ## Estimate parameters e ## Using just sufficient statistics n <- nrow(dd) e0 <- estimate(m,data=list(S=cov(dd)*(n-1)/n,mu=colMeans(dd),n=n)) rm(dd) ## Multiple group analysis m <- lvm() regression(m) <- c(y1,y2,y3)~u regression(m) <- u~x d1 <- sim(m,100,p=c("u,u"=1,"u~x"=1)) d2 <- sim(m,100,p=c("u,u"=2,"u~x"=-1)) mm <- baptize(m) regression(mm,u~x) <- NA covariance(mm,~u) <- NA intercept(mm,~u) <- NA ee <- estimate(list(mm,mm),list(d1,d2)) ## Missing data d0 <- makemissing(d1,cols=1:2) e0 <- estimate(m,d0,missing=TRUE) e0 } \seealso{ estimate.default score, information } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/wrapvec.Rd0000644000176200001440000000051113162174023013702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wrapvec.R \name{wrapvec} \alias{wrapvec} \title{Wrap vector} \usage{ wrapvec(x, delta = 0L, ...) 
} \arguments{ \item{x}{Vector or integer (size)} \item{delta}{Shift} \item{...}{Additional parameters} } \description{ Wrap vector } \examples{ wrapvec(5,2) } lava/man/Expand.Rd0000644000176200001440000000126613162174023013462 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Expand.R \name{Expand} \alias{Expand} \title{Create a Data Frame from All Combinations of Factors} \usage{ Expand(`_data`, ...) } \arguments{ \item{_data}{Data.frame} \item{...}{vectors, factors or a list containing these} } \description{ Create a Data Frame from All Combinations of Factors } \details{ Simple wrapper of the 'expand.grid' function. If x is a table then a data frame is returned with one row per individual observation. } \examples{ dd <- Expand(iris, Sepal.Length=2:8, Species=c("virginica","setosa")) summary(dd) T <- with(warpbreaks, table(wool, tension)) Expand(T) } \author{ Klaus K. Holst } lava/man/equivalence.Rd0000644000176200001440000000205013162174023014536 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence.R \name{equivalence} \alias{equivalence} \title{Identify candidates of equivalent models} \usage{ equivalence(x, rel, tol = 0.001, k = 1, omitrel = TRUE, ...) } \arguments{ \item{x}{\code{lvmfit}-object} \item{rel}{Formula or character-vector specifying two variables to omit from the model and subsequently search for possible equivalent models} \item{tol}{Define two models as empirically equivalent if the absolute difference in score test is less than \code{tol}} \item{k}{Number of parameters to test simultaneously. For \code{equivalence} the number of additional associations to be added instead of \code{rel}.} \item{omitrel}{if \code{k} is greater than 1, this boolean defines whether to omit candidates containing \code{rel} from the output} \item{\dots}{Additional arguments to be passed to the low level functions} } \description{ Identifies candidates of equivalent models } \seealso{ \code{\link{compare}}, \code{\link{modelsearch}} } \author{ Klaus K. Holst } lava/man/lava-package.Rd0000644000176200001440000000065313162174023014556 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{package} \name{lava-package} \alias{lava-package} \alias{lava} \title{Estimation and simulation of latent variable models} \description{ Framework for estimating parameters and simulating data from Latent Variable Models. } \examples{ lava() } \author{ Klaus K. Holst Maintainer: } \keyword{package} lava/man/toformula.Rd0000644000176200001440000000111513162174023014244 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/toformula.R \name{toformula} \alias{toformula} \title{Converts strings to formula} \usage{ toformula(y = ".", x = ".") } \arguments{ \item{y}{vector of predictors} \item{x}{vector of responses} } \value{ An object of class \code{formula} } \description{ Converts a vector of predictors and a vector of responses (characters) into a formula expression. } \examples{ toformula(c("age","gender"), "weight") } \seealso{ \code{\link{as.formula}}, } \author{ Klaus K.
Holst } \keyword{models} \keyword{utilities} lava/man/curly.Rd0000644000176200001440000000261613162174023013401 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/curly.R \name{curly} \alias{curly} \title{Adds curly brackets to plot} \usage{ curly(x, y, len = 1, theta = 0, wid, shape = 1, col = 1, lwd = 1, lty = 1, grid = FALSE, npoints = 50, text = NULL, offset = c(0.05, 0)) } \arguments{ \item{x}{center of the x axis of the curly brackets (or start end coordinates (x1,x2))} \item{y}{center of the y axis of the curly brackets (or start end coordinates (y1,y2))} \item{len}{Length of the curly brackets} \item{theta}{angle (in radians) of the curly brackets orientation} \item{wid}{Width of the curly brackets} \item{shape}{shape (curvature)} \item{col}{color (passed to lines/grid.lines)} \item{lwd}{line width (passed to lines/grid.lines)} \item{lty}{line type (passed to lines/grid.lines)} \item{grid}{If TRUE use grid graphics (compatability with ggplot2)} \item{npoints}{Number of points used in curves} \item{text}{Label} \item{offset}{Label offset (x,y)} } \description{ Adds curly brackets to plot } \examples{ if (interactive()) { plot(0,0,type="n",axes=FALSE,xlab="",ylab="") curly(x=c(1,0),y=c(0,1),lwd=2,text="a") curly(x=c(1,0),y=c(0,1),lwd=2,text="b",theta=pi) curly(x=-0.5,y=0,shape=1,theta=pi,text="c") curly(x=0,y=0,shape=1,theta=0,text="d") curly(x=0.5,y=0,len=0.2,theta=pi/2,col="blue",lty=2) curly(x=0.5,y=-0.5,len=0.2,theta=-pi/2,col="red",shape=1e3,text="e") } } lava/man/confband.Rd0000644000176200001440000000521513162174023014013 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/confband.R \name{confband} \alias{confband} \alias{forestplot} \title{Add Confidence limits bar to plot} \usage{ confband(x, lower, upper, center = NULL, line = TRUE, delta = 0.07, centermark = 0.03, pch, blank = TRUE, vert = TRUE, polygon = FALSE, step = FALSE, ...) } \arguments{ \item{x}{Position (x-coordinate if vert=TRUE, y-coordinate otherwise)} \item{lower}{Lower limit (if NULL no limits is added, and only the center is drawn (if not NULL))} \item{upper}{Upper limit} \item{center}{Center point} \item{line}{If FALSE do not add line between upper and lower bound} \item{delta}{Length of limit bars} \item{centermark}{Length of center bar} \item{pch}{Center symbol (if missing a line is drawn)} \item{blank}{If TRUE a white ball is plotted before the center is added to the plot} \item{vert}{If TRUE a vertical bar is plotted. Otherwise a horizontal bar is used} \item{polygon}{If TRUE polygons are added between 'lower' and 'upper'.} \item{step}{Type of polygon (step-function or piecewise linear)} \item{...}{Additional low level arguments (e.g. 
col, lwd, lty,...)} } \description{ Add Confidence limits bar to plot } \examples{ plot(0,0,type="n",xlab="",ylab="") confband(0.5,-0.5,0.5,0,col="darkblue") confband(0.8,-0.5,0.5,0,col="darkred",vert=FALSE,pch=1,cex=1.5) set.seed(1) K <- 20 est <- rnorm(K) se <- runif(K,0.2,0.4) x <- cbind(est,est-2*se,est+2*se,runif(K,0.5,2)) x[c(3:4,10:12),] <- NA rownames(x) <- unlist(lapply(letters[seq(K)],function(x) paste(rep(x,4),collapse=""))) rownames(x)[which(is.na(est))] <- "" signif <- sign(x[,2])==sign(x[,3]) forestplot(x,text.right=FALSE) forestplot(x[,-4],sep=c(2,15),col=signif+1,box1=TRUE,delta=0.2,pch=16,cex=1.5) forestplot(x,vert=TRUE,text=FALSE) forestplot(x,vert=TRUE,text=FALSE,pch=NA) ##forestplot(x,vert=TRUE,text.vert=FALSE) ##forestplot(val,vert=TRUE,add=TRUE) z <- seq(10) zu <- c(z[-1],10) plot(z,type="n") confband(z,zu,rep(0,length(z)),col=Col("darkblue"),polygon=TRUE,step=TRUE) confband(z,zu,zu-2,col=Col("darkred"),polygon=TRUE,step=TRUE) z <- seq(0,1,length.out=100) plot(z,z,type="n") confband(z,z,z^2,polygon="TRUE",col=Col("darkblue")) set.seed(1) k <- 10 x <- seq(k) est <- rnorm(k) sd <- runif(k) val <- cbind(x,est,est-sd,est+sd) par(mfrow=c(1,2)) plot(0,type="n",xlim=c(0,k+1),ylim=range(val[,-1]),axes=FALSE,xlab="",ylab="") axis(2) confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2) plot(0,type="n",ylim=c(0,k+1),xlim=range(val[,-1]),axes=FALSE,xlab="",ylab="") axis(1) confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2,vert=FALSE) } \seealso{ \code{confband} } \author{ Klaus K. Holst } \keyword{iplot} lava/man/spaghetti.Rd0000644000176200001440000000434413162174023014233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spaghetti.R \name{spaghetti} \alias{spaghetti} \title{Spaghetti plot} \usage{ spaghetti(formula, data, id = "id", group = NULL, type = "o", lty = 1, pch = NA, col = 1:10, alpha = 0.3, lwd = 1, level = 0.95, trend.formula = formula, tau = NULL, trend.lty = 1, trend.join = TRUE, trend.delta = 0.2, trend = !is.null(tau), trend.col = col, trend.alpha = 0.2, trend.lwd = 3, trend.jitter = 0, legend = NULL, by = NULL, xlab = "Time", ylab = "", add = FALSE, ...) 
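## Hypothetical extra call (not in the original examples), assuming the 'by'
## argument accepts a grouping variable/formula to produce one panel per level,
## using 'dd' and 'z' from the examples further below:
## spaghetti(y~num, dd, id="id", by=~z, col=Col(1,.4))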
} \arguments{ \item{formula}{Formula (response ~ time)} \item{data}{data.frame} \item{id}{Id variable} \item{group}{group variable} \item{type}{Type (line 'l', stair 's', ...)} \item{lty}{Line type} \item{pch}{Plot symbol} \item{col}{Colour} \item{alpha}{transparency (0-1)} \item{lwd}{Line width} \item{level}{Confidence level} \item{trend.formula}{Formula for trendline} \item{tau}{Quantile to estimate (trend)} \item{trend.lty}{Trend line type} \item{trend.join}{Trend polygon} \item{trend.delta}{Length of limit bars} \item{trend}{Add trend line} \item{trend.col}{Colour of trend line} \item{trend.alpha}{Transparency} \item{trend.lwd}{Trend line width} \item{trend.jitter}{Jitter amount} \item{legend}{Legend} \item{by}{make separate plot for each level in 'by' (formula, name of column, or vector)} \item{xlab}{Label of X-axis} \item{ylab}{Label of Y-axis} \item{add}{Add to existing device} \item{...}{Additional arguments to lower level functions} } \description{ Spaghetti plot for longitudinal data } \examples{ if (interactive() & requireNamespace("mets")) { K <- 5 y <- "y"\%++\%seq(K) m <- lvm() regression(m,y=y,x=~u) <- 1 regression(m,y=y,x=~s) <- seq(K)-1 regression(m,y=y,x=~x) <- "b" N <- 50 d <- sim(m,N); d$z <- rbinom(N,1,0.5) dd <- mets::fast.reshape(d); dd$num <- dd$num+3 spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), trend.formula=~factor(num),trend=TRUE,trend.col="darkblue") dd$num <- dd$num+rnorm(nrow(dd),sd=0.5) ## Unbalanced spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), trend=TRUE,trend.col="darkblue") spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), trend.formula=~num+I(num^2),trend=TRUE,trend.col="darkblue") } } \author{ Klaus K. Holst } lava/man/pcor.Rd0000644000176200001440000000066313162174023013206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pcor.R \name{pcor} \alias{pcor} \title{Polychoric correlation} \usage{ pcor(x, y, X, start, ...)
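## A hypothetical sketch (assumed interface: 'x' and 'y' are ordinal vectors),
## with data simulated via ordinal() as in the ordreg() example elsewhere in
## this package:
## m <- lvm(c(y1,y2) ~ x); ordinal(m, K=3) <- ~y1+y2
## d <- sim(m, 200)
## pcor(d$y1, d$y2)   ## ML estimate of the polychoric correlation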
} \arguments{ \item{x}{Variable 1} \item{y}{Variable 2} \item{X}{Optional covariates} \item{start}{Optional starting values} \item{...}{Additional arguments to lower level functions} } \description{ Maximum likelhood estimates of polychoric correlations } lava/man/Org.Rd0000644000176200001440000000246113162174023012770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zorg.R \name{Org} \alias{Org} \title{Convert object to ascii suitable for org-mode} \usage{ Org(x, ..., ncol, include.rownames = TRUE, include.colnames = TRUE, header = TRUE, frame = "topbot", rownames = NULL, colnames = NULL, type = "org", tab = FALSE, margins = TRUE, print = TRUE, html, latex, sep = " ") } \arguments{ \item{x}{R object} \item{...}{additional arguments to lower level functions} \item{ncol}{If \code{x} is a vector and \code{ncol} is given as argument, the resulting output will be a \code{matrix} with \code{ncol} columns} \item{include.rownames}{If \code{FALSE} row names are removed} \item{include.colnames}{If \code{FALSE} column names are removed} \item{header}{If TRUE the header is included} \item{frame}{Frame argument (see \code{ascii})} \item{rownames}{Optional vector of row names} \item{colnames}{Optional vector of column names} \item{type}{Type argument (see \code{ascii})} \item{tab}{Tabulate?} \item{margins}{Add margins to table?} \item{print}{print or return result} \item{html}{HTML prefix (added to ATTR_HTML)} \item{latex}{LaTeX prefix (added to ATTR_LaTeX)} \item{sep}{separator with type='ascii'} } \description{ Convert object to ascii suitable for org-mode } \author{ Klaus K. Holst } lava/man/nldata.Rd0000644000176200001440000000043313162174023013501 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{nldata} \alias{nldata} \title{Example data (nonlinear model)} \format{data.frame} \source{ Simulated } \description{ Example data (nonlinear model) } \keyword{datasets} lava/man/contr.Rd0000644000176200001440000000146613162174023013372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/contr.R \name{contr} \alias{contr} \alias{parsedesign} \title{Create contrast matrix} \usage{ contr(p, n, diff = TRUE, ...) } \arguments{ \item{p}{index of non-zero entries (see example)} \item{n}{Total number of parameters (if omitted the max number in p will be used)} \item{diff}{If FALSE all non-zero entries are +1, otherwise the second non-zero element in each row will be -1.} \item{...}{Additional arguments to lower level functions} } \description{ Create contrast matrix typically for use with 'estimate' (Wald tests). 
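As a hedged sketch (the call to \code{estimate} below is an assumption, not taken from this file), a contrast matrix produced by \code{contr} can be combined with a fitted model to perform joint Wald tests:

B <- contr(list(c(1,2), c(1,3)), n=3)   ## rows encode p1-p2 and p1-p3
## e <- estimate(m, d)                  ## some fitted lava model (assumed)
## estimate(e, B)                       ## joint Wald test of both contrasts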
} \examples{ contr(2,n=5) contr(as.list(2:4),n=5) contr(list(1,2,4),n=5) contr(c(2,3,4),n=5) contr(list(c(1,3),c(2,4)),n=5) contr(list(c(1,3),c(2,4),5)) parsedesign(c("aa","b","c"),"?","?",diff=c(FALSE,TRUE)) } lava/man/vars.Rd0000644000176200001440000000563113162174023013216 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vars.R \name{vars} \alias{vars} \alias{vars.lvm} \alias{vars.lvmfit} \alias{latent} \alias{latent<-} \alias{latent.lvm} \alias{latent<-.lvm} \alias{latent.lvmfit} \alias{latent.multigroup} \alias{manifest} \alias{manifest.lvm} \alias{manifest.lvmfit} \alias{manifest.multigroup} \alias{exogenous} \alias{exogenous<-} \alias{exogenous.lvm} \alias{exogenous<-.lvm} \alias{exogenous.lvmfit} \alias{exogenous.multigroup} \alias{endogenous} \alias{endogenous.lvm} \alias{endogenous.lvmfit} \alias{endogenous.multigroup} \title{Extract variable names from latent variable model} \usage{ vars(x,...) endogenous(x,...) exogenous(x,...) manifest(x,...) latent(x,...) \method{exogenous}{lvm}(x,silent = FALSE, xfree = TRUE,...) <- value \method{exogenous}{lvm}(x,latent=FALSE,index=TRUE,...) \method{latent}{lvm}(x,clear=FALSE,...) <- value } \arguments{ \item{x}{\code{lvm}-object} \item{\dots}{Additional arguments to be passed to the low level functions} \item{latent}{Logical defining whether latent variables without parents should be included in the result} \item{index}{For internal use only} \item{clear}{Logical indicating whether to add or remove latent variable status} \item{silent}{Suppress messages} \item{xfree}{For internal use only} \item{value}{Formula or character vector of variable names.} } \value{ Vector of variable names. } \description{ Extract exogenous variables (predictors), endogenous variables (outcomes), latent variables (random effects), manifest (observed) variables from a \code{lvm} object. } \details{ \code{vars} returns all variables of the \code{lvm}-object including manifest and latent variables. Similarly, \code{manifest} and \code{latent} return the observed resp. latent variables of the model. \code{exogenous} returns all manifest variables without parents, e.g. covariates in the model, however the argument \code{latent=TRUE} can be used to also include latent variables without parents in the result. By default \code{lava} will not include the parameters of the exogenous variables in the optimisation routine during estimation (likelihood of the remaining observed variables conditional on the covariates); however, this behaviour can be altered via the assignment function \code{exogenous<-} telling \code{lava} which subset of (valid) variables to condition on. Finally \code{latent} returns a vector with the names of the latent variables in \code{x}. The assignment function \code{latent<-} can be used to change the latent status of variables in the model. } \examples{ g <- lvm(eta1 ~ x1+x2) regression(g) <- c(y1,y2,y3) ~ eta1 latent(g) <- ~eta1 endogenous(g) exogenous(g) identical(latent(g), setdiff(vars(g),manifest(g))) } \seealso{ \code{\link{endogenous}}, \code{\link{manifest}}, \code{\link{latent}}, \code{\link{exogenous}}, \code{\link{vars}} } \author{ Klaus K.
Holst } \keyword{models} \keyword{regression} lava/man/Col.Rd0000644000176200001440000000156313162174023012760 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Col.R \name{Col} \alias{Col} \title{Generate a transparent RGB color} \usage{ Col(col, alpha = 0.2, locate = 0) } \arguments{ \item{col}{Color (numeric or character)} \item{alpha}{Degree of transparency (0,1)} \item{locate}{Choose colour (with mouse)} } \value{ A character vector with elements of 7 or 9 characters, '"\#"' followed by the red, blue, green and optionally alpha values in hexadecimal (after rescaling to '0 ... 255'). } \description{ This function transforms a standard color (e.g. "red") into an transparent RGB-color (i.e. alpha-blend<1). } \details{ This only works for certain graphics devices (Cairo-X11 (x11 as of R>=2.7), quartz, pdf, ...). } \examples{ plot(runif(1000),cex=runif(1000,0,4),col=Col(c("darkblue","orange"),0.5),pch=16) } \author{ Klaus K. Holst } \keyword{color} lava/man/ordreg.Rd0000644000176200001440000000134713162174023013525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ordreg.R \name{ordreg} \alias{ordreg} \title{Univariate cumulative link regression models} \usage{ ordreg(formula, data = parent.frame(), offset, family = stats::binomial("probit"), start, fast = FALSE, ...) } \arguments{ \item{formula}{formula} \item{data}{data.frame} \item{offset}{offset} \item{family}{family (default proportional odds)} \item{start}{optional starting values} \item{fast}{If TRUE standard errors etc. will not be calculated} \item{...}{Additional arguments to lower level functions} } \description{ Ordinal regression models } \examples{ m <- lvm(y~x) ordinal(m,K=3) <- ~y d <- sim(m,100) e <- ordreg(y~x,d) } \author{ Klaus K. Holst } lava/man/semdata.Rd0000644000176200001440000000037713162174023013663 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{semdata} \alias{semdata} \title{Example SEM data} \format{data.frame} \source{ Simulated } \description{ Simulated data } \keyword{datasets} lava/man/serotonin.Rd0000644000176200001440000000276613162174023014271 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{serotonin} \alias{serotonin} \title{Serotonin data} \format{data.frame} \source{ Simulated } \description{ This simulated data mimics a PET imaging study where the 5-HT2A receptor and serotonin transporter (SERT) binding potential has been quantified into 8 different regions. The 5-HT2A cortical regions are considered high-binding regions measurements. 
These measurements can be regarded as proxy measures of the extra-cellular levels of serotonin in the brain \tabular{rll}{ day \tab numeric \tab Scan day of the year \cr age \tab numeric \tab Age at baseline scan \cr mem \tab numeric \tab Memory performance score \cr depr \tab numeric \tab Depression (mild) status 500 days after baseline \cr gene1 \tab numeric \tab Gene marker 1 (HTR2A) \cr gene2 \tab numeric \tab Gene marker 2 (HTTTLPR) \cr cau \tab numeric \tab SERT binding, Caudate Nucleus \cr th \tab numeric \tab SERT binding, Thalamus \cr put \tab numeric \tab SERT binding, Putamen \cr mid \tab numeric \tab SERT binding, Midbrain \cr aci \tab numeric \tab 5-HT2A binding, Anterior cingulate gyrus \cr pci \tab numeric \tab 5-HT2A binding, Posterior cingulate gyrus \cr sfc \tab numeric \tab 5-HT2A binding, Superior frontal cortex \cr par \tab numeric \tab 5-HT2A binding, Parietal cortex \cr } } \keyword{datasets} lava/man/missingdata.Rd0000644000176200001440000000160513162174023014543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{missingdata} \alias{missingdata} \title{Missing data example} \format{list of data.frames} \source{ Simulated } \description{ Simulated data generated from model \deqn{E(Y_i\mid X) = X, \quad cov(Y_1,Y_2\mid X)=0.5} } \details{ The list contains four data sets 1) Complete data 2) MCAR 3) MAR 4) MNAR (missing mechanism depends on variable V correlated with Y1,Y2) } \examples{ data(missingdata) e0 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[1]]) ## No missing e1 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]]) ## CC (MCAR) e2 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]],missing=TRUE) ## MCAR e3 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]]) ## CC (MAR) e4 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]],missing=TRUE) ## MAR } \keyword{datasets} lava/man/confint.lvmfit.Rd0000644000176200001440000000406513162174023015203 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/confint.R \name{confint.lvmfit} \alias{confint.lvmfit} \alias{confint.multigroupfit} \title{Calculate confidence limits for parameters} \usage{ \method{confint}{lvmfit}(object, parm = seq_len(length(coef(object))), level = 0.95, profile = FALSE, curve = FALSE, n = 20, interval = NULL, lower = TRUE, upper = TRUE, ...) } \arguments{ \item{object}{\code{lvm}-object.} \item{parm}{Index of which parameters to calculate confidence limits for.} \item{level}{Confidence level} \item{profile}{Logical expression defining whether to calculate confidence limits via the profile log likelihood} \item{curve}{if FALSE and profile is TRUE, confidence limits are returned. 
Otherwise, the profile curve is returned.} \item{n}{Number of points at which to evaluate the profile log-likelihood over the interval defined by \code{interval}} \item{interval}{Interval over which the profiling is done} \item{lower}{If FALSE the lower limit will not be estimated (profile intervals only)} \item{upper}{If FALSE the upper limit will not be estimated (profile intervals only)} \item{\dots}{Additional arguments to be passed to the low level functions} } \value{ A 2xp matrix with columns of lower and upper confidence limits } \description{ Calculate Wald or likelihood based (profile likelihood) confidence intervals } \details{ Calculates either Wald confidence limits: \deqn{\hat{\theta} \pm z_{\alpha/2}*\hat\sigma_{\hat\theta}} or profile likelihood confidence limits, defined as the set of values \eqn{\tau}: \deqn{logLik(\hat\theta_{\tau},\tau)-logLik(\hat\theta)< q_{\alpha}/2} where \eqn{q_{\alpha}} is the \eqn{\alpha} fractile of the \eqn{\chi^2_1} distribution, and \eqn{\hat\theta_{\tau}} are obtained by maximizing the log-likelihood with \eqn{\tau} being fixed. } \examples{ m <- lvm(y~x) d <- sim(m,100) e <- estimate(y~x, d) confint(e,3,profile=TRUE) confint(e,3) \donttest{ ## Reduce Ex.timings B <- bootstrap(e,R=50) B } } \seealso{ \code{\link{bootstrap}{lvm}} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/fplot.Rd0000644000176200001440000000126713162174023013370 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fplot.R \name{fplot} \alias{fplot} \title{fplot} \usage{ fplot(x, y, z = NULL, xlab, ylab, ..., z.col = topo.colors(64), data = parent.frame(), add = FALSE) } \arguments{ \item{x}{X variable} \item{y}{Y variable} \item{z}{Z variable (optional)} \item{xlab}{x-axis label} \item{ylab}{y-axis label} \item{...}{additional arguments to lower level plot functions} \item{z.col}{Color} \item{data}{data.frame} \item{add}{If TRUE use current active device} } \description{ Faster plot via RGL } \examples{ if (interactive()) { data(iris) fplot(Sepal.Length ~ Petal.Length+Species, data=iris, size=2, type="s") } } lava/man/parpos.Rd0000644000176200001440000000056613162174023013551 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parpos.R \name{parpos} \alias{parpos} \title{Generic method for finding indices of model parameters} \usage{ parpos(x, ...) } \arguments{ \item{x}{Model object} \item{\dots}{Additional arguments} } \description{ Generic method for finding indices of model parameters } \author{ Klaus K. Holst } lava/man/zibreg.Rd0000644000176200001440000000374413162174023013530 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zib.R \name{zibreg} \alias{zibreg} \title{Regression model for binomial data with unknown group of immortals} \usage{ zibreg(formula, formula.p = ~1, data, family = stats::binomial(), offset = NULL, start, var = "hessian", ...)
} \arguments{ \item{formula}{Formula specifying} \item{formula.p}{Formula for model of disease prevalence} \item{data}{data frame} \item{family}{Distribution family (see the help page \code{family})} \item{offset}{Optional offset} \item{start}{Optional starting values} \item{var}{Type of variance (robust, expected, hessian, outer)} \item{...}{Additional arguments to lower level functions} } \description{ Regression model for binomial data with unkown group of immortals (zero-inflated binomial regression) } \examples{ ## Simulation n <- 2e3 x <- runif(n,0,20) age <- runif(n,10,30) z0 <- rnorm(n,mean=-1+0.05*age) z <- cut(z0,breaks=c(-Inf,-1,0,1,Inf)) p0 <- lava:::expit(model.matrix(~z+age) \%*\% c(-.4, -.4, 0.2, 2, -0.05)) y <- (runif(n)0) regression(m) <- y~x+z+xz d <- sim(m,1e3) summary(lm(y~x+z + x*I(z>0),d)) ################################################## ### Non-random variables ################################################## m <- lvm() distribution(m,~x+z+v+w) <- list(sequence.lvm(0,5),## Seq. 0 to 5 by 1/n ones.lvm(), ## Vector of ones ones.lvm(0.5), ## 0.8n 0, 0.2n 1 ones.lvm(interval=list(c(0.3,0.5),c(0.8,1)))) sim(m,10) ################################################## ### Cox model ### piecewise constant hazard ################################################ m <- lvm(t~x) rates <- c(1,0.5); cuts <- c(0,5) ## Constant rate: 1 in [0,5), 0.5 in [5,Inf) distribution(m,~t) <- coxExponential.lvm(rate=rates,timecut=cuts) \dontrun{ d <- sim(m,2e4,p=c("t~x"=0.1)); d$status <- TRUE plot(timereg::aalen(survival::Surv(t,status)~x,data=d, resample.iid=0,robust=0),spec=1) L <- approxfun(c(cuts,max(d$t)),f=1, cumsum(c(0,rates*diff(c(cuts,max(d$t))))), method="linear") curve(L,0,100,add=TRUE,col="blue") } ################################################## ### Cox model ### piecewise constant hazard, gamma frailty ################################################## m <- lvm(y~x+z) rates <- c(0.3,0.5); cuts <- c(0,5) distribution(m,~y+z) <- list(coxExponential.lvm(rate=rates,timecut=cuts), loggamma.lvm(rate=1,shape=1)) \dontrun{ d <- sim(m,2e4,p=c("y~x"=0,"y~z"=0)); d$status <- TRUE plot(timereg::aalen(survival::Surv(y,status)~x,data=d, resample.iid=0,robust=0),spec=1) L <- approxfun(c(cuts,max(d$y)),f=1, cumsum(c(0,rates*diff(c(cuts,max(d$y))))), method="linear") curve(L,0,100,add=TRUE,col="blue") } ## Equivalent via transform (here with Aalens additive hazard model) m <- lvm(y~x) distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) distribution(m,~z) <- Gamma.lvm(rate=1,shape=1) transform(m,t~y+z) <- prod sim(m,10) ## Shared frailty m <- lvm(c(t1,t2)~x+z) rates <- c(1,0.5); cuts <- c(0,5) distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) distribution(m,~z) <- loggamma.lvm(rate=1,shape=1) \dontrun{ mets::fast.reshape(sim(m,100),varying="t") } ################################################## ### General multivariate distributions ################################################## \dontrun{ m <- lvm() distribution(m,~y1+y2,oratio=4) <- VGAM::rbiplackcop ksmooth2(sim(m,1e4),rgl=FALSE,theta=-20,phi=25) m <- lvm() distribution(m,~z1+z2,"or1") <- VGAM::rbiplackcop distribution(m,~y1+y2,"or2") <- VGAM::rbiplackcop sim(m,10,p=c(or1=0.1,or2=4)) } m <- lvm() distribution(m,~y1+y2+y3,TRUE) <- function(n,...) rmvn(n,sigma=diag(3)+1) var(sim(m,100)) ## Syntax also useful for univariate generators, e.g. 
m <- lvm(y~x+z)
distribution(m,~y,TRUE) <- function(n) rnorm(n,mean=1000)
sim(m,5)
distribution(m,~y,"m1",0) <- rnorm
sim(m,5)
sim(m,5,p=c(m1=100))

##################################################
### Regression design in other parameters
##################################################
## Variance heterogeneity
m <- lvm(y~x)
distribution(m,~y) <- function(n,mean,x) rnorm(n,mean,exp(x)^.5)
if (interactive()) plot(y~x,sim(m,1e3))
## Alternatively, calculate the standard deviation directly
addvar(m) <- ~sd ## If 'sd' should be part of the resulting data.frame
constrain(m,sd~x) <- function(x) exp(x)^.5
distribution(m,~y) <- function(n,mean,sd) rnorm(n,mean,sd)
if (interactive()) plot(y~x,sim(m,1e3))

## Regression on variance parameter
m <- lvm()
regression(m) <- y~x
regression(m) <- v~x
##distribution(m,~v) <- 0 # No stochastic term
## Alternative:
## regression(m) <- v[NA:0]~x
distribution(m,~y) <- function(n,mean,v) rnorm(n,mean,exp(v)^.5)
if (interactive()) plot(y~x,sim(m,1e3))

## Regression on shape parameter in Weibull model
m <- lvm()
regression(m) <- y ~ z+v
regression(m) <- s ~ exp(0.6*x-0.5*z)
distribution(m,~x+z) <- binomial.lvm()
distribution(m,~cens) <- coxWeibull.lvm(scale=1)
distribution(m,~y) <- coxWeibull.lvm(scale=0.1,shape=~s)
eventTime(m) <- time ~ min(y=1,cens=0)
if (interactive()) {
    d <- sim(m,1e3)
    require(survival)
    (cc <- coxph(Surv(time,status)~v+strata(x,z),data=d))
    plot(survfit(cc), col=1:4, mark.time=FALSE)
}

##################################################
### Categorical predictor
##################################################
m <- lvm()
## categorical(m,K=3) <- "v"
categorical(m,labels=c("A","B","C")) <- "v"
regression(m,additive=FALSE) <- y~v
\dontrun{
plot(y~v,sim(m,1000,p=c("y~v:2"=3)))
}

m <- lvm()
categorical(m,labels=c("A","B","C"),p=c(0.5,0.3)) <- "v"
regression(m,additive=FALSE,beta=c(0,2,-1)) <- y~v
## equivalent to:
## regression(m,y~v,additive=FALSE) <- c(0,2,-1)
regression(m,additive=FALSE,beta=c(0,4,-1)) <- z~v
table(sim(m,1e4)$v)
glm(y~v, data=sim(m,1e4))
glm(y~v, data=sim(m,1e4,p=c("y~v:1"=3)))

transform(m,v2~v) <- function(x) x=='A'
sim(m,10)

##################################################
### Pre-calculate object
##################################################
m <- lvm(y~x)
m2 <- sim(m,'y~x'=2)
sim(m,10,'y~x'=2)
sim(m2,10) ## Faster
}
\author{
Klaus K. Holst
}
\keyword{datagen}
\keyword{models}
\keyword{regression}
lava/man/predict.lvm.Rd0000644000176200001440000000261613162174023014472 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.lvm}
\alias{predict.lvm}
\alias{predict.lvmfit}
\title{Prediction in structural equation models}
\usage{
\method{predict}{lvm}(object, x = NULL, y = NULL, residual = FALSE, p,
  data, path = FALSE, quick = is.null(x) & !(residual | path), ...)
}
\arguments{
\item{object}{Model object}

\item{x}{optional list of (endogenous) variables to condition on}

\item{y}{optional subset of variables to predict}

\item{residual}{If TRUE the residuals are predicted}

\item{p}{Parameter vector}

\item{data}{Data to use in prediction}

\item{path}{Path prediction}

\item{quick}{If TRUE the conditional mean and variance given covariates
are returned (and all other calculations skipped)}

\item{\dots}{Additional arguments to lower level functions}
}
\description{
Prediction in structural equation models
}
\examples{
m <- lvm(list(c(y1,y2,y3)~u,u~x)); latent(m) <- ~u
d <- sim(m,100)
e <- estimate(m,d)

## Conditional mean (and variance as attribute) given covariates
r <- predict(e)
## Best linear unbiased predictor (BLUP)
r <- predict(e,vars(e))
## Conditional mean of y3 given covariates and y1,y2
r <- predict(e,y3~y1+y2)
## Conditional mean given covariates and y1,y2
r <- predict(e,~y1+y2)
## Predicted residuals (conditional on all observed variables)
r <- predict(e,vars(e),residual=TRUE)
}
\seealso{
predictlvm
}
lava/man/pdfconvert.Rd0000644000176200001440000000152213162174023014410 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pdfconvert.R
\name{pdfconvert}
\alias{pdfconvert}
\title{Convert pdf to raster format}
\usage{
pdfconvert(files, dpi = 300, resolution = 1024, gs, gsopt, resize,
  format = "png", ...)
}
\arguments{
\item{files}{Vector of (pdf-)filenames to process}

\item{dpi}{DPI}

\item{resolution}{Resolution of raster image file}

\item{gs}{Optional ghostscript command}

\item{gsopt}{Optional ghostscript arguments}

\item{resize}{Optional resize arguments (mogrify)}

\item{format}{Raster format (e.g. png, jpg, tif, ...)}

\item{\dots}{Additional arguments}
}
\description{
Convert PDF file to print quality png (default 300 dpi)
}
\details{
Access to the ghostscript program 'gs' is needed
}
\seealso{
\code{dev.copy2pdf}, \code{printdev}
}
\author{
Klaus K. Holst
}
\keyword{iplot}
lava/man/sim.default.Rd0000644000176200001440000000503313162174023014452 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim.default.R
\name{sim.default}
\alias{sim.default}
\alias{summary.sim}
\title{Wrapper function for mclapply}
\usage{
\method{sim}{default}(x = NULL, R = 100, f = NULL, colnames = NULL,
  messages = lava.options()$messages, mc.cores, blocksize = 2L * mc.cores,
  cl, type = 1L, seed = NULL, args = list(), iter = FALSE, ...)
}
\arguments{
\item{x}{function or 'sim' object}

\item{R}{Number of replications or data.frame with parameters}

\item{f}{Optional function (i.e., if x is a matrix)}

\item{colnames}{Optional column names}

\item{messages}{Messages}

\item{mc.cores}{Number of cores to use}

\item{blocksize}{Split computations in blocks}

\item{cl}{(optional) cluster to use for parallelization}

\item{type}{type=0 is an alias for messages=1,mc.cores=1,blocksize=R}

\item{seed}{(optional) Seed (needed with cl=TRUE)}

\item{args}{(optional) list of named arguments passed to (mc)mapply}

\item{iter}{If TRUE the iteration number is passed as first argument to
(mc)mapply}

\item{...}{Additional arguments to (mc)mapply}
}
\description{
Wrapper function for mclapply
}
\examples{
m <- lvm(y~x+e)
distribution(m,~y) <- 0
distribution(m,~x) <- uniform.lvm(a=-1.1,b=1.1)
transform(m,e~x) <- function(x) (1*x^4)*rnorm(length(x),sd=1)

onerun <- function(iter=NULL,...,n=2e3,b0=1,idx=2) {
    d <- sim(m,n,p=c("y~x"=b0))
    l <- lm(y~x,d)
    res <- c(coef(summary(l))[idx,1:2],
             confint(l)[idx,],
             estimate(l,only.coef=TRUE)[idx,2:4])
    names(res) <- c("Estimate","Model.se","Model.lo","Model.hi",
                    "Sandwich.se","Sandwich.lo","Sandwich.hi")
    res
}
val <- sim(onerun,R=10,b0=1,messages=0,mc.cores=1)
val

val <- sim(val,R=40,b0=1,mc.cores=1) ## append results
summary(val,estimate=c(1,1),confint=c(3,4,6,7),true=c(1,1))
summary(val,estimate=c(1,1),se=c(2,5),names=c("Model","Sandwich"))
summary(val,estimate=c(1,1),se=c(2,5),true=c(1,1),names=c("Model","Sandwich"),confint=TRUE)

if (interactive()) {
    plot(val,estimate=1,c(2,5),true=1,names=c("Model","Sandwich"),polygon=FALSE)
    plot(val,estimate=c(1,1),se=c(2,5),main=NULL,
         true=c(1,1),names=c("Model","Sandwich"),
         line.lwd=1,density.col=c("gray20","gray60"),
         rug=FALSE)
    plot(val,estimate=c(1,1),se=c(2,5),true=c(1,1),
         names=c("Model","Sandwich"))
}

f <- function(a=1,b=1) {
    rep(a*b,5)
}
R <- Expand(a=1:3,b=1:3)
sim(f,R,type=0)

sim(function(a,b) f(a,b), 3, args=c(a=5,b=5),type=0)
sim(function(iter=1,a=5,b=5) iter*f(a,b), type=0, iter=TRUE, R=5)
}
lava/man/gof.Rd0000644000176200001440000000543313162174023013016 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gof.R
\name{gof}
\alias{gof}
\alias{gof.lvmfit}
\alias{moments}
\alias{moments.lvm}
\alias{information}
\alias{information.lvmfit}
\alias{score}
\alias{score.lvmfit}
\alias{logLik.lvmfit}
\title{Extract model summaries and GOF statistics for model object}
\usage{
gof(object, ...)

\method{gof}{lvmfit}(object, chisq=FALSE, level=0.90,
   rmsea.threshold=0.05,all=FALSE,...)

moments(x,...)

\method{moments}{lvm}(x, p, debug=FALSE, conditional=FALSE, data=NULL,
   latent=FALSE, ...)

\method{logLik}{lvmfit}(object, p=coef(object),
   data=model.frame(object), model=object$estimator,
   weights=Weights(object), data2=object$data$data2, ...)

\method{score}{lvmfit}(x, data=model.frame(x), p=pars(x),
   model=x$estimator, weights=Weights(x), data2=x$data$data2, ...)

\method{information}{lvmfit}(x,p=pars(x),n=x$data$n,data=model.frame(x),
   model=x$estimator,weights=Weights(x), data2=x$data$data2, ...)
}
\arguments{
\item{object}{Model object}

\item{\dots}{Additional arguments to be passed to the low level functions}

\item{x}{Model object}

\item{p}{Parameter vector used to calculate statistics}

\item{data}{Data.frame to use}

\item{latent}{If TRUE predictions of latent variables are included in output}

\item{data2}{Optional second data.frame (only for censored observations)}

\item{weights}{Optional weight matrix}

\item{n}{Number of observations}

\item{conditional}{If TRUE the conditional moments given the covariates are
calculated. Otherwise the joint moments are calculated}

\item{model}{String defining estimator, e.g. "gaussian" (see \code{estimate})}

\item{debug}{Debugging only}

\item{chisq}{Boolean indicating whether to calculate chi-squared
goodness-of-fit (always TRUE for estimator='gaussian')}

\item{level}{Level of confidence limits for RMSEA}

\item{rmsea.threshold}{Which probability to calculate, Pr(RMSEA