semTools/0000755000175100001440000000000013002716223012071 5ustar hornikuserssemTools/inst/0000755000175100001440000000000013002400120013030 5ustar hornikuserssemTools/inst/CITATION0000644000175100001440000000166613002400120014176 0ustar hornikuserscitHeader("We think that the development of the package is a collaborative work. The maintainers cannot take the credits of others' contributions. If it is possible to cite a paper describing the development of a particular function (e.g., permuteMeasEq), please cite that paper. Otherwise, please use the following citation for the package as a whole:") year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date, perl = TRUE) vers <- paste("R package version", meta$Version) url <- "https://CRAN.R-project.org/package=semTools" citEntry(entry = "Manual", title = "{semTools}: Useful tools for structural equation modeling", author = as.person("semTools Contributors"), year = year, note = vers, url = url, textVersion = paste("semTools Contributors. (", year, "). ", "semTools: Useful tools for structural equation modeling. ", vers, ". 
Retrieved from ", url, sep = "") )semTools/inst/doc/0000755000175100001440000000000013002400141013600 5ustar hornikuserssemTools/inst/doc/partialInvariance.pdf0000644000175100001440000041405613000201061017737 0ustar hornikusers%PDF-1.5 % 3 0 obj << /Length 1840 /Filter /FlateDecode >> stream xڍ˒6잯̚)R^:Nδ${KrP,z$ ,mm/ 7tbO;0٥ƈ\j!z?$~wX&8;zBfK?^ѰtNq*Ң3:.<ʾ.EZ.DSt,bv](b}?o ʣz(w~/~hˮ&Dnp~.bI}ɢꠋ!I*1TtJ= \AK8cc[FP'ƞ-a ?lӃ ]Ww纫XtÄvr 3aL_-6*mE/5s#bp[],2DO nVJ(c^|[U*t2sPӥ thDIs:(g5X+ĻY-Xl!"m=]{A@F.Mb.W{3CƋa0 ` F YT2G{s Dc&#"L<TSk%so!ޛ x!c9S#b)Ui.jH Dd~ECOE3\d3l(b;/"m}p 5oG%gU-?2AN#!h+3M[wwXF%P %"79 ў#y"BTPC`B̤JBp\֙ u3;,DBq[ $d<4\_Q.t#T҇zPRND\T oӊ@_f@S T FpSD6+U6_BV !iC>y&q#Q:^}FC}ZFLU vz{@88^ԂTּV7^ZTCެ*ʉ=,I}Mp*vo5p_ | zEGo `E-E%~3s79/XSM⠽,$/aik4eib3_؀JF>t$z7: 9jI % {ntCc(t|mja@POZ6IB\vv |p|>!z-X߂6oIeB'yBtˉ"FXKwђFTNj:f'8PcQp{:Pnv uHn|B8Vپbv;0g#t'lƜ-]3\[4d&i@qgZ?ċQ#?eMc uU ܱ(&.;FOuuP4ufJ$ PvT㘏UAHU3\ sUC;@ph[r.Ӆy '"$aD D3,< %E,CFZlR@e Fe;͔>IAr80zw8%3Kꇺm.ԅ6PG6{ep5&^OWK϶쏨0gVk+; ܘPǺ:_ F a;!=z*j1Xp@)0Sfvŵcz׽]00*l"Z_^- XN02ܟD 5fZ|5ù0) Dyr[wUW Cnsk_lj:nM0_V42. \9=hhw=p=!ޏ-* )P)R6gxt?G endstream endobj 2 0 obj << /Type /Page /Contents 3 0 R /Resources 1 0 R /MediaBox [0 0 595.276 841.89] /Parent 10 0 R >> endobj 1 0 obj << /Font << /F16 4 0 R /F17 5 0 R /F28 6 0 R /F29 7 0 R /F32 8 0 R /F30 9 0 R >> /ProcSet [ /PDF /Text ] >> endobj 13 0 obj << /Length 2367 /Filter /FlateDecode >> stream xYK۸ϯ-T'xˇq*rjʩ83$eg?n<P攋FCOLgaIQ2T&n}̻Dz[]K#lR2/ܯoІCs8~SERoW\i[vaju8#{4a[mVI"DYr q,*pj[{Og+ߝ(QA%qc3l? 
>(n@d'jhhՔ!"L!'H@eM⃧9u&̓=1Z(<'C/o L}Ҏ sYa:T:( qFf*)̐Pv'Bٻ`pPŊ7,)+ F{OTє%E`OZk 8h+21 ZT( 2@.t/DUV$W ^ьe\kRh#d]}L _R&Hw 6?b LlV@lž>a{DCe`s5(*ؑ΂Ѿ=VB9$`CKMB3baQblTMom%e8n=p&͝$~<`K}Q.50p+jdX&r3-c:([lnon޼C-:B4pA($y/ɡ&s١S1*N8;깲*|bwn{S"^y QO`vm=bG7r:{xTw9^c8+_Mu9PamRնW F *cOrf #<][ :3V /m {x?=z&)Hp /#b$r<>tzL)?cOLctuYpvh2k;P `u_f 0?c\+ hx_` M Tr[bx9>Q i]տA2zWs"].h~7ݐp0͋4jRs(Tu>]F9<ەajKW뾞kp],?jD&XX{ica7 88mߋ'~/LlXƝ_h@/BXO=uF XYk)_- *ks.So:%)hϤ8!9&!!G _jL$4q>y69~t HC 1 9pTP3Ŝm$hOT E8c &C *H'*X(RP<6&0kq~{S}]l8W,T*9-x2Rp?Z`EiV<,܏ -XӣhHH;kݡ> 撍/]k n^[13;#e;cWrlTw+ xoΈj<[aFv,mOw&q]"u[:*e @s?4xږj_U6,3aW*#btrDlj7KކWt$nẍ_MdVG_{p Ŧ%Z @ 0aL1-:ذ4>cUep&ًu*=ř>] TgCYv8ϣ6^ (AYp&; QBE Cd8|a. :I+Hf/ LxYl|Z_B}:`w\ z<.ζ FvЗB=ċ1X^x 6ƠH`{hr;%ap6Q?r2opčqE}퇍a]JO.-Őu74s+#WşUr,Y;wAN{vuwd=+`{:9_n?,i>G덛-z;|O\(~ZPvp'aP^7lOM hOYڹ@v$h2I&% :v$AHBgՁyv2#D ֛phqƹi~2U (хgJʇ=.wŽ+/t$NocxS!&%׻tqPr:raTo_Z=v1-sP\K"/ endstream endobj 12 0 obj << /Type /Page /Contents 13 0 R /Resources 11 0 R /MediaBox [0 0 595.276 841.89] /Parent 10 0 R >> endobj 11 0 obj << /Font << /F17 5 0 R /F32 8 0 R /F30 9 0 R /F33 14 0 R /F38 15 0 R /F35 16 0 R /F48 17 0 R /F36 18 0 R >> /ProcSet [ /PDF /Text ] >> endobj 21 0 obj << /Length 2345 /Filter /FlateDecode >> stream xڽYK6ϯSo%z+S[e@K)Jd6 %rm`* HjH+1Лq' Vps6\$ȈCi 8 !%ظ] 6`C8C,cj5Cpx)ZPeJJ)dq WJE}*;ϖ}mY:}h˾TECQבסVC|@l NșG$Rj.ڽu^L޿~+;2Czey޴@xi@0UxSͩ404CsnC F*ӖMU ^8仔3oWlz/,b[Nv꣗<"A MӖuSq"ydq꼀~Dr#PnN}}U]T"_u-֯ VeB'KD7JݙR6(ヂ# 6=+NL/ CWY@q!y*-!.U%Y@1 `π`4nb+ zq)a $9 !S ÂO-m>ʦz$ T}Һm +9;<wYPI=2RY"A-l<3Ϝpǧf (< 6!M)0K|X1> |2<17™!μy zAuSԛS]b8|5u݄ɤf,b]Ll,1 ߠwT;( DP&جIn9~g`올]ޕS$TGHi7tЫ9%st1=5؝U9:ґ1`7CS!s' K#guD9]149]*9$*_MOKiC:.jޫeM)pp-vmtt}s].(~1}x,.C>R#lK` `\yOue/XqCM9> g9G #Ph~q[úQ[݌ЬpV}P$ QC`ՄFۢ]f7LH0 $L66҂~q(RRFM#/F D   8>1oIuqUTsG[:svY= r${_+ &^'.PMRUaeFtTw>}T/lڦPHGH2Q0qB"zU%Ѩa[!jF z#(b8j?9@ }%: )J$$~0x>;}DZAb 1pseև9Δ0,"C,* IJtR f#k6U=@qeHV9+4?K,'?qwmL V!/}E+nmXv җ‚넺[SfG;h_`s]xC6DxI7r28W;8!Psj08tn-JN+1+-*B)CQh#?ra_/}52k0.!O_>r'bm ~fZ@CfR_#S?GI zzF*"oDK"R=Sg).ݑ[rO^1 [?B$W!i|1ޤbc(KZJ1'?x 
endstream endobj 20 0 obj << /Type /Page /Contents 21 0 R /Resources 19 0 R /MediaBox [0 0 595.276 841.89] /Parent 10 0 R >> endobj 19 0 obj << /Font << /F32 8 0 R /F17 5 0 R /F38 15 0 R /F33 14 0 R /F35 16 0 R /F28 6 0 R /F29 7 0 R /F48 17 0 R /F36 18 0 R >> /ProcSet [ /PDF /Text ] >> endobj 24 0 obj << /Length 1918 /Filter /FlateDecode >> stream xYKs6Wz!ē@:9؝&N[-2g(éIAvzEc.~_yOH׷ 4/TzZwbUtwyw79( 3 )$_,) Gf:ҩ^VyM]D2B9Zтӱ-{WT%퍽Vk7\,Tգ[ 7KXF-Œi SI6IKF%  yF1qwk,ZTPlsή1Veuơ85RnWTOֹWY{s\OWþ?+͈fkLΓMW6ַ.[+{aߺ F9/<LCMkZcK L˰wu"`6ws ==6pZ,:(5h^x:VH_$g6ZZpkZ<^E5T`H΁?y}7rZW^R{G%PDa}(Š7{,+ǮwǜpGpyq(T$A(E9$%G$L|n6 @T} ,[6f2@O؜?k'ц J9r«gx`'6 Fa],M A2310TF,e,N XߓP="gxՃ%k v&u;v[@@m2sb2_T^Ix?@wIyfN|IayjT{p!GlGn@VGu1dsAc7m&u[!NSn8KI`}g endstream endobj 23 0 obj << /Type /Page /Contents 24 0 R /Resources 22 0 R /MediaBox [0 0 595.276 841.89] /Parent 10 0 R >> endobj 22 0 obj << /Font << /F17 5 0 R /F32 8 0 R /F33 14 0 R /F35 16 0 R /F48 17 0 R /F36 18 0 R >> /ProcSet [ /PDF /Text ] >> endobj 27 0 obj << /Length 2114 /Filter /FlateDecode >> stream xYKsWHUxu+J=N܆{wH ,R{,<2a ,2ܧ攖p/bkB9j-9/]_lk4[ jЦ$RFWs[لj^8E2-6E zfdaF6*v_S"8\#cy ;ߺK/#!? `b%5]&b֚9mJ53Jnkϊm,{)3^nOڄϩ3bL5D"}//)Yഀ\+o[ B % _V%-" 跇iZ> (]>Scj"Lgʼn^5J$-P <ȷ)~7|dV)L}.~> N< qÐ?WOS#HG>!h`  F졦EbZ rZnja|7T[0@#Wŵ) n#/V 'ʠ; 80] M\#nEG81PPm/Û=tG;/@sƅWS(\ڹے dUNƏ6P\53,/>(H۵x_@@}[zڠ3 ļ$Q5w\Twrt0*krRn܍m!V6IBEǘi<eB.Pίòt~sc+i%ZY(#C!zS5 VEw>bP\Mຘ\Fަ=UX|Bԓ(9.ztxZEJ'K.~Oi2n p"45pq}C- g=dx^-}// e>p[Y`>=ȓs إ5U."m6Z_''lT*Rg> ۃ#bq3m/8>! 
NBoah*oҧa}*ԣ>Th;Z"M,|NW& Yq˷;@:m5&\gpC~ et-F8KЅs&6Ml)M_ƞY#kz͚QI'٭ i:Ѱ3ɛOkëm[a8.e 5~%<:0tT>[VhX0P]yʬG׾"K0r7cH ET32s6eRsAܫIy|MKs5uv 6OChy˳JŁH0GtA f +)$H580DTWfئ>8(S])1q82jjVk݉Pvt<^@N@ 9 %=Pzlt `s;qud?K=^(r 8^Fw!yʄL늳.hgXOqi'@p&7Y"d7YF.'|ztXL7 endstream endobj 26 0 obj << /Type /Page /Contents 27 0 R /Resources 25 0 R /MediaBox [0 0 595.276 841.89] /Parent 10 0 R >> endobj 25 0 obj << /Font << /F48 17 0 R /F17 5 0 R /F32 8 0 R /F33 14 0 R /F36 18 0 R /F35 16 0 R /F30 9 0 R /F38 15 0 R >> /ProcSet [ /PDF /Text ] >> endobj 30 0 obj << /Length 1482 /Filter /FlateDecode >> stream xڭWM6WT@S"`/if.C֢m5c[w|g8c; zh8|Zܿ'%'B0gܯ&2K;Icb>~t1q3RN4s`~Nf2K'Y'dM";@HƅH0İcLyᦥ/2Tgj 9=yD #Qb:Uχ=Lrńh ̒{ /J?i"o__$E9aғa:}Ak m]G99CD/h^FUsܠSB[I4)3K kB 3iGp1c+F- ~%BXGY;Y5,bSZoXیm%lm3ZpwtWRy}/bt tFޏ7qф3 AB]yהC7teްbaTi]-ʃBI&L&b Q j޷ c~XaWUE9lσۤu>M\$PfԨ|l@@q;O٠FpN~d=Ez[V{2_MDmʪ*w'dEZ~NP8{JKb/(uWc7JƓfOP2Fv p#r=(;CPW~D7a- nW> gߏ뾆)ӑNaUIK9 "0+y q MBv4}[T|6_%phX"2)g>AGU1a!,ij,j !9RdWV"+;Hn` D#4>h:-%}F> P=`BO.mJtϪ1W|}8ՔOsWK2u _8*yKۡTSףv)xjUAf>$`é^ Qc$'JgsF46g8]k_.x5hiiD_OWDݜ endstream endobj 29 0 obj << /Type /Page /Contents 30 0 R /Resources 28 0 R /MediaBox [0 0 595.276 841.89] /Parent 10 0 R >> endobj 28 0 obj << /Font << /F32 8 0 R /F36 18 0 R /F33 14 0 R /F17 5 0 R /F35 16 0 R /F48 17 0 R /F29 7 0 R >> /ProcSet [ /PDF /Text ] >> endobj 31 0 obj [826.4 295.1 826.4 531.3] endobj 32 0 obj [562.5 562.5 562.5 562.5 562.5 562.5 562.5 562.5 562.5 312.5 312.5 342.6 875 531.3 531.3 875 849.5 799.8 812.5 862.3 738.4 707.2 884.3 879.6 419 581 880.8 675.9 1067.1 879.6 844.9 768.5 844.9 839.1 625 782.4 864.6 849.5 1162 849.5 849.5 687.5 312.5 581 312.5 562.5 312.5 312.5 546.9 625 500 625 513.3 343.7 562.5 625 312.5 343.7 593.8 312.5 937.5 625 562.5 625 593.8 459.5 443.8 437.5 625] endobj 33 0 obj [777.8 277.8 777.8 500 777.8 500 777.8 777.8 777.8 777.8 777.8 777.8 777.8 1000 500 500 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 1000 1000 777.8 777.8 1000 1000 500 500 1000 1000 1000 777.8 
1000 1000 611.1 611.1 1000 1000 1000 777.8 275 1000 666.7 666.7 888.9 888.9 0 0 555.6 555.6 666.7 500 722.2 722.2 777.8 777.8 611.1 798.5 656.8 526.5 771.4 527.8 718.7 594.9 844.5 544.5 677.8 761.9 689.7 1200.9 820.5 796.1 695.6 816.7 847.5 605.6 544.6 625.8 612.8 987.8 713.3 668.3 724.7 666.7 666.7 666.7 666.7 666.7 611.1 611.1 444.4 444.4 444.4 444.4 500 500 388.9 388.9 277.8] endobj 34 0 obj [736.1 736.1 527.8 527.8 583.3 583.3 583.3 583.3 750 750 750 750 1044.4 1044.4 791.7 791.7 583.3 583.3 638.9 638.9 638.9 638.9 805.6 805.6 805.6 805.6 1277.8 1277.8 811.1 811.1 875 875 666.7 666.7 666.7 666.7 666.7 666.7 888.9 888.9 888.9 888.9 888.9 888.9 888.9 666.7 875 875 875 875 611.1 611.1 833.3 1111.1 472.2 555.6 1111.1 1511.1 1111.1 1511.1 1111.1 1511.1 1055.6 944.4 472.2 833.3 833.3 833.3 833.3 833.3 1444.4 1277.8 555.6 1111.1 1111.1 1111.1 1111.1 1111.1 944.4 1277.8 555.6 1000 1444.4 555.6 1000 1444.4 472.2 472.2 527.8 527.8 527.8 527.8 666.7 666.7 1000 1000 1000] endobj 35 0 obj [795.8 801.4 757.3 871.7 778.7 672.4 827.9 872.8 460.7 580.4 896 722.6 1020.4 843.3 806.2 673.6 835.7 800.2 646.2 618.6 718.8 618.8 1002.4 873.9 615.8 720 413.2 413.2 413.2 1062.5 1062.5 434 564.4 454.5 460.2 546.7 492.9 510.4 505.6 612.3 361.7 429.7 553.2 317.1 939.8 644.7 513.5 534.8 474.4 479.5 491.3 383.7 615.2] endobj 36 0 obj [531.3 531.3 531.3 531.3 531.3 531.3 531.3 531.3 531.3 295.1 295.1 295.1 826.4] endobj 37 0 obj [622.8 552.8 507.9 433.7 395.4 427.7 483.1 456.3 346.1 563.7 571.2 589.1 483.8 427.7 555.4 505 556.5 425.2 527.8 579.5 613.4 636.6 609.7 458.2 577.1 808.9 505 354.2 641.4 979.2 979.2 979.2 979.2 272 272 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 272 272 761.6 489.6 761.6 489.6 516.9 734 743.9 700.5 813 724.8 633.8 772.4 811.3 431.9 541.2 833 666.2 947.3 784.1 748.3 631.1 775.5 745.3 602.2 573.9 665 570.8 924.4 812.6 568.1 670.2 380.8 380.8 380.8 979.2 979.2 410.9 514 416.3 421.4 508.8 453.8 482.6 468.9 563.7 334 405.1 509.3 291.7 856.5 
584.5 470.7 491.4 434.1 441.3 461.2] endobj 38 0 obj [726.9 688.4 700 738.4 663.4 638.4 756.7 726.9 376.9 513.4 751.9 613.4 876.9 726.9 750 663.4 750 713.4 550 700 726.9 726.9 976.9 726.9 726.9 600 300 500 300 500 300 300 500 450 450 500 450 300 450 500 300 300 450 250 800 550 500 500 450 412.5 400 325 525 450 650 450 475 400] endobj 39 0 obj [514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6 514.6] endobj 40 0 obj [816 761.6 679.6 652.8 734 707.2 761.6 707.2 761.6 707.2 571.2 544 544 816 816 272 299.2 489.6 489.6 489.6 489.6 489.6 734 435.2 489.6 707.2 761.6 489.6 883.8 992.6 761.6 272 272 489.6 816 489.6 816 761.6 272 380.8 380.8 489.6 761.6 272 326.4 272 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 272 272 272 761.6 462.4 462.4 761.6 734 693.4 707.2 747.8 666.2 639 768.3 734 353.2 503 761.2 611.8 897.2 734 761.6 666.2 761.6 720.6 544 707.2 734 734 1006 734 734 598.4 272 489.6 272 489.6 272 272 489.6 544 435.2 544 435.2 299.2 489.6 544 272 299.2 516.8 272 816 544 489.6 544 516.8 380.8 386.2 380.8 544 516.8 707.2 516.8 516.8 435.2 489.6] endobj 41 0 obj [525.4 499.3 499.3 748.9 748.9 249.6 275.8 458.6 458.6 458.6 458.6 458.6 693.3 406.4 458.6 667.6 719.8 458.6 837.2 941.7 719.8 249.6 249.6 458.6 772.1 458.6 772.1 719.8 249.6 354.1 354.1 458.6 719.8 249.6 301.9 249.6 458.6 458.6 458.6 458.6 458.6 458.6 458.6 458.6 458.6 458.6 458.6 249.6 249.6 249.6 719.8 432.5 432.5 719.8 693.3 654.3 667.6 706.6 628.2 602.1 726.3 693.3 327.6 471.5 719.4 576 850 693.3 719.8 628.2 719.8 680.5 510.9 667.6 693.3 693.3 954.5 693.3 693.3 563.1 249.6 458.6 249.6 458.6 249.6 249.6 458.6 510.9 406.4 510.9 406.4 275.8 458.6 510.9 249.6 275.8 484.7 249.6 772.1 510.9 458.6 510.9 484.7 354.1 359.4 354.1 510.9 484.7 667.6 
484.7 484.7 406.4] endobj 42 0 obj << /Length1 1826 /Length2 10906 /Length3 0 /Length 12047 /Filter /FlateDecode >> stream xڍT.P$.E @ݭ-.Ŋ[)R@q+Vb̙s{WJg߽/R`yG+7'@FUڀŃȨqG@"AO2Y듡#fp p pqxcȂ!UN#(X۸>#ł-,,;@ XU )hzWQWW' Ãgex@\mZ`0l MrEcqShO{Oڊ*u'0Oc? 5w@pYX8:8^5 b˫pz@P߆ {';b22t@^Jzb? Յb#w6A-ePWB``{:\;? M 8eya#`g;%o?'G'  r\an`?+aps,!s5O'1Ot0'i\??P7~[~Z r$Vç)ZAV;db/TͿ ?*sr?U|odtKTӿS_?xjSjW9<_`=ݒӘq%` iGBۮ?JQxpl}gOc𙃵AMf J[\coChyz{g5ي1;FþT*tJ-{g_ ;fN%\g7!g3ySyhxD$eebQKSHUܪo`zYcr^w]QcgJ3UTi:dt0R DSB3 m/mt<c1oqMmmQ4 OJIڛtk4^avzYi{NwvrXfUs) k֘6-#mlA(}>@[Ѓ${SSajgzu3i³4%#@PldF-w6;|<.x֮ثwoDf ,ብK7s3L별?#fR9Z8F +;Cӓ6.sɸT2aHHmAFc3+p_ 2ov[Մ` -SCdӾޓ c1- m}yn3;1:3ڷZ1ASҬx}opQ :9C=G]+|E sIz)U)NXGM y:nW'D 7AL&A!ow': ɓXl~8DU ́!}+Ѷ-(c>R+dI^Q:HĂh:^Vg0?J%LtCʎ\%+Qh5?^V#n=KnHCD{F% FEcS"=D!'g_eZ+^Oϊ$LAhm2%-!u3 I.=8"nX~4,-vDhMa',q>Q)Bt#\.f#efv"L^Mb?-lW6@&elyG#줉A!iK u]Uݜa WNM*-u_g jqsQ2È0+F_L( 0#Y3~z_a [ 3VO }d-+[ llj>+ĐJ4Y??(NːA&Wf#-Գ$΀79z#Bd-kwFr|>>;ykovq8[J@f[lޕNۚ38e(uڙ6إ=8j '?W%#EU" p('}4-(83;)HYS4i=(YA%pNF,cܒl.y'Jl4Jd=vdbP԰e'O;6>RbU~øp2?&uI=Rc; )~ eݟ>& y zc -э$ky(A \ jύz}wQ5“28| w6Ԥ0URrθ]x.8ঢ6*f0tg~/(!SۥMul`Y{GWۭKB^hY{8e))y涒 v2R&\zeIZWk=F+nA-9Ha 5V~9{6 :W(63H_s'2 {hXyCДڜ^Jnp.T dxH]J4uz} LZvVפ:'6/f)Hޛ7tK&rq%5U|+.靐:cpTh4 ~x[L\=2*[.UX&t9nXZ^ou[W><,<̑zyVFh_gyW]jZG H}.l뭄~u*jȐ#3+I`fp#h6w{Mz;ظ$b9I O$x .sF#krPgإ1B;3P׈c@͂,zWPl9c>/Eo*7;͜ˑل(ᡪ_-뢔6IR;6]gJ,JD)n@tȼ33]yߪq&qsMWL vW 9rlk1ZP*I7PJBv{l?Lcij30:$zS}F-8`G }TXwo Fx-/vnX,{;$ڟPyu!vJtïNz:0ژ&Oeм9CUuXrZCEK#l [ \Ua{ruS*wt;ϭH2iu "`Kt{ |ў\v{9O$;T /|aC3cݩ{,2R>а1Bԁpx^g퇥4 !!e_Ln˶SXi 9|)zp&QAzoV]ZA1N9tr>N9,"t"ٯlTQ{׮>8/25cs=O[ͮZTz>`z xw\Kc)ӯr vVK[z3J0S;)3{ΑpAfvdx57l]7p9fa- zrw1ք kʒI_(Y'tǔ ~ip!(i 1.w>ҡ wGoS Mf?2p#Mp/ZKl L ާ}8W;_S*i'0_z'uUyCӈRbs[$*TJDKZNPOƵZIIc2 i˾I*9f⼴Id3ZhBhf  YTu3na:z fT{q8UZO) oIzRY#PqN'7s@5vr/&+G;nDE~~a{LJu(4{ hmo?Y~ =ɮAз+W- »ـҹaW[jYD-ZaqcvâHJ[3(pb$T=hSqy%[Gχy"*5}-ijaQJavzN}h&O=ms7xY,5C dmI(6E1W{ K'\~}j-ӾKg2kP3x>eQ2q}Ouv>|-n97|4mF],iraYLuʴ\xV'Sg( UM,L뼦eRAߡ67 '`?Cb 'fWFUex lL[Q4v1y](%(of֡E!%vG-4W  $>R^}R{1l \׀f$R 
~Vcʤ(OJbY;vo`~5WphUQиk}Y25gXYQƉdmB/J@d: ʹ2zITfλ4h#» R|꾸5}r@Tɭ…_*n/{=IUSP2;!яpJ^3oY$Z-F mTѭy H-P9qU҉po[Ż7ܰZ?NE8Gӯј*ouГT(Ԅk@<௙V:GN02o[Tk FLe!ϴ57/OM 1 CYQpOe˃GxQfGòU]XɎjˋ wk=^ *V3֡N*l. UQ Q  ?>%AGR(PWi^p*O-:(01Ə\/Dwmߪ}3l;:5^!. EU8ht `%hzݞ5!a:&9eUW E`72?v }z^ixDXKC8~3".DR=yN?^ի;})ܬ5F?8wuȠXEggY A>Q X4 ^ B-꽧.tL]R/RUi+ lЪPML]1ϧR2; /PL>/ k Ӕq)~cpbL|)fM5 , V-EH<R`L'eZQ?y.k-N#+M%:k"=?2\$_|a%A`λA:BƟvMgF3RZGۺW@!:y"'&Drim|>}I RW+=Fuh SHnvc{'ߴROy?C JAKKXlp8^r3^Q0`WOM߆~{^q"=caJVї_:hy>u?sxۇWY.y-1xӆRaOPsVFY9@[rkZYѳU]ieZ$ԝM K\u}1G`jF)k\-NmYʷ܁p3Pӏ&-7dcI9 M "߬aJ]#{ 9u0*NF10rVbQE2v@Ep2 [ ڨ~Sd8Oꆯ*'fCjm1LP SI!ת'D8JZɭ]Z"HwQ-c?x1J`hC3nC9p>"AE8*DS.QS#nmGYXPPvE{“%MSP͹^FhMua[hj@O..Q?oE>ԖA1"Kֱ!>SVxR\2F5lT#NjRaInq(R#]_}ws뾍f N. +{8&:c[G i|\sK=ی#8( [Mj6Hnytz_*j奁H#~i ]M:;teۥ;p]glїBռ݃aGe9>3%dӃ$a哅۪xJ{G`zx7ޚ{Pgffq5r5/X<4z :RAu A2% .m1=.8IY.`3P4ZK`bԙ0V|tcc³lnt+U#xTxSQ,EZX`{#8.VB-eќѕ!Hc Uvk(옦y5Ar^ݳxd[ͼ@ҠCŅ]#>nIsbY`!dvd<)5,e; LJW)x$Vѧ4س; 7rC)4 z\ h`ahqI4Q} Ezv7='WH~fp홺uyPV wKCV!};Nhq?=ʽm*<-"#?t'┩O70d'0vJ)j*:{JPX2c4}KhqIҞ/4{k%z`y{/կuDC󍖎9{5N0񘸺NhK>oN [wިkfUZl1u,Lh6<֚nҨ5 z ϦGۉoT+N:Nx57JW؟vTܚ%5mMq\DSR8e[5otLG bh4QǯH^Ln㛬Lĉ /@BG1w뤩J@o0N(/\L4j?U ;[B7$VxeS΍Ӛ FB1S+, G Xu |aWI6_!\;*Rە򇃞S` I-[޷vpsN_LDiiy QO9s\ ֈw4 PڷZژF.\^̦onQZLC3Jo۱m LٚQah3HmYC05&HY)L~zL=e{0k[Lw\aڤdf*vȳZq+N2 Cl(Lӆ]6α=t,IKn)i&dXl$S׷D 29~2;>l8tEDzNJdۮi"Gw`Z]9ng=|]=E#lp$k _3gWV1WÏ݊3AyrsLy#J'TOt#(%oNT$+n [i9`Suu+#M^$ * +1ن"= -'gwB5epx?a:8JSgXȳ_z﹵;%dCeWWw=1o`P{n>Y)dA5”^зR(NP,q;tA97bc-} h8~$Rb*Bvb2޳+D@|@*sNa!s} "~t5':9aCϼy)Q-`]6|7MS'BAy eH3W_)ĖA"=5R\ۜĮN7; 1\L D-زa rAH-Dh'QR\%.BƼUXy( {T"*@@ۇО6/zY#f!t)d3Gw M_B/xu\fF 6s⩞e7|WܮZغC)<V Hk%FSԅr.6"4Vv7)Ϣ%p=5h[,p Wj :e5}v:Sl$j.%\+u.D}<`弈nR ߧVj酯AcxjܑQ- 37t-Z> v6v(J"ㄷji|"gS4u^`*ta⡄m+P5BV%x|W'VKD 2vI0"I׼$}D8ߎTn=28LkUP[ bQ#gZ5Y̑GrS@8®XtncG;Gƚq&x\۶?N4*07H'u89kzeGul粖fT޽n)Iv9H_v6V|M yǃ=#׋'ebvtRcV+ P:U LvH0RWF"nl1/0MT (? 
E"nɷWl1ݱ4>?u#׍A-iZ0j!*RXYG2$E?jlRz/"=VoS FNjEQEEƾD&_x0ee%^Y hKohf³ꌸ6IAU|]2sQ$|.PkN D@&~\ B%KRٳT$8o;f  M9Qkg ę]ZїF}V/Pۥ834=m0 2ws:f0D.Hr%ŶI>W^^JVN$N%Fpg%Li?1@ _(ܘ CJI w d=NkXin }OԛއpdrI;;=CIo2tR)z,đ Wl!g.VfSAG wY)~Fhtb7x! QpH |%H s{SE\;x_f|'K+Vbkf{`^zAfEr&*/h CNnČ|~Z /{sXEDߏK kڙ S%`(l:!U PTɭT;7ʋ'FjW=6F 4uw endstream endobj 43 0 obj << /Type /FontDescriptor /FontName /ZATKTX+CMBX12 /Flags 4 /FontBBox [-53 -251 1139 750] /Ascent 694 /CapHeight 686 /Descent -194 /ItalicAngle 0 /StemV 109 /XHeight 444 /CharSet (/E/F/I/L/M/R/T/V/a/c/d/e/f/five/four/g/h/i/l/m/n/o/one/p/r/s/t/three/two/u) /FontFile 42 0 R >> endobj 44 0 obj << /Length1 1484 /Length2 6378 /Length3 0 /Length 7375 /Filter /FlateDecode >> stream xڍu4]׶ޢwI0^GFl1e1=х( A!J%{ae3UpCT0$/O@b An&BڔA[6x @1ID8B :0urF%$x~!(hی`B!DzHܽ'YN/ 0xA>/;4>V3/! [ yݺx mv@p@>  ?pAZ|H?$sEyoA> { >t>/0+1ҋ^9Bad8x{à ?[mN$@D@BPLT@{@~_[ A(V$}@> B'p{Ff_#~K~=08%730#_" J "11A@?۪Ԁ9=i ?s gmCC+<]+#Uo78_C0ny;7DkCj ACsrABT~=(W]nPDuxݎ^or;UL~(@ ns~>yp#~(`nG="]B@a?b^/Q$džވۘMu f`HȎ |y7e޳np?q39?!2HT8N?0^vۚpc^_^>2h'|Wb>fgI+fzC"Ooqb^ T~|=Y'yr78*|>KE~G2}z GA{IBe(󙀥j#Ato'P[YiPeO=n,όy"Ժz1KZa=E@[8ޡToN4qcFYX|܇8*"bԂ?r YE=Kȧd)V\!S:~sA9C!.?3װ\bef>O^%/DpbM6<4sMl޽~F.+V2J )vvrfñUj7|UJl# #oܪ|:&89~nְe;od N=VSXF %-^] uKr=Zl #:],_Di؋#O-7p؂9v5[᫕r%`0ٵ> Kl3{2]=yl¦{ | ~.cBVe#dnvH|י 祑Se.v +>~?&Leۖߕqa{< lmm)UIO~,߷LQzwuF;dOb0&`;I*yPc {rJLcRNΜ]3j0K=4crrGs P =op`t_~bY vnlP=h\/X dy*3:$ ^Z-%Xa8eGXҫH9~e XCp,+V1RQMPv{1SR* 4mKOΗF]FQEwJJCe 5W4>'#EWwH4znͧCdEe ~ݳ8o7:[!&.Dz{u[Yw\eEÄu2&$lר&[:Q"R'믩 iz[eR 55RCm.l^0G,*QD8ϲje/b5WrRR&izHop[ *s+zqe*amN(brRkcІxi)X'(`aMot4$H / Y'JHWx:ðȣBV3{ؖXh(?*M}WnK4φSS |TlhR/튘#ecEd=$:ĐǧNtq3A+}N/YӮO 1R xgEcץ %)FK"?2#EY$ckX~Y.8UR,dw.>?PQ)m֝ Nq [=Yw-pӦJd[[wF1N@nΏ#9'fy0u}D0zC'K_LܠM9?F F}e}0,mv OC):{߲\0WR:UmhEa= ޥ'BN/8"#09 [@p1e bt7㕩ThE 9$V&AE7h-4 xntP3tP;JBYiOCR4֣&Q%&nXT=.Y^<6fNnr@:L5J󵓫W^šL- [͘+">P5t~i.ۯ2<ۊl=4Οm{xsn`,Ĵ؈U f|<;&SwݏOxWG2cӨ4M'HP7 %30g4.Nw;Љ珈ә$Qi[Inhe q/D9їF* )0LBqlFv֖Jyi#/N1j=m%6Si<3{-*}V7Pzf٣R_- 
oHwEs>,vy␣yGZ,ib`Npo7(ekW7Ç{)5S$շm"0V<4D%m5`7[{=/C0XMok!ߩy"cm.FΕ<п0/D V98hK0jk4uO[$YBJx&4puEun=gLK@O<8 47>':MބiLV%>3*jא!v;GŝWVT UQ"h:ET6} Vw@ !-ms -oNg V"2\HCѐi:$Dal#[Č]gq H#{>*z}4=]><,AM7&7{Us15 xh iå |DÎ3ŢK_3(tk! 1XC',.MbwzDIL#尛ht~̚LiL.)l| QψL3ydgf``$+?> 4 \ snZ[]&%s \m[xǥ=t ^y:gNwycl̓ F5?O#>|! R*q&-tGѲ ^jG;悫ekd^FӵdaCw/GsIs5o2Z9'NTM)zDS84k'7 }O &%85_oEx+|Pp޵3Ǖ~3Su˂4BAsqz3ϨGWۘ>IvKX)̟J⩤qrFeHyjP*ZIUoK˻2\odϞ0Ҡ5~&C|o[0 EXfLsA٧4o 1qeiOU-N>H56?몹8)ښט4B$g%wلHFƉ0fs8붦ij;#mL % 6]ĵzivN!ǫ-inɘ:$EBɥz/BAHF09֗(DΏ?䧋A$]"؅YV-?8^0Q\=ɡUr("p?uf[b56~3,ũFҋж+8[ 'mFtpg2OI ivP+9TX.\Vƨ:';%KC1VȚÛgVSv$51"eYS@&[O鲏< -gmT r{xKUL;&U&E XpK #\5!gء˵yV*>^rZdkjR\_x\p!/.>_CPzVM<HTDRwk_1h=.2rd%;يg0`|$f pTӛ1k,WgX7/}߹]#}$|Ώ΁U˥j}K/3a* zBiļ c qr%JZ*)6sNc%~cVV%uq{s{w$@*EZs D0gss UL׃ު*ЏIC,8h$SLP`OF)zmc/.I;fk.͞{vܠOݗ#H*)$ 7B%So5"6˓O= {U4Ɖ[%]Ǵvp#c©A: 4+TR̦d>x5bXҬ{ -Nv /'`Ż" h4.J]B28y&5Dp蘶qLSBa,Rm@LJN'#D܎0Du9>C ͌zUi蝇3kLh?R&X'nW~E"tY(w Fe1k+t@.Y0efPuR)Ah}p.N]QĢp"YzO/I^d~ amx1xթnfr=*s9CSLZA}dmS9hݸ k+{g׉簓2@zS \nbZGH{|_v UɅC+Io EeO`Lw<ڝ|AjbK2D;^YUI-A(K#L-k+C2R+q]%O5j=Gc9A&Xd\}d'dFa2,W˙ ple63I!S#[w -n m>;\T7UՕJE f'bf)(iŠmnbv\Z@Xp&!-!b,3SjHif~jllNVry5:J2J{RL(,F۸ɟg}eB_9qIɈ ޱ3uܧ/D=l): wqEQ5 hVK!sa^wv#{we<eB+T)"mD.m2n}'ݠVfoVhX ~GLF|OG5 \Icpc c%r8a&ÔsNpS@Fs +gX,`TŨrD0?gc Xdy':oiD6KN1y8>Ce 3Cƿy ^ ,jHæRD8NTB(#ӃΛ٪jϩsˍ}RJ\t7JsCOq~ Ώѧl9fXp9*tLY',ta([2k59 429ęєXfcm_*pL1nXbOPG6eC9ug%5]3 /ߡ7o=gLmMt6$s7KdCniBbEA:3[իVE endstream endobj 45 0 obj << /Type /FontDescriptor /FontName /XUIVNP+CMEX10 /Flags 4 /FontBBox [-24 -2960 1454 772] /Ascent 40 /CapHeight 0 /Descent -600 /ItalicAngle 0 /StemV 47 /XHeight 431 /CharSet (/parenleftbigg/parenrightbigg/radicalbigg/summationtext) /FontFile 44 0 R >> endobj 46 0 obj << /Length1 1793 /Length2 11783 /Length3 0 /Length 12935 /Filter /FlateDecode >> stream xڍP-` .ww'h$HKݝ\wwwdf̜^սUھ72>PLbf01201 RP)>m@V<_ebzWC9+K9`ab-@LdcH[Y)DmA&0ؚёAŽXv@[!7e/j ݟ e+#-*0-^]- e)Y'kƲ:3d3`2>I2t=K߆zvVzz s=W?JH+^d c;1[ZYX-v ^ݙYZ9Z, ~0fTy!#3L\\ d`;5%o+wWk+k+ ;jmV7Bdf }1bџ '1~~y0C+Ksh1gُQ[)"bpgeг3X8? 
zc'k?Y85T 5S[4@k33~1?o.;u" {s?Tz s,^rV+bj?7Zh_XuI-_O9N4T Lt59`e}z11u ^/מҳ{]Du[XIv3H"v+y#`deFߢ?7Qo `qF\,FЫN`T.,#u33&Fп+ӿ!k6s= }{lF,^[X2[_)^ 6_s`ggO,Iz$eM?}c_Ǎ : 禭 x}M}[n+F`NoYGsu=_֢Re%~0K蚶ˋܦ/\n:]KGN\Cq! FmR!)P A@44Q@uP=HPSگE0UQ_eCƺEFvP|2 _bcj`"/iSr08o?\췏XrY$J8ٱ'8ڏdpꦩA&18;y&V w]>\ H] $<g'o`V7͑:ƚRLJ = +&h3e1~`80!4J2'9'&bx;Wvn"֜(ҮQDa% n p6qbG1 D "ns8ѐEwV6w6'PV7#Uuk&q5a\ Ul{a6Ld/Ȅh#6S?L'MmRrr8JFp"0R{N:2.h9])yŷz6[2wk4zŏ/bMoxJerAiP}I葦 ű:snQU<*Ze;V`H%v,<_qo1H }QK s,}~^H"Ť]rk bФ< SyLq|3[g)tN6[BdReT/m_[UҴhhY#O Dhy]*ť\l,q߱LUo!7J͕JQsy\eH(O]4z@k+&- K>KGQXI\8TNE"zcw?1rʰ^aDwVw] \DzW[nX G C_MɄǒxeLKPĜnEUc^ RMKOAH??T! ?\/r̶{癝5kbBڅ2ZZ\vFTPp/4~ 갢.ӔNY q`4~_bbE0Ea Kf"EDJ3clh/QAkiD:LP 繥4BˡpESy12>UJ?@Qo2VS2ݰ0+QyEmEؿz5 [~T0a(vTŸwy mLK=68AY1̃S~j?YչAK ;b ''27IӶY=3bNCAAjneʅ\/UBRjסOˎ82/ J۱Nɹ-bK!ȃqڢYsIM SbT4y oz1vėjeȔw7t0Z>f~:ݜ`/h<χ]yc)$_r]_mfdsvc̗&B%_3y݄3S w*Ш₩kELV]Is4FYC?m=XʊnRj#ACEK1 ɲx!@ eFFߖaD/IF<l܅9iBK\ǩ8 \2i˔Ȃ`60|yt9V|YɅXis;9. k WdlǴH!I3-gvQ;Hͽ사 teՖiji!|"X 5Kxw( uݣ1.WL%_ded.4Æk<$qG54.~|s>oNz3gz8C;lG3}% VM|CkVҴsېn,Kk+%sK)%И?v'~BQIw]H_E곾Ϸ jz9.HNらsi]J\1LglهIZ`<7+c+iߴ (SPqꈧg,!a`=g^: iE@*9<~׵Dn ZH:^)3 HJҚH _VM${2M2hvҨ zZN0Y]\ꥁ3Rʌ@(.;A C&wxbcgqo>r*̬FK[υˏ5}#d[LqXiw Vr#޼ |`&qe4DdtȤ˻k?ov\'RS {wJv - su*v}VUբ>~a yMKkyRAt8][`0ےG}lT_G&ݢbhbu>d\~n)v7elw 3I~f{.CTY$VLΙv$sv[\b''(!^J 7?H* cbz>\Ԙq=Ԝu 'S6Wbj6,_@ [HF؝8nnijLG,+)efvZ G.Ye@ :{U`׉Ug4m+]⽮|zQhrݓþqjmoqcvnCe\qeҠ4ዎFԾ"VVх"YGSR3WOz ]7fa.̉CX.-'晎yv;l#!WFa-(}$X ѩuL_8?gLÇHqdٱcJeUb\]x$_a7~2 l@n'|ǎ[k ߯5a\h(fp_/GB#av#M棤G?V CCR| /mt6jKM| PBdta2˒CΦ`p\ 崞 e&Mc/g:d5g?&#Ne`cYb~\c8iŴ/RR ?PG؇}QEF@2T@KiaY]$rITu Li4,؞QS6em';׈\U0H\xeSއM"YF/5b4vᦙI|\%D#;QA$6MU"\>x-HVa-xcvO9 cWNz\o=88K̽|ʣO`Ak_ mW78-ĵVܼb0}L؛UGZ[R'"j޿7q"y/ v&D SK̘HC#pUnJՠ=X FjoYηVӁO+igz('ީh.K  :ܡ 9xTU ,TF_~awhL!BOJ5o_)WjyPqmQ&6ށS}|@ V6{E%hahQ(\̩U} _aD+U u]n,i|6@VNfs-F5bdݲ=)ςE=7;|=wH8!>uuK7hg_g=Yo-F;B̩) p:S 8 ,/í(A(}p8QQqu(P^3FWm;;uȾ>v?L1P&.PQg-eCY[Mƨy_K3V^9$7S~Hv VӖ PSр:o`~‰\"*/;WSZn #b4JpOI+o'מ2$jH l6p|xsQ-y^G1JO˦:̉вA}=Vm3aJwm<_e_c;DCKFbz q=SxpR̅'|愷9z(.%W8Ɍ>5:^e9cE0NkWj‹,P8oO敻*0~Bpf/XTG9"{~1P\9|tj4xV3o Ol 
'k\qF R%:z+cvߡKT bpߐTǽ4iYͰ!V~`۞P լut^J77(b=vYѯgZ=\{B%+6u iDƟ%d:^Ztv>.6Nc98Dhdmq'Fe. ܽL5eKVE*}~YX.c!U(WXrk'z*ޏ$o I=y 25)<b 8^Px|)ewE$o3 ՃD;iX&"Olr=+m`27|Ï-2Zgǀwkʃ˸ 3amqn_ý1iHZ:yi.P* ('}b@@0^rǙ^j)cUQI5\~0k%Ms| |7J[7P_\z7 KK&¡:6tqܰa7Z3p?8sPJzb}*!mẇ-崾.~ ([ɳ\g1|C»՝]IAt!ȕȔ2zuԒ"R1J]*n跲 ܙD<(%p ZC`nCں%9'}ڨEXQ 'bKX(?'bw=%|#Чme?R%#)7%+wBo~?{s:(7,GlR5ZK8)CraGLt_"^DKpFBoK\`ƶGF0sJئcS AE罿_j/e2VL+(niJ".! )K.b +{]#o O.g 4$T0 ?> [!3G~HD̰(HJ=m7!%m Q,g (6U; Jno~X޿Zk̿xCuk:ƃU-'Oxߜ`%.S]/)=qZSySև6صr6W7wa/0;ͽo^q|*j3Eiqn33cΥL*3g+/vDT9"GwzE)ҭ@0OOI2#] Y^o06~bk{jrZFaQtYc(MϠ{x"75ڽ{F ?zXƣ؁I¢>{wㆯY5ݱmlDd|9B M{8F&VyE_df&Xtk?r fv4-4Ն: EG9.TmĎWhƆke|x5 )IMnPS,QJ1/0unIy3 hASeUŲzO9|O5zֈifLwxвIۙFi0hO|mMaԟI$pY֭<)H/npSab@|";e1QcG 6пx2)Bޞ@t@tLCvGQ_>/뚥d&`'c3*C*_?/d}.稾_*lkS)Er.PiI5[nlq5 )7%vspd qptlh3}ZeЩRieOU'/ n5!g2mNNx_\Ύ]stՒj#&ca7uZ$kpz\0)W #ޒ-aU# LU:`o?t l[ۨc=>NZG۪GI!'\X.ϧ u.Pe$6Т> TP' 6Yk;j<@ZyHvmpεsf \ .[CjVM\UTb>E.Liwk]aߙ?)_^ts{46ИK0U7eQ|Eq0OP70BbgGEWU@7"UhGSt9ѫGkV)ꩃ`c Yӳ ?ܲ)rW+dh(jI|3IkR+XOFXE[F!:QrVZl;q7by³zYP"`ZJ9IQq!l)Vy`g*5$vKs6s2_ot%y&K(:w>YyfhYf GZY݌f*>2hIgއ Ǹ--uٜ,qIbcWW+W1 V;U/8J(>$(̪m)J@s la.>aK7{/zM'!LjQʨ46#lIiYg`h-1 cD)P!5BdF>D}6lgwx|w1(:qm]t:g/pdˠڦb?3*+He p &)f6+Ҕl MC(lxڧ2^;Qp9Z Ӑkj_T~5=ފj(8ŹД9%ВOOP|'?_`_SsH& ׵l; *=6[@}曔] ]}NfO:Dh.TgX1l8XȣI)r%lq"M{W R0\gk!LjA2'ꄉa r; WVdj2&4V\6waA S}8]~!I\}Oxk,ZE_>r+Ul_ Igz*I"vU;d͔~(Bq#1ݧT@_[x.dzRQP60 *Y UV%YXǽ2+Ζb>u os@#no4FIM@xAY5KE?Nv|DqQIr 'n.{̐%a6]8KD,.e{e 4n̲s"p #CG}OޒU؍Cr%&" {sIX' Q3peBzCLf|r(}H:TʭWUyuaÞP;6%t8!D2c' 9`Ԓv$ ={"]䍯TN=}sJ~+Y+pكrU͐F-i;:)6;Pp?Ǐ N=<?z YGK`7AFmq5u!aAS%[ټIvgz=LMa}?Fh:{XҤx_Pmm>y^޸" z9nE2[}WMDu~I Fnq;_Ű1:kl>D.f,돰h032}IQ 3Ί,L"k12[r!$ LnzYFb` q uU-ITnɩЖsCT>MYFml ؝Rɫ%4XzX[e?adn1f9T~zAKnp"xvCKh xǰꪧB#1 vJY :͑) h%¨2UhFld?K3y[r)6Ka.&։ ؜X͂1g~PZmD~>q UC}vboܤ/$B/20ɽ(R8Bûx+O8%YðQZmqo$18Gjt7N3z1>!(qDu$RkBۖ{Oe>$PjjA#_S^r۫pV,i]{blyTܭP6P1BtIσ+e_p7$uTP>7y\Q o1K|l#t#D7l0WpC#nVn2.*,11'WR)~HM&LL ڻDߞ jxkɌ?]\ℏǽuAh=3Tdeqs?b r;-a[;sY'p%3~١=;cF`NMW~uHUO 6$nw7E]Sq<6ȼ:jıW7[&a4uz?I9eIE (cd\%9 ۴?$t}6 3Ed< YC~.6_;$MZi$eSg(}>h yٟJP/J:M;r-j 
e;5?;xL3I-fVTd!eۿ,2?-#HN'c}qLBVΈn%;We/ESj@ՏP y^hIlq ҇'L%u(~F^qtĄ|m7H.IE _(S\ɈuR .^P"y-U,A,:fB&B W|LJjJ\PV.k `B>z~oPY9(6͕=Ϳ2y*>&/cm *LDGͭsLcMQ`U{qoXOD*lb_oRPWS7Z{bR;%>cbݤ%|d{Ò6kX/sWYfrb`W;%02Xs+jcAEkL /x5H0;&ĢE,`J"'a J#.]q Ńpxl@ŧ  z@R| 〡RHUnPʲz2m Nd}8 >`;nyɨ:MG? lj !C7)"$Jxnά'4)ؕ/u!((&)#ճ|5$v$B/}p6.`K$s(٭❒sE3gv ID4Y𦺁J :k^֏++&͸1uʭJfD |x)3mr vsng"E@0b޼ә_QXMY]xtn9 X3 XmVXGÓraEG'(הj,TX+WaBeYֵr[Y79)Glu]Mp:w3ͧtMoapSƎ|P< y-?RCl&"= WނHZx;8.*d5 H^Tn Y5gg2ؠ>yFS/֍#iA%:o>5ќ4њ>/狝!IG>jrыHٽj_"> endobj 48 0 obj << /Length1 1553 /Length2 8077 /Length3 0 /Length 9110 /Filter /FlateDecode >> stream xڍTm-( % 3tIt7 0 0CKwwH7H#%HwtJ7}_5k=+}zh)Xa`i b $e@ 'ȁNKA؂6jT?$& !N9@Wp 9 $M!E6 J!V6z01@,bv`G h=t43 ``B ]\\Ll0GKaF aP`so%;_iV_v5 x0BPCxhPUC V+l 6?]lbf7A -VZ `@a&&[Ӈ?'7HLMnGy\fg"' q=ݍ\ 7 s'{v ( ,+wȃ 6K0 `W3+0{ '7qN`Ot7B3l l~|G+@=ϛaP[_v1)Yu 9 f{9y&&nx@ޚ]!R=0Gߦ] v??r-rح>4|bno;Ea0?!0l>7 3 h#uaB9L`7DCtIĩe˗Hu ۔2fN=֫_ոq]޶|e3}h2㒌j)G+IBȱ I<3s.5ZwU_g]yZT+kXUE4opgEE&+R ^ǹT`/.t9m!p{$P+4r_*rK0 TS̡T%FMh<$ImjGՒU`IKΚ 0ZVim 'CY_2ĐNΪk@L-:LM+<Ӊbĸl% fv埤 hX.^($S?b%usjtTX(iZRNIO1{T(zu=D=H LsrUWk:>YHԎx)W4L)ֲMPBaPx/6i9oȕUsRڌ?IpiR\\L)EP52U$KnsײSӤ3 SHu~^l_O|M^XiȂ.J :uj'Ӝ/4F8=ڕEl"k:5h EFMRQ&=4⚣qY{erJA _;»D`2/MXf^&%wxT%e7N^s5?i0hDBmBU/5!Vj}_^K胳"g98:v-N: !MßZNJ[K-UѹnxXVi\q䷖^iECI^諥mu^@kbRT,ԽŚ(Ӝ;%^ ./5<x~ቿzDZ˅ (V\\DӲO 7~z"wxlHvTN+AG&S5G aryc C um?9sH)fE/':T ^]s{{qknӝ73\:FpX,UOܶNJ9(E{=6ٲ41l/5|dc&ŷ|ȹyNn˖u.+~Z{|>D89"Vu+I]4箟f'ĤSe\׵Oڸ4 ߛ 5Ũ.&͗N~ʭ#Jr { WkU]7#^w+J@vOrXr".!p u> %)BȶON+]+ϥS 'v:zKaV:ޢ =O& #]1',I"` :^Ⱥŵ̚M[_#G-9͔ʔ# t;b^{7?3iKSJ |z 'R +bg)!Ki9\UY#~ :xd2'S* (;/ scI3o;Hp44#Xwp٪`:da2%mNl5t?c7J6s[ITŻg?+8G }ѮGҙ1Ja:6Y}G[HN1#p/F͆|UjMr2 Io.y+F%Ǚ}s q)q ^cȾ05iZӎBEq5j( }ݦ:8zepsٸzUWdZځ aeV6p_ =劦AIcpsWIYFvq͎JIY@Su+:Yn٧YZZ$<#i{"%{mK|IcqUݕ "u νvʨ=_ajJsvw2WQfG[/%BJ${No9xg8ʨYR۲'4Q4B#-oN7.Xޓ}3zPPa'SG~ж9Ͷ2/(F"td2K4)RX |~l.RcIArqNSݣ}'NjRT~|Mlt~!r.KTg(m7tԪJ "6WA.4l<5i8M_ H͎|xݼKB# mIƛ;br\IM@>[,x=Uy6pz1q,PkO~FWv1egdgˏyN^J>_{T)o,V.V qޒ_d[I3-8%s) tssk׉Kkl^lcUo ֝b5R^/Shtc7|@ւco*i#ݝN~pj54jRh6 ԝS.j>Gc$yGa \. 
4D;_#Ga,2 C hh.?sR FKLۢUR`Ugk~k!L#b2\ {ƣeN4?f˽fbpJ d>.rf~ldݛfZ# bOtA6VK,%5)qnv}&yސ1ߓbF.r]i /d9JۨMѷFz2̕D9ѩ 8GiijyڏqX =茮7A/My=J ,^fI:VGbvƏ%ÂRIQl&؛YO̓Wz96ۘ(GM<Rldg$Sێ'aȢbvFue;*K+`? ՏQ[ÅQ}Dh3m`NA >(R5a \Ѝ6Dg[<8bvoC GG$Pci_ZO4YO;L#QD .P} ;thbNxE^K(DTv޸jg":<~1b4n Fjkkr6VGf4TCcxޱ`*5HJ<:TE׺wL^q6_}Fohq!bfQ3+aw%,OҺ]>Ae-,(\U4Q7Q?<YsGLU@ԇ}&\qPdƩ_w<ڭ_;2\ǽΞN#TMn:>Yxݶ4C˘۝*c/= vJK 1l:<_=6D$B (t\Y&"_,E,v_~a8UPP!QM|sxh58^y`'){|tEQ}5zti1Dˈ}Kb!⒗43Hl(# &(v[>__#njXhRwE meEIi]_V2 ;vkSbj2?Jt Atdd8D0mo)t K f'(O'J.ʤzyXL3ѵXe%r"0{?$}K/LP8qaUI 4hڣ&bf5iVYd.n"p X}h~ˏ*'W0d!E wDە}?5\} ]AܐԺ!%Ogɂ"wűrR18:_߼Zޙ^֡bVQuZ6`;!G_"0q*N1/{=Fp8GB9~퇿:EGԈ:J~$=Bǜ77M`LfԧGYHx*G Fd+GݛK F3K=$%]q{Rɟw N߫E?66TƐ=eog ~NY(,tqvh.q8|A$Hdvx el% H}lq@E&UCOϪ@=kMiRp` j y҄ '~8m9JLؗý<>|~, ma8R _oT>g-#]kx&=Z@p[{ezү|sxVr5uy7Kͨ|T=m|"$o(1;73u6!*׬i1kd ǟXʾ j.NJ"Fƴ1Z"ѡ2]jL8f uqrj: 'תȦ:3o?1w5G}rӞd, `dBurZ8g8HЖ20|N)VOL U:s$΄c9üA=s*- ɉ 7+-U \RlZ{}GjuDL {ZL惑l c8$m`;ݿm4Jwc$xqMQ n]>張ز2%=V-|bVOþ;p7-I};=!iqٰEyh~ V`|,G$9Mȁhnhh \2KW{NWgun'_dnyEY^wzkMr|8gsG,}NZ;O=Kԫgv*G8']޶}ƻ5p{P`gʈ}=|}L[0B-PWwrM]t];Ɣ{I`#J+qGQWE;u:E\}P=gH (+Y\x]~j UX[ۍ.6vTG^ U<>.?&F.[%O:ޛ┲cQ(!F>lOW_69) V]cGmR<ܣim Gp3p48 Uuw!M`7ط9q"PȢuqU'/dy$et$ßDhP4A8Gn?} o}ex endstream endobj 49 0 obj << /Type /FontDescriptor /FontName /AEITUJ+CMMI8 /Flags 4 /FontBBox [-24 -250 1110 750] /Ascent 694 /CapHeight 683 /Descent -194 /ItalicAngle -14 /StemV 78 /XHeight 431 /CharSet (/A/B/G/P/c/g/h/i/j/l/u) /FontFile 48 0 R >> endobj 50 0 obj << /Length1 2520 /Length2 18229 /Length3 0 /Length 19687 /Filter /FlateDecode >> stream xڌP\wqwwNp][p>g'˘s-JRuFQ3+#+ @\Q OIaj R b`3$0v):lV.>Vn>  VfE&=R4V^^nv@g+Sc{%`jt4|Lv.LB +WK40@OgL K+@H`ke wyٛuY#c  p+Ʀv^Vs+[ @YJӕ`lodw)QU1blbeWMYLhW}V@Sؽ٬_M92k[9e%cY],,,ܼijWx /GJֿĠ|&~V@;S`fe 0ZX;[yX@c>^f^/(?s0s8Y<<nNFT?(koۆXAC J *4_Qߘ r[MGmlge\ABt66📬4rZYWcwV.RV@3+WS(-ZU\k,,G:o6U@qߔf;6N.< ^lV5zl3+j` F8̢A\ jFf+Yhj[ `q~#o`̲Y7 PxJ(]7S@#P>A_ Bce06q_ ;c;GIM~#P&Ʀ6@3_?G_(鿈DJ88Q'f@PEh l X^_>Nn; hj!wY;9db;=v^@?,@2? 
h6@`,45?Mw(N=u;;ԠAAJY >vP.#@?LY9^hn.I~/t&76_BmdI;0'HzV ]-ԮɻAEy @z(7Ts9&%@O)Ҽ)uch}(wY= ZF%nGdTں [~Ief؈d'gDN)ɒcѦ!"8BF /NZA6=rNn<*EҞMCU+{?fc5c>Q%qe$zG~2ws;?F"Hw^ꣻ{K/.. 4A΂OEx-V=%{_T|4x̐c6:dI`8r/V6u-ȸW+/WŌ,LUb(J)y 糅q|tоgRo>QyI '.nktFաWaQ]Vxx^*7mP;EBo ĬAZ~#|eUWf, >v0?5NVbgq2Z҇|C+:y㧶?i1_7uzhMZO,$LgNqvHZoG&x=T|+4#$O=:BoffԀ y,. cEZ]/  `4FiW7QOb-U 5H\@u0j~>-SaR۲XťOl+Laɸ=&h!Kmi`rldU2EWMBZ&-+\N ,y3(:@ J暱փc[hO &t׼ڶRiCX%BG2}-7jHT<ޘ \5؜7.m-{5-3깜-CA 눃&I 0Ab7cIif3}_xq4yv4 "`- tT۞[@HxKlKu=q[qόN"u82؂VhhdSL ,<N:*VteN/50{-X Rt!1IqCHj1(|QBitOo;tv QnumD)Ct!nA߅`V Ӷj;7uu˵!>HGZ:fz,aHO5"OyN$d66n k-Z9T Wj2Xp,ز)xW0O\sK"NcFeqj(i_cm_ '*E{܇J`xmȅuEw<žjdCR?oSmQFDT݅H'ZtZ<ZUҥYi:ƮtꒉiA1?rWjOU;Aڈgx}rOdxE6RKS,]#=ӭn3+Awav>Rץ%/*LR['sN)&Yd-,QGZaA r;uC\T'O ^G(w!L`7Th<{s Csd85j%C(_<$fF[rb98MjN3o!W.6U+[Nʉdf5u &xRڹ)(4V, {6#VCOj, Y^;$aa8dBQu9^eg L\Qt-ePn,ۚD-j" φ} WR>]&4S{IJ*Ҫ\w~ª,D(Id fNC%':fb`:䏞OfG=is/CcE:fk4t|yfFEE*&1%uPI-QöT4N3i:eI8M G5[NCZ9q Gc"Tq68]*OZ F\Mō:~ld 6p~>LdBEįHt2vB+PpDM͐c苟Na}ټGatIaW/xn!e|N{2+1Wá鞛"$}O*oB@ Lٛ6^c*!"Z>ǁ淉䁗-ז\l^RCV@젙sObC/}ω+(7h>P,j'=1 T? H?$N*WO6\^\9`M0CGKU 3.N6)ZA(k3iKD3&Wʄ sIDm|VQ E㗬-aAD}J[84ОA̓oy}ˮ<8~Jqc)IQ=Y$=A Ёd~0{z$50ؾکY ;co'@wFPich̃¬!U!o0gj!&/r{%40#vhuԕ_/ E3'!FT!|o7o}NʝPk@%Ȅ Zv8Z78:#(*r':/lN37@.VFg*)^4D7!^rpYhOzpZ[~P rwI罵-|xt{>sR1~! i#'~)TŃfJJڽPhWwciH$_`m5K_њT))*4ZԤoǚW#wp(o:f0? 
>ټ`bPC KecYT;oᢟH6γ۾lqRS X|7 w6YKԢWԄ ;6 h<2Q~Hѡ}Qe<v)R QɈ9̸3]{a[/鷚w.䞆$,(x44!k\ Gj~zQFj>@!70x(m⼈7$yaTɐ O*q7Q6eq,Y}؉cC=d$XNanvKB+rDم%oPŐ7ҡM'5}UзkQ4;O .ED%'4s3ln1]8s -^(-HZ|ڸIHw 5D2'3!DM{7[cyGKVqΜn8 vtv@ =GsѣnOe1>*5X *Bx(XN</H gW& bȿ |QX=I6sOl P`sǹB#WHOĨs,$o8g IJYK}2Dk..@q(ۀVUt = z.t7"v6+fb4afX;Թሾ)A`tLGh34]CK"K{W]‡0$+WqWvZ z]W0KơCq_Z 00P._M9͢mqh9.P j1|"DŽ;-d):De@o}g?ć:bQT{le4!جC;?`+NV`ơk f.?V#o>>0&l-f{7̢4ˮhE,TE_Gh;zwI+eo[ROoMRuTEZ6U!hZ"37bj-/X<5&8v1 "4 :vMK hof *waL~:=Yn Ar[HֶLtf2^K,rgJסFL(K〣z LuxSc|0$/'F_m[2CKC}'ѳ" n&1džM4[h0ĠqO{Z-0Zt-3DLLn`&aeǞ"ib@mz#~Aq^Dz+j[lJ!fd`R\p7IzL(gu%vI,qb}I/8g>2qo=b ÆstzZ΍qC֕r-&5*%r"ѷFMKљ Ό:ڣQ{JCdt ɵ7k@dms=%{zHn`\F4rliU7ˣ{k$} ZKWS69x>B NHx~-v%dUZ 3^Yc!FϧUYĪqt +%5+SjWʳb6D=]CTҖ׷y1ś1r|ciD kV$?9إ{CP@ۓTJ 2跈7jΨ̽Owc/*kIW-˔ !Rb1c)=h\vy% b7Gb$M>c&9 <6lҒ' \]Ynk;_qtb嚭3Yo_*5l/Ӟ اr- J#1^Z *`,N FWZR򳺻&` }A {ᛅP:7ĴӞ>Eyy QvӘ$h;O4z"DB聝vIoP*I!rE>2㿱圪YHX<3 luZѵF[j+; Yqu/e S/N3a\ЯP8] 埲{?xȷH1"R*9ᬷgE"Jjaq/_̼^X׷e1co|j^\0ןHm;XwJX! )?}(͝e^FQsjn/ʵȌXOl{In^u1ZsmL$J*})_ny"YrՍY1T#IyŰ>_띘J3Ď._Rg]\w>]|a2lݨ'08ŒSRE{ε{F: ;ŭ |)E^Ωues(cwBíi/dqJy ȣ:}Lrh܈`l V ki'jNvmtː1.='C;TO؏ϐr̨GyPμhyyz0|ܱ9J@`Uy&JT>ӳ{or$]-ӜnDfWvG[ nH/|K0r,9,l:nC G]f'q0J6c/HBK}%sId>bY;H0cj Q Y@o:T4_}7ӆ\Ogc u0OK*4RowԗA-Ts DZݰ{=*(KڼZ!Y5xf4 -هV\zt/^u? 
=i .mO';>llW4sUj%"ڌ'5km4G/bȺ-$#%0au_L?gsFkY|ȁ?۶ɨ~@[9&}ޙPGS""HFcF%lPN|~2&]S6jq6O,NA&Q%j ~?H/=떙Nb!*O9R_/|.a9XcH$OK)a &xiӣ7!yFԫP?D)XX ۙ9rauSRrJWvw" / 7nዕޛb}-Z1^ =݃9:6PdR n<3ƷTOf5%cbLR#T)~5\#ϟ+&>5%?| ԏ=t g_CX H'sKWҔ*uYTq 9$!7c&FyhHف L \z҆AjLcij.Iݛs&Z #+Q-Jͻʹ㕼+za5x;yb~ɬ,ZWxGlȠ>'8r{u# 6,uM)nFCѲ8!Andk>x.,-y)RdԞ1>S!zhc#M|rB` YZ 2 `1c6 Ui?܅@`3PkͲuL˶x=;"SYL l׳"Xƌ3osǸ1TʄU6ήWvǰ5Xۢ4TR|c y$) R"]SIDRfLÌz7<]եI_9},ULc_^J[ PP t0ԋ+xC֦Rzlܜd#ܚTe>H/d~&!Om\GwL&;HbԡuTH֔l)4UL%Όa{\ ]"L ?xv 6:) L递Xmp6t+sC[^#*1vp=5lZ(еh~[U!ݓѷuqS>(<5v KhK@7jZI%ZR$щbLJ)|lNÛͧJ+fI+&ʴP e+:1$s-XY=$LY!#Eaጦq!˄_|Ң'L!HF~;=ZPdxEA?"ͽY[IxEW Q:[ߪ L0K$#zBuu|._3A7H9 !H..xmdYh[_&u5uleO,]7@l: $oŴks-hr+P!.=&(g- 3S(Z)6f7::ܒ}>{ҋ/8/q)F k[ˍZ:=r"Ruw8&!G.x] -R#y{?8c} C/=Ѣ_Ȁdt;Y&2G/(FOIkC$Ef幛mڞ<kw]ÛRS+j7Ω߁+Q/p3'řc:aRi|1!Ր hbRN.:>su C]^55vwi"RWæQ^wg؝M%|L|SZ Z/ Z'<0{&>~o+)T=KR"D E94틳l0Q'!CRam.฽Om(W_5{Op%}]f:]`5mʘOh;+I2$D >ݜ!eȣ?ό`?&! ~Yh;ɠR~tP|,r^B#,=~7TzoJn|-yVKrfV#9>ͧ.jXf̫nm# "n`"gcmrXoE&C*L1㾵-]eT8Ɏ0p xLS!|aȄzz$.r&*dcpq%*{ʪmI :,Qg KN6FU; < ,Git!TcOur#(c-2$X?2smYЗefz5wI?>.(4^r}gS! 
H, Z(G]A\(?3Yf PI@i~BD%Bw@R^cLRs}Ne)<^T1܎E+^8G`YFׁ k95F:࣬} &IFDޕQu2;#@)B.oeshXSeI=Lɗ/LhC+2Qpssw-1hO?KV G<}Kj%RbN{3MaJmxzեtꄊSƃ>,~ֹMZNzS%AaqwJ }œ[9-X QV^{"2r!Sy p,QD?Y7F7%Dn;'WHra DX)24)>ChV, 4):f4o bz D XqH؄yӽέ8JuLϧh3q}td%&~kث$@ Jt2{/F ʗ2 z!뙵;;Y|=R+.u51%;*>yYv3vigdU(%$߸T!՘PN š\lp9F]+lݓm_˗CP_6VfUYR̕﬙Y`}R/jG)*G yh~X.Ѐ!\Nlg7ObF~SBk> 13$hwсG:}IRͲX(L?(wMa9 !/$c,{n%^WFS}PpNq!V<∖LȻ7~J;cs:\ye>d0Or?b ׾{vq{X^=d^D r[^)TYi7af L1ewsz=%7ش`Ocӹ S-{ %3 mɏCTI abݐSEd05iEThÎ_7lqٚ<(.UQ4 'RNxT}j䱝O>d.!\6Vв˝ KYIR9ω6\ .`Ҿƨ-bǑclmF7Cf\'@̳+]u䐚3;^ YGl)xϔgw#oi5YI7~YPkP&Fhz 9ez1D>jLHuQY[·J5Ipu lӆ.knRgB~zHm;(vVD'i/!GL?₀;sY^SkwW v0J^qUp%-hߔmQg#`7PN+E#02̥^Hzl)燮wFeK覮؝܍>hlAYmd1%Zt:+MVqCJFVg=ۛV)kv9LDo8I+Ϙ=mt^Mej"*ʸoh(Z-bwkxu;;ߗDjlh%T'&\~ݭlc4ti$Zmk \GYB4ة64߲jbN 5$(`V&-<.mHꂿcvjXz< ~~R}mп ?)j40zƞT9;"2a&X ,sFʪe1P?Ym/mVXro2JHmʣznzAs_#Mtgadn7j.z-BsP(q۽oS2==f j:Vs#Wg}`%$`!ۉ|3X|YÚ5$tѕcZNA Y[ ZRR5j_<+ۻV{Ts7ŝMe B&b1& HD+BkY^fyouoQ SKYnDi5LVۄY3GB_#"ty|Iy.Eн%5''KWk)XQIhmMå2\n ̋ [VznpqlJ7O .I0TӓR4+,nD 'ssinGN7mݓ+ h!.B/ArY]yH=~mxo@Ҡg?I +B߭hh$}gqj1ڤֈm %J0ò"7= U ?r']{` vrXomB 0=Ms,G_>MPl)/sn@iG 4;l>K_ J\+ oq-xN ;o23$e :A<=w+c!|ڐN<YfW1+jL ~B]=՞GlCe,tVɨ/h>$19C" ֈcmi[KXs"wBxT)oFO2?Ѱ,q;5– H6yu#,ڪ/3Ш?>n\ɇ$W qWSי(;2[kwwʠɖbT:yYÎ;k6B3w7bȖ۸偗QD }TŠѫnFsZzNz{֣F`+\gSyvJ mEiA0l*P7t'd77 %[U@ޫRZ$N| dr҈~w"38P= ңmƕt($/\Gsw9(49)?,Di*A.%8lE,;I隙8f1}W'O'^R~ߎ[Y~v+h#Vtv&ĨTS]Ksi!3s"ڱSnEylcd٬==DBΉ*J 'j X Q;#zˆ!POjV%DB{H/j ߧJg12宁*5j эNJ!HCGǤ24u̳,ŹRYYW}GmIRO(?E-,oZ3-_/+Oٕavܿ+/eYm`79@"-pPz贡w vZe<ܮ/4ꭹZ zck Xs&c|yM{7ڋw<3EېFp9aj^7Rg!5mfGUM{$ 8IF,X H.(rCٱ>>1x̌wf! ok@':7X'TlLHR?,aN|v| <->HT-tYm9 ?7%w z $9~D;@0+{)QRu2E@k\4\߄E8ne U_B٤ÛM2kEyړ"buz 9P@4%zGne'' ~!I 2w9c4UٱO! 
*)KC?#Jk˨ N$#1pkO6}qR(igE~lz PWQ-rY]P`X@6ly&Ga&WjZD덜B97X[l%%-p3$&;|Ak(u\Dwfh`wxļ˪ J(>@t )k)<**tM^H39Qd: 8]kkOyĨ3M?u4 yn_mkܵ vt7ۓvC Ľijs; U+o6AdNXzasxPsImW3Uɡ2KETfI/Sgt5Mj\/.3 ^DR_( أkwqsGilk#b| d6X팸͔1]&u "J4Ԙx~>2|QV!q9%'S?ZS&b~(#VR?e,t{}var[aSꈧ; ;%}RbD%wB6)@N!?;KkS!vI BtgEghJgˡnd#H.ܩ 2+{/vy297R4ѕLELKƣh8$/T8c%E>QV6i.Od5^k:n2&_.^B(ջ:)g@gH; mS[[>j<5 nkMI'+[(B߰fpDM*& llj}Cנ}?Y5dclUsynM?QƤvA{E>'~3qqaLL2dɝpUگLP+ú7F,yBm@  Yqr6GJgE:ݴs6!s{t^\ǗM;ԕU O~xE :)~r5Th ͨI½ 2usp/p&sUTٕpDB، '70OtGEct''dz(2XcM*|ya?feSB d8Rl'FNB", ]y8t7'+]+sE ^(ewLvXҒ/m$~+؏[d (dl}1aW#M 'Ȳem$< ubaby#XWhwhӛv;riXyQ]N 0ɐT|i-iTrC,{M !Ԡ}\9ZIa+crA}) LZDHyR89 S' AXo`_+&-Xؖt:xf$\xl+pJU:7a+bR˳Eݯr|}G; ݬ:&,Q[pUL>^Yx71$x1f.k 8MZ}2_懶@ICu[mos6WwLSau bǸBVd«D8 Σ H޷K 4a,(ܼ_ a-%~>cJJT旷! BKT 4 hPh]9]&6?22),Kn#*P!~"跎?`*Y2$.Bb~/ ~pf)L( 8O.R$j>?-7֥YDe9)zg7> Ji)x@5pL4^~$bڝji"jG%\/ңIk8}$S-خժ[vZ:Q6vA>!Q{wZ-Pmx'X5 g }XgE^ -sSQ]'za~qhظR>1/1_F?s @b+0dT!7ϩ‚`G)I#QۨWsrlD5q2vI]hc:Tq.Lk/ |PSlXjӳPtN]( BFBY(/ ?1x DMڛa!@,& StUSֱ (b59k0kpLg%/ĝ&mwOQ;Op$ŦzЄ_F`gK|95#h-U~/<{'綷>IٽʛFdE5lQoߐ1G.S-"'*nR>ϒ/> endobj 52 0 obj << /Length1 1677 /Length2 9691 /Length3 0 /Length 10767 /Filter /FlateDecode >> stream xڍT-C k,Hp . 4ҍ4 ,8wBNpGf̽{vծ:ϩ:kZJU f1K9P2$yll,llh =/36 4>$͠<% j`ggcpqH,J,yF+qtY@#ނG8@ 0̠6@- _)mPG~VVwww30]n@Koe3Xh6 ?+3lY.`K3yq"@' YNWD fG3'l *Ҋ,P( lhfy7s3ٛ? -0{< g#ԅd["4ϻ,88PIځ!`￀li[# lBf x@'†wzMOGNgގGճ/ b@]v7BcgX,s5Og3O| s~~2zn/K˪&O!ofNN37,?fjc' SG_]A0{es+t!7s: vM6s{ExdWT(AgT:Wj<b`k" Z6N9= T~5fv6=ϛ}|VKJ- 玃-8j ,`9,`qF}obM">߈**x#g߈izi7blV d{/_d6+_9/`u|N/ |._Y?H?t< @XֆV3oIe`wpBNf|-giSJtAK=rxkZۃϣIvahX]*)O'@;9NXy2u}a3j;UoK'iMgNB20z`O]]Oe>QǿA=|Y譿nk\åX%koDşqY8~gFSo|Ѝ,yx1sr%.Q'-F IbvH~(d9ֹs =~Vi/\AwuT_q=\=UIdE4Nah6bwXA13ҚJڅJp[2Ig1պ)7vN|xDW?Yż#noC>ԙ޿ ܻfذh_ o s9 %;sǩMu9c#dl9BHIK#VΘ3&ͺi (+,VH$oq-I`DJg1vi$Կɶ6~L(x O:^D ϣJ Df"e}x>Q1+`]SxCJ#6-7:NԪE|-c#^~u9fembb!/BK^yю(Ts!K |44~A$l^G buyk#?ۿ#h0Δ 㓀dgƙT6Ԉ[K4+NJPIkT.YÁ,(0:&QMۇ>| ic,omE:z=+Ps6oaxr[pڤ2xL7]-jF4/t*|QSKЕ*u#* HI.? 
iv\%QtKz.Oꄇ[;#2OŔ/m`z|(J `")&;ꑶQ'Y-3; !S}͘MaH2L3 n!DflbIs:$egs_]T2j%xY0tؼW6L}:MCm W'vi[F`~YzGK2j%l:q䈱o{Bá3ڙ|z4(& qVW []?A@k$h3#@^>{'\B!YJyH}_| 5 aQSIxynj$k/x@kيQ  ,̹k(#ؑm+su2SD*ێBo ?Ίcz"7"C(obYO%?ѳ\ 8o{ZF깵i w`n~J6g_~ y!J+;-ܾ'Ju-̇:j`]#]4mB,{+1+ƮJb~\g~R\L!dVjM*4gj4Ig+GIچq=ot,Ls0T)km F],$ab(edPM7ȷgXQ/0^Mgʓ'z`P3=S0I*-~[s rV4UC(}s O-oT)Ș@Cj{QIZ:,h%`gh}HnV:Uܤ.I]΅g 4"+s9$uPY@V>ښ椔Q}5zGQХA7}Bp:: mE 8W+Ҙ9:?~"_#1e\W+6RàF!}u/^8i":?->V{4u(1حeQ;rsR\L2v-A,!b mTN`'c2ԿjݰXԛ|9!0?4 Kt<_d~+5ncO[%! 4>ctm#0k P^\S#⛭,5Yz.#ԅȍqh9rl߸ŰDHsSKLc3nebhO2VXPtp؀CȈ!biv,q1*e}K?;KEljvP' -F `^ uo*z Z*^(#fϪi^Ɉ' v[Q q`}Xٺ }#`;7It#$Z{72YBmQ+\U/h('v7Δ+WHkEhAy:xlh4TTb~z3HtQ[FK1oeϢFJ~*Fg!Bfh0yg)0e;f!8~ѴnEUhՋ7R |%r&rlkjrlX*O2͈'8BW\oǻH#)NZ5>4'V{QsD"-RHd3WӉ/DždJ=U~ 実AC|GuH2m>wZ%xIEIH(b½)h/:b$4ڔjZvBe R_ɨhqZVY~$6T]u7qn6 (qXUr;}xJ{s"2=5; X'߲u.5tuD$M7 u6CQUS >ِљOڥxE0ΑTj""ӊ1節-I/lMst=4цn`AׄB#ab'/>} vUq'+tq>l|Z͝~tORGjf+Ȕ qJV'FC { EXdsF7fۥq$ 5e?@z)KiA~+vtMA+s(K͢ŨCLfe^B$&{6%GH!r"\ن.ǔˬ]jÈ-,rCUĥE8-`NH{%zUt맢p4x冓t/eܱogƱih$ {6߂qpK0GŔzPkpC1S=nwbidly< }h36sj,0@mNx)IjIOR:Dk8 QrI$ptX#"5xHgFUpYm5 +R6V>\4`q;w&e蛵bU.=83ݢ((K'?L8Aɂm+MvKtkUi炁oNjVŎ:KJ߉S>1wZ=B>E^`Fׂǀ? 
'+L8C&#%2cynGPkPc7h dmGמKSѶaIoN *~m*Ptk^&Db昶uH T# |ך#N{総Hd"v|-v95HueԒRy#2zIG|)Cysƚ _lǀ+z4WEtNJ# $Zȑ̉] "WL=/  t?9Գ֙=pLI&8>EOrLUgܼZ+a5z|_}|VjGm,/EWJnAS#rRJcŠB6ڑ4tvR:KoGPӓ.؜~đmlK K8s(yioE<tUDz~%9_0`J;b)9~}):|4 UTE?æqBDiwO>]kM'+MѴ;IVo&gI 3'NQ(Y Q.FVA>:3H}<4jKZ3hP%)frbߚт3B)[,mo YQʊȏ~LщU+5NjJn|nfB/ eamC!= n̄Ka XSg2W?édJjBK v p\\e 'Tk0Tdc˵^,4j0ΓpdFmyG%*S_!]8f$:$dⰣu2Ak&|hmR.PNQIW}%Ig;m|VխqS2'DXj{勄x%%&pHk?`;`D" ̗HUϠq_oݘ#!`G!n<;&E-N"1Sj,ɀWf$$#R#:`=+ZmF`_4 %kv^ ma%b#)"7)Z1 'Ҟ=_y^q>\ ĉ0 l)B$m7&'vNhCK6b]1O`.6am[{YxTU Rhu_LiK3_#|IfB~ R Ua[uh)w7n<s0@~ܯT;+֥E)p;_dQG1$U9\/wCzoyu8x6'TnM˓P+9sX^=lmyE9z"eY׼~ȰB*@T4q8걁/8]`K؅9kt] ۦ2)ٲ8j?Ou&6Ydl1s |4d2QhwE7KEe?jQ`[";;ĩ p*Jtl%M諯=ޠx+W/I 38o&~ֆ2BK^vO($ R@?}\8 kKYܛ[y)?2%9Ǩʹ5?II$Mh(0ݢIsFv C*.g߱_RYh8F?oA|'PxLਏgl %ϵ=k,=ʧ֣^=fG '?Le+Pm E)2:LYg^8vc#%D}fRxY;ճ|q޽HڰzHMX|oA^qR@&`[$eMC@0psoLjy>2=h[4-e㚿,Z g||7K?@eڃaj1g:cu#NM{răҭ?PCIYlRl K& sTF[%HY +]<)d-xвF_ C.9T nܖ}D)`c&:Ec'Zc6l;r*6Ҧi}h*شJE*$wxlqB qR5Ar3O`26T Au؈{^EAUt ր7kpm/Ґb/#BK8*$aW|0#sIvp/gD:19JXZՠ UR.Eˌx+)7_fw-2c8#@"Î]byhr3r)bdk.A T` a*^ ƃǯܽL~a:mJY9_jfyT( nl2+ S,GWp;2gm/oтYQeiͽQD+2feSy=XJ,pꄳAM:Bٖ>| k&-@1]AVv>0.@n[)չjPdP*){Z({ uVsԂg_thh;;|ohl~*Xk3b/VWlvbv x|K_ «R~ (ǥn#-V@|W^[wqUԏ}+?qB]ԕް/ #WaxjEfkm+u~EװDWui4TxBG2wģ"H Wݎܓꃥ׽_1ҼI5F3CqQR$utwdx`C \Wd\QmyHZcSncg\||]r slc>2SBSk# jK8ha#&w.*\Ϛh›u{_V_Vyr4c)+@|f7kPpU z WZ0BGrc<).P!X}YԆ>ؖPA%VN|SN~n Aovd>nH'? wsh%Ӛlz܂97Mq5fiR=IO_7ɻ-c;b/r #A2i;M6b}qtG\mtlE?F ТUh/%Ts+E0+t3JCůo5uZCKz}4je=mBLvq(z&5];9F-h|O4;;QF럦&O)6, :vu`MƢ#5ͺ5g Y[Ҋoi$~,5/Ι,]X9`UeL3<11B1 &}BU!.7w i>E)=/)dp` Nl! 
/JųvˍģgH8<)y5YTS| endstream endobj 53 0 obj << /Type /FontDescriptor /FontName /SQTQGT+CMR17 /Flags 4 /FontBBox [-33 -250 945 749] /Ascent 694 /CapHeight 683 /Descent -195 /ItalicAngle 0 /StemV 53 /XHeight 430 /CharSet (/A/E/I/M/N/S/a/c/e/f/ff/i/m/n/o/r/s/t/u/v/z) /FontFile 52 0 R >> endobj 54 0 obj << /Length1 1413 /Length2 6189 /Length3 0 /Length 7146 /Filter /FlateDecode >> stream xڍTT헦K鐔:D;D$%f`hI $$SZ;Dj9g{>}ʤk+g )à^ $@AK_ ' HjF@@h X@np0 * n +Jh@iuw(J$9$V`[@ X`.^n`{_G '(!!; rXAZV3`^Jprq<>r~h[9~#`: `vO+7m@P8*j rj 4:. Κ8w?%C[]^`= t5H j <+kƭrz+?m.8 W%+Am` (N?Eu^yB}8ہv غ?]AjzTAr6z~Q\v( ?Gn ?nDl65 %';J CFM  }2Gqx{ZU54T$/C|xD"@ uwV godT@@_0<ǟ wm ?7A}o8+AGmeV+g0O;FhP{O'?X d vwO rP{ߗ+ []05Tz ҅/P@?l]qB=%pԤ~@UwI% ܬPGI" j9mA߼AaT`s#5OQ ꎺuFXzQJ۸75P}%} !Hp|t^.Gɻ1$=κ5'ό[%n2gY%SO$kJ'7>_q|-G7 Pu*WŀOk({kYުΚ.NK~٩*,ոs]48,`5:sˀEv$89'{3|˨M#dY0b{PFBcB̀yB60#J޴O%I@6 6aIj~L DDy%zk8N"V"VIdyd?Z.*I7%03AiSP|Q c*VDJ_r˯wV3G`;z1o? W%JFC`I|QZ3CB)15W2O)Jj=Ga1RGx*81&mI=˄=ҺÆx0 %V"K=X:ǚ6纫 ޗ$ϋI_|ouRǭhZ\i&ZuO5>*m ]{ἄS(5*>Ohnb0DuY7nP|ᵟVDn~߶H%h3|2Fm1ׅ0GOMCSRJzӢG"dƑd&-,{@obԸ&Yhg>Qlu{o&%~܈d[.@%K%F + ٴea2NyGK۞=r&3 #$E>/Tb|&ޅbB~1hd`Ω'0tf1jjxqAn6STuS&`;CxN\9c$vudbMΦX{* @ݡ]7@xyI0%o85D!|_&~+Xbq.r:m<)[A~bᕕ7JO>(6(v72Hұ.fl\=S$G'*<]<ɔOjq!N<2ZߐfB(Ғ25;yy0T)zLӱ - %d*lmmЧrboQξ+8PRA,똾CT=P&HScmHF=h} څY'ғ/i"_6J\GIjd[q3; 2ĸ.<Vgm~y 3L֓Qen*+ӔQu`({盡+|c* nI݈e[,m" P&)f"tPˏ{sË]s@ժ{8A ͢Wr]6k1*Y,ۮ짦-F p4z%ֺ vkW'WUaB^4Q<%# 591,1οs;aV=J)xi%+xG޲w'ϊ?bώ0O3/=1YM@GwVnѣCfSs6xc;PJ8hu))#>B^r0ۨ#=Q^hԹ&!6 G˖fE֦ԇb\lLhCC%d1l,}T.dAItqGKzjQ7udf)in;GbdEKO_}/JIHX7g=DoAoi>`f:YM͑5qU h# B_ZYf<5-&k"PT ve<)IZu<%;[h=_۩o\#pBW*Mrj%nÊ| XKixZﮓv;TZ7u,It!p=;Ofe*]oܘA-sZTgvǸgDV~E2,ͦGA*Y9?(ի/hk+@.]8`Ttb2N45 l4$Y&4 _0Դ{u WǢOdP*Mt{S?!:ՙ){1>9>f*M0c5wx31=/xJ&a#Kŏ}Yx~3\H!U}n"ceI >Y?3tzdnm #?vҌ?ꝋL_&c pW_S_'|bwInGTZ~ _/FRiVM|dI@NḊŝ$\hƼévHnq`gą،t͋Z Jt~̞՘a62~^] >׹v/jy `a2v K wE?EB 5E܀~ۙ2Rê=eK :\J¬XAz^| n`3zgŠ!3 %ْϬp1O/)(mH ۬g *p|6yBr~ीT\+~SΨG\a:py9okl@|xהT!tDc/)WڈwZ}upAZICk "Z*: t|8H/Bf!#,{wku.Yc];xy .aFHGg<*s7{H{.=(.R 
_pa͓V}"n1ḘU'[]ANXw.yWwKLD{8oiS.zMzm;@d@>Ю/Ty9q04Bޱ)Ά>R& 9xd thK8RӤ huo'roWHF-T->:RMe; =.P͛&}<Ľ晑oX(hHsPMif(c۲}a0|*ɖtOK-a1+GSBMX޵$O&DQHvS, tcb(Z:"jVCQ.$l=@T;PA#hI҇'dk*tKbOcS~pxxE( ͹(Hl\48@rՊ`s b^~9P;Kc}c1~DRT{*.ESQnlP od*c"? Xd~XUnX&Is Q%srZf]. to[n=9G웍{SPG{~QKTs=ѿR=,_ݸYa[v]b }!vl9YEwڈ S੬] ;U˒s˾,o[ 78xyTNu.@!Ǡ>v ,N(#B?b3Q09b,@nCEBd nڨa%w=KYOyoqy3ߕlOxdgvtd8yĥKABSw޶0r)#ÊSbku([ endstream endobj 55 0 obj << /Type /FontDescriptor /FontName /MUHKKG+CMR8 /Flags 4 /FontBBox [-36 -250 1070 750] /Ascent 694 /CapHeight 683 /Descent -194 /ItalicAngle 0 /StemV 76 /XHeight 431 /CharSet (/equal/one/two) /FontFile 54 0 R >> endobj 56 0 obj << /Length1 1464 /Length2 6203 /Length3 0 /Length 7197 /Filter /FlateDecode >> stream xڍxXm>Rs..  hFFIi)Q>>>w8}^uu^vDXriaPgb@q PAIah8 )001XG=p @2 )  JP. Cp<<.l|~HZZJw8@ =0掭#&(G >9 CFTGAy:+ |1 A(?DH.p_ `C!^H(0x9 s8_DJGC (w0t80##4 `GףּF0~h'A8J=fu$TCb$A'nH2 uE!jiB$ƜaPZRR|!. y~XA(,~0 H@ ap$ɿca_kl= V~ _OXAQH߿XZ_KOZUTPa1i$$RR 7jdF:D(YA3 oJ!7y~+*ޑc~k~5\)%E w5d0cZ׍O 7*)=G \C|V2H6MqsICǐŇK4 376>-!o{7SMoQ\_&B,۳{_iK2P̠:s'r [;4eg8)MfLuq2d*LRӸ}-c{R3&;!cԌv)kWnu_Kr#vq(罋oNPݗ^SalOS^So {yuֈǝ+߱[%fnM[j8ߧqԔ(BT4&J~꣋17yvsj<>'ٜ^ h^~_E @M;PZ|J/G:4Z^3AŒ&ĩwWb+b7PZi$  &ځ-6/VɅM6,LJŻ͉yd njKkzxP\5$ S=wEz;MFswo"Y! 
ќ_삍+ؖGRD L$ph߈{9:7cI w^J|1Jh&o= \(,QK8_BX8IѲP.Z`0Le6Ud޻%w'EM9C"VJ*:qV7e1^6VM-1Rs!a"TʡhwwtB+keeC ߓcBwL tFʆI[UxIo>ޤmJ-`țp|wPW=YqݶRt7 !IT"a/cVwǩ%gTlQOV 2M8B"9ёf@ԘcՂe頋}sh1'^#/l!wRkRyÙ\Ӊf ϰBi R!hLr)x68vN/^nLCp[;:HK\}n扈x#lǟa&;]:7VDn™/vecC,q)"O5_4]!Lm|8~ZFc+J0,=77.5 N~ @̯O~ ('/ sFopg V*?w˲l7Hqw1 J]%ƛꤓLvxFoXN=%OYotzϲKQ%7hy2 ̬R]vA9c25U9a|86jӇ%㇊ 7j=AF{E(K/ !(j%n#jK];!CG (9l]-:s3Xe_#{w TG6'b=;)ØR@?Z[?9uӷhz1+TV7MhajNl|yъ7BtGM)dBW&05M=Iy= _mψVocf]j" U7sjJ}%˵m+3${,^÷l>uYZ qb_̳ߕ[~{(RP؝V/qB:"9F{ma%Gc,EӑrRT%Qt%&o1|P_^Cg0XYbSqۇ(F3-Qi"3~{/ 6#\IM~7H4+>ܽ˓%%Uf_d]޺f,ڃyВܛy&D:stJQ>ʵlW5EX`LT3[;gd'zh(DʍY/w@gqyRq@ Q  \?49ƢZp&ƾ;\4\:ثc)#GdGC&2tkT<۟gd-7gCRk Wp`U' 샄\ ?D<։_X LwqO w+ X@rlK7ǼJYmrGL2}l`' L)K`O潄(P,3^[-|r>μ[Csk\1*Q.Z.l3TڃþĨ÷Ubl 丽ï]m멑G wvC^kwDgpS#c`حO {rE|f[BΒD }B7X'ti)J|i݅se{{ԔQM٪ot8t]gF_2cJD=5=c5Mߡ1~ǔ2UJ'r=wY\PԻQW-CL!1r,SqnDebLCNջiKg~V?PpyЋN*Sl0 tX㍞s]pe6USڊ3oMNp iv3gnʪB/e\hgѺ^iP1SvFC0i˪[` 7L`(pO4euo8g}ϹY'{$-˻}h:λ᭢)f"5@uc:F@&Ɋqn/L9;KIO"ArC0CTCeuTE*^_I֙ò^L ũJ\jz:/PJJzXK f'[dzk| Eh`7vĶCdhwr U6 c(}9[_~hf_ݤxO/)dOfXx0r:6Ns~YFaW:R\ic/O'H'sەC/Ȝ#;"W%*`NAEheX[c]F x6`G*nS@J!­wȲ7Wb .93wtQPy 8ei-q eNZP]v_(w'0νhӍ֙"(7#+H7}P4Ǜ=O4Q=ˇUoSUbƍ~M8U3J4(K?P!^lzG3ހ= ׋aAtF=Ekzu\7[5 )w\Vlc;6[*φ)w0]569Lw(r%k-laf #17:wk %~iYH+8P4c+~他Gwr94M?`C7뒇`EMW?r[z;P+PH1[+C5{}xץFCwqa kDrjNKW?=#eb $ W#|bԁ: 1cCvkHkd=8 :AfD4ѻSCۡ+ԃD%)Mڒ :Wx>5Oi +>za}\U(7ÇsG1C1cv_ xNxA齬i_B9S˟; 1ET~9\|J٩n@&)Y֩!ٴ]]$~4Ks(|lH F\Oܨjώ~>L_{$:{'ߏ#hjȸ =3d}ɓVnG웑4Xd2 Ko%7㵳/:J%0Z"6Rsf›M5G߳iSUenיP58ٹ Lmrt;o_&AW]F^OG! R5?y5ݻ9$4!#9MXt΂iѨy!k ,KNIqk6gҊAX#Q&jYe>Omp:&Ზ#ѥF f1Ūvuc>Ow /~nYN60zGU^֋z2cbp7KśWx18qM> xph7A"u_4Dfoa%\> endobj 58 0 obj << /Length1 1417 /Length2 6123 /Length3 0 /Length 7088 /Filter /FlateDecode >> stream xڍuT6-N et)tm ) J(! "% 4H H7{}y߳s\wFJ0. e*FR@0X89X_f)A2+@ `q6UB<\¢@a aI0(KBU!PB! pp%~\hBpW\G(h"Xsbd@W vz!@C8ÀA\@cGe@#1 $ 4uznp`?( OB 'CP@:.p]+ a!.. A@p'*!8@7,A( Ր0+~ͧ@ákYg$ p2A"=Llp,P,-!!& PG_}࿝¿8~n(7=<a0O8oǿOaa $83|4hqOϓ ^02P65?>ee7OP  K%%. o>p*j"Q@?p7Ͽhdxp2`(K[SoUE讇o7oᆸ"\| Q. 
'HYCxW C 連,ǎExa,^  Ga8,0|8Aqo na]pW E~OD\A!>0d"8 d {7PDaq)@= VQÌ8 "߆uz8=&nϿ{áqT6UxQG/_c,:ű\ud+?Kן,gq4mݚ2ֹ-{ C1: J,Rn=2EȊYdjJy3fR; 5 P);A+W k*U9A"P1g*!lONWgNv{Dy$5kn_r7: JIjsH9BHgtu,!íG{w)2Y5Oh ;_]un܌΃&niԒ,+%{t wMw1N{7:6)9كJe^NȎaUj?'!S04GVSr15A&˦ZHn[y׾L3ґssweއzů |bOzEϰ$#ٍۄK\TZ֚-*_9<'yGߵ;]B_?Y槊_ݞn$ h8*w}Zo-wBB9#{B.-EMW9)Q5G(>$x蠰 < ]hs)gWHYP,qcH!r4Upur^~dѧl,e"m"w+m<鐣XYy$A&[2W:φkdU+흔xКX[kcn] @V M-a٭&MP$7)71SFE" lkxoQczH}i薲Dh4QܩWVXC꯮A]$}j]z)8b#V^Rml^H Hꦡz@?C Hҿ-#{rE:bLÔ%HeP䈾)NrQ~CK$?}:O_-? :"1v7 /wgwVLB)VC~"st1DVr7n찱c  & ̣*zZr5\D䣇| !Dѱ_<>3(2@_~ݷĸ}U.χFy2iv#;e#\LQqMaϫY|IpЯMf}-Td?- 4dayɔ3vE5dk6g-Ax: YJ>ƯY]:[ q]vZꮬ "瞌9~51;Jj+vh <80vJ Mi 㹸fM쥸,haUt=e/z4 66lQk,9t7]i-ЫH揪:%f@IIYe^I*T~$IM̸fåv {˜Lz3l/9%yI"xF$M'i6/Og),ý#W [ʠ)ۋly 9.}yZ"]|dew٘gs ~sC d]|mʔgH^S6K!s&-tU9[P4 ͞j7#n%<԰5U+E·>Q0&4e&5.o[q 눰xB4.7 k|GK $WX|Ցt_]Yw-ԵtR9INJ_sSҥ6<3;xʔ1x 5 =ؼSCI엧[D.yRf蚖Gt*j.̨Ir quMx3w2 PQ%b E2}ĉaWK9$j_ J|$mNp˱nzj]j=!PEbstRuAp^U"gB7-YVP ff 1*L*j]g1g_^8պ!-gV٢ćLˇndǵ)ң֭Sl 64\THz]Sy+l\.0(G7KPpHDB{t9iqW^۩z>ӗ=:ceOPRArx_[O꤂sSڮknqY v%&ͯsȒW1eaZ¯W@騚>YZ.^c1~ZkGKr^#O< ύ7Ԯ W^IZRl0C*Uϯ;f<`iX$߷*\1b1h Bԅ%ԥ>M4fITwn,|<{De<Qi.c1 ;LZd+:8!S>=;B=\N2Ƌ3}7.be {gMD+ dZjݵ9zˆao %wݕe-wymx pu^ĭQ4]Z ZBDZSuYОF^|DMr\Z^(xN8ل,aJBsʖ^AvtR ժ+SK1AnA1k[YuC'RG) .^~%fZߜ0'p}.2TM#Zݰ̐STtQb(K+1"(bQ_P Sv</-%f}GØ$wrӺ"g} 3Sbc(zKkm4JA_sɵ֡4OB3xjD/imwݛ)Lߜ0&t ߓf )չNjF}$()Uro\y=HufG+ J=I.r ^v "lV(4B Zu[.DgTD: Q8ckNȕqIz$CNEֆ|>KA_ A S:mHNUZ@dx& l01V:S,J)pǘ=*{4Em{8!vr+NI1:+|Mgk%(6 -X"JRtӻbpY{\ޟ.{7Ay}|1[s7Kت7˕ Y?A3S}yaWlF_w<tt =Zb8d5v995Ex}4_8ڍz|^̓^SNʗz$K` >G E{@$'VFvSτ]߀JbOUORM?]c;o}L|PBRQpy_+V2U^$o>u4;8,BQk1y=gOƋΕLS8 X B+Zfi"tEjLuD𵖔oɼR9Iގ!cP dHO>fHxLw5 #|_?I oD6g1:\n6]Fۏ17U&dswN{0\.Ο|1y~uX@v oF;?Rn_Sjܭ-"˼fĆyKHl~r_ԩmj^m95yEDI^ܔ! 
ҒvrK49F8br[ ڎ+}VYʡOPa Ye[{UK7jg&Ozl|p=dG 0X^ڃce0?gICu.f:A_xRo1  zqa ~k%FrIrۂghߑ*ru*^oG&V>q!CLVMK}3U_բPޢag-Tn\߹#af)]W#4;Gܩ᧵܊Ub5/mG YWȐ]8YRl ͘vʯSXUm Q {-b#WQ/Q<_ć*i+H==bxLHZ*_mG5|EޞL`]Vbg<#7ǃ՜K羶4't'Xf>Ym!ia6>H{JaYv94=vsE\~[:씟&gMQ7!|V| c\nb_4c")f/".9_:[2} ~Za%_ endstream endobj 59 0 obj << /Type /FontDescriptor /FontName /XYBSMX+CMSY8 /Flags 4 /FontBBox [-30 -955 1185 779] /Ascent 750 /CapHeight 683 /Descent -194 /ItalicAngle -14 /StemV 46 /XHeight 431 /CharSet (/asteriskmath/minus) /FontFile 58 0 R >> endobj 60 0 obj << /Length1 1795 /Length2 11814 /Length3 0 /Length 12939 /Filter /FlateDecode >> stream xڍPi{piqww'8w;w ݙ{=kJRuFQ G3#+ @\QC †@Ir[@tq9:Bh.0u{7TttȹX\||,,6:$L=@E&RdeߏsZ+//7ßQ{ hf hnjPw4ݼ+3'+-f P]<?Z(j a rBx؁́.@{v@ /c0:+G ßΦN +%PR`rrc:Xahjoa 35{7tS*ÿs5w920,`!hotpsE> ܽ\[GO%6,ܝ5@@YlEˬnN6^2f#O%}mA_WS O++d0Z.Zgy?V<O^1,_-G)&ed0qXYYܜ,wJULAUwDYKGᅳx?7d6Π>@obQIGoj}wC}CTVZW+f#Vs/9U P[k}9@@GWw/ѽT7J:;Zl\SSo9c0398{X: q\f?D7Mfa0Mfﻥ`6x?:fr,YY;ߋWa|/od}/^? {2\enFDD>ݱke|\ hhf^`U $Ǒj5{qha66[l[-X>.hK=肏?\{{L,(2~躩LAmӍ:wJ|"uXr5LVNۏ]`R:oaJ\aJ_ |}sm4/¾E Y'qӫ*;_>DZ$)(>_:RO:CXs~ m8llu+TVUzC7~f 3eX- {ҲbpSQ 1&\\J z  z=i9j"3M2m~g/|ҸQQnw/Si"YcM鼄i > 7XbJ ))b7 v0CK߻񛧶HQR܈噓(͹bzDx4̰&?_O??dPmmQ`;pBNfa̲KCv5\pnȸH N1iCHx3"T ℷNE:$(3,@fdHBrG2W$(8"n$TmuXKj?GN&Lez*K@eqeZ1㊮ 8n 'n)+/5^eXx?hNW4NMVrEN^)$J  zuoH]E4%/ _ZhJbc[&`K _"@p4fW񨸌oSRKAQ xtOB86κ)g']7.3\3;j-W'pO aTߏеѻYk25qZb;vߌ|#Zd3?=C†2qU@)`'"aR 1S1`F/Uߎ姀TMYv,TB@Y=AAѬř6&śZ vxSUumQ߄* iI~׸HA>"gxj_JEjS+᳟rtԣDc%YwάYSCq{^ :T5?fX!2ȿ`Z1rT H9Utp#܉ 2nHB?oҼm\+ ;j9Yz,ԅBci} VEZ;'z$Cȱ'Տ( OG6k=9< L"v-VB9LD+AR[a[[S~[=ӽb4u3Gp:FScO?c0.,mrږo?^|v~ čxSD:uIϻ/u3.>(Dc^+ƅkדR᳋e'~@FMwX0Xa \骃Q{U~f 0)*ũ+i"'3si9nxTh}@|88F-K8RqB}ojU;9 ÉkC#`7h%Mf&+teIK-p 3lgdbmNb"<=־ ~rVgDG] JѸ.H:O(_{A V 1 PG5KZ#`n0ũ0 7ƩMb"CG'qlcD ffg9ubQ{/L zJ% Pu\(`6)4%KR]OɡX!6d"V#hK~ArC4rZF^WgڮX\X(}cq1kWv\ӀB|4tb>.\/7I#yraD@`pH` H5PuPBsY1-'VN)ز (V qC[qk!7E-B݇&9k.ELVəbn S[~{h;BBi'k -Oɋhiv1j@շnhciiSdeDq(C 6rVM Jmjؾ`^#3,vQ_Tk>M[H 2O?RT`S7iω97uc -vyhk43Lď{ %' 
ǘ[:=;A:8jaB'إʕ_iYԴlu"d|x٩g<5ANīH+خldcB8uJVhKs%Z#)6`ÃţfQ3/,bRh ڙwW)ME I3.IAMigZ[a(.{@)-jb.|+'fd*8RtPZ5$ {3%ĿVqZyჷ*bj'T=UPl%6&Q"~}F<.UgNqa:Wa1p-`~mXrmVzp!a29G?Zxv)Yr>x\O"LҜ,6s;;yQ: g-O|~, h2'E|Y2447fr\%5͓nߟ,ZoN(tP9Ͼ/~&y& u+J[i.ǧ# zDPn}=K:dzIQ6_^7)(J1w{΋UNR{uHb4Kv=e=ִLP&Nh0TG[ 'QtO-gga#*iE.gkC?4 u+7KNҕ-(iO 2^ei8;O+gcl bGӯRq {"o5J*p:Gw[E֎RxxS u8&W*LŸc&O~4MGn0(]\aY|h3%;A a3UzUmS AjGYH boLQažZ fNc͔. pӀ%kÓg){U_W8fB6vQ:?c^A)[lhy#M٧CIɝ*ojZfe#xzc:[ x TOb‹Oq\_Y9kkN~q͠CbR!0+yڌOn{u bv: ~qw8v :"͇&D>KnhPIiߥm^UH U١tMp} q~쿗>B'A̭]S.]}=d:m> X))*7*ߣM!N)vԏ^8dҨ{0!e%(rS,cNpW9 Rg8.bi{>oI֖MV*(!]$8PI~MQМ &SHskϫ8yWy=_ I0u~nTtV#a"ǁwuV̴?ǢM JT.knޱ`bf#_ O?dH'L&E)}oah% yˎ*O[q}Z~97TH+ ly5)V~QȒ Hlv8ts!VYZ fC zsc[™3p xc%*\DH ."k'ݰ|0,`}FD&ڃaq G,Zeraφz(nˈL4 h+B;}fߨs*T mۻANE1JIxI?^.;(n<6 D0qDʨ~ٝw4_e=)]QIYN|5, ܜY255{?񑐳LJcue$rԓ-[֔'#t:arOtN>>34oT~ uM>Bޮ"x%d(uH bN(4.o{1Q-K<}aRHeeI@Ao;Vab%ikaKyH]$e(k=ǒ\f*mSHXMe&+b׻ܔܔ:V}xju,t30=Bb+ِXTv3ej#ԅRuG;k[i$!k)#ra=lwKϵk_1A!ৱx3 ׎K, ί`5g X?[ehr2sA|Rsү*ln;I}ɳ9=Ɨl.z/*Fh2{'},NÞ< jM'cxgfMFs$<]ļy}Txcq)zrd-4zr{*^#pQP7͵Nf999֭ԖacdP>)IrKS9 ,Hܽ^=$~&YܼlNc^NgPm$f\ꄗ\l>RnJe-93~hǧDEMw0}_$қΖ;XSQsk`LBw%"x+6l,сk*Tthe(yI:]{(}rr5@R5*#tއ򀇷lf4$Kz(S~zHS'#.-8`u%JL n)>atE{SsE-œ5qs_,`>2e+?d*qtșD@~HZP9PS ܆.|C'(T^( whGOSM'WR6PMeT$[wAJVA=O}$e>!}O=iZs\xٷR[љM&]aD3)f y+5mjBMCSY2q7E\ܝ,i$1o%QNm\"%__#l Bi˸IKLn]C .eE5zWvG .TbJ)|9;0(W^{2ڥ,sD#61SNI.=M?zS7FI${ye`wՌ:3$ ~X7V#~Y)1*ӑ,E eh"NO梄D7;Sl)!6 \s\#3Xw*~9p1_ ёZM }`v: "<_fq%9J%Nkvn%SyLM+Ҷ> 1^C1`c>~o(2Mp͢HxDpɋ!)/>^\MϡF:[[?BkrgtgaND@c>BL )rQh8/+ᬔet,!#{ Z ՔTv`(Uεj6B 0:il&bu53A>ԍ?^S!Sːַ<~0뿷Z;(ٮz87O(?ڟf}(CғL ɂ= u4l:e5;9rvZ_<-iҙ~wx~9}!žxQh|q|u1KM_g6-9=PPM(2]ʪɷ S KnȺGprs-}^闹ZP3w֓i7ZRxY?VjykxMP1osJdh0ee;cڱܽX%a+MϘ~Akq>AZJtä[;6\*c)}ܝSȏS׶I˓p搞`_B&R|k)(}g]#;q1|bf:fwe%"S?Sٻ@&0]r:C6^X8.nDr^2%Ʃ pC>Cw%\jΪOYgYGz &#{ӇҤE4 E– $Ya,ى.l~Of [8|iFx~8'%XaDͬZ pΕ"i{v!޳$ݮ7'w[)/‡Pieb5w Z0|z. 
bz* N8E@ xE*.XNЍmz`n=_%"~0ab{| 0Mw8> Yx6u%>s~CZZJ䤎`1|V7uuX6a- <5Ll9SDRP;s"Xo]Y[f5p9`DqnK&ӵGb.t Y1~ E=0*|H9(F&FE`ih쎀.V8BKb5N S>0[\vp9Y_¡\;w=NO08\]o6W$?Ov";ս;ӻh wiK >3SQX2.v_mr!} $Ȝؓ^ >60Q}-nx݄ڞ*Ţ  !zjbaP:`{2맄ʐ  Wq4 f3\t,4.,IX]4XWLX?бGuZ ޣu)eV!bĆ`spώQsK\yxk~[t!7Ts21rk/`c{d_'d6Kgʦeyb9m^AڍV{*JIU)jGA"?L,8+(>z>73Ah*øg?{oeg;Iޙ\ i]n53s:3 ۤ Rij-[=3C7!&Soy ӄՉ~ ~dhGy9qMFG \("nSU]E6w^Z%~sDZzZ.Y)pF2p"sKtE4nߣep;OşhLe4+!/?*HZ[yM|d;7| t 5|f[~ݐZUtE{d(뛑8qYQYH1_ XX."pU;[`[&bv⯲Q"1XUNa;OvrӺ3-"S "/g9:Upu&.?e"z{M/CbYD; Rf;50`Ks1Yc+`Ae$dO{[Ƞa eXnYeX_jѝQZ;!"lf-f#ZvT})e/ + 6 ߬Zŕ1fQyu@SY\" N}3׵'^бzߗsgsp;u9pZ `d 6#M'>0Ybo,w2=_:͓X K8!'@(.amT9[K3\4v9WA.>:y]_W8 ;=dſYv{gSKp4X$i8 endstream endobj 61 0 obj << /Type /FontDescriptor /FontName /HXXUYI+CMTI12 /Flags 4 /FontBBox [-36 -251 1103 750] /Ascent 694 /CapHeight 683 /Descent -194 /ItalicAngle -14 /StemV 63 /XHeight 431 /CharSet (/A/H/J/M/P/S/a/b/c/d/e/f/g/h/i/k/l/m/n/o/p/q/r/s/t/u/y/z) /FontFile 60 0 R >> endobj 62 0 obj << /Length1 1611 /Length2 3395 /Length3 0 /Length 4396 /Filter /FlateDecode >> stream xڍtTm>*N J1FoR%ձFIiiQ_F * "%"%R7;;gusu?Y.DG(Q5BÕpE5F!'@ @&УӦ1$NJBPC"H}+&dHH葽)W7Ͽ4V@ jt@`̎X "c Қn4 UxR2P@s,A*Hqe(AM"XVd<CH$*3N26,@`PpWd K $WO ͏0$F H%31> <:09` b)/UJ npmaM==A 8>bs\ٗ $~!頱 8um47 3Cp 3i<R1> @?#XHՙf3BL!?gpdwLцƦr?(r@y (*H *i=!8w=c S`' 42jw/y6Sߨ'2~I `Ncn9ڂ0 ! @5 cmS1?YH T{#+`)Tem@FM’q `(?ԗ `(۔6S i.)+UU`zM!uef ! 
)XT`?:#!H@f)Pyg@UF2l¿fS(dŚěoX`^ְR#+?٩նxaZgxFձs7U{{ ʌAl؊wuZ_659V"\|໤#~CMɚ"ۥ(pn錋پ_]]sKXb2޻EdvEmoBY B\^KggL7 z$^5'J.5HPK,P[/V-DB(5富nVxzdZcҤs3]⺗*c 3 S5:o--S|=EȻ~i!tkT@ZÊGqQhS[]3ss_dK\tY ߠS$.J_>ET6W+K~F|tS3|]'2͸tW9i[KS5'oHqH]b1L :~h=qhI`Yݨlx֮,Ͳ+?;2-/۩B^ID/{u*uΐzR1qS1Vuʇ00QV#in'ž]O;/eI:GY61,t[zd5^={Ms6^ '\y+$")uĭ8<o[ ̰n(N95((c|0c[&,څ[lY8օ%~ShYY\LIՍV~.5,vPu}c·ed\$nN ( 36;)'!jmC縊f-93Z_WGnQWf DylKl\?͸#Bz(úv>3|Vmf5\ICPF~c8M* }1H*p2ˑ{V@ZUBm[{hy]grE 5 t3ϛx${n11(G+X"p5Aj$V./y'oOO:'JzF֦?.[Y}/@uꋽGOBvF3N_LM6YYPǘ5pݠ3ff䍨giECvsWʔ#>ĪKŹ͡wOGȟ=Iyu{&-U UNUe=AzRo on]I:W1f%҅O2`qRY|l ʣ#ZB }jN .w[#|B.vl*8qOB?%oO(N=M,D=$1t]<gJxQԬw-]J\i ngl?'} I$Djwח?کB_I|0e/S<}LK}E/` )!Lٓ07vOk>hKmo^|߸XI0]@Oe^, (yxi_/E+=WC?}–'x!nx>|:_^cX~KtYY~to [eY9,و ZBf}U%; y'sV/̮DZ>T7"Ž|MR햛:gkhS_m08"ZtVо̿wխr/ d-s |x@ܱͅ>qu꯸G&2'_ 9jN8GH?$J ynƝzR؋ .ۓPE2S QʆjM! NmnaYn}Q:|\M[~넯d][䈺V]ݹ2u:üe#5|0q)1UW.k~M !bsg x`lrOU֫A%phU)Q6N ӊjܭz.M us ;>p|ۻh.t:qɼJ̒/\hԻkFF8?nQ/oTr |ٕh OJͻj\ֱRB^oU-+"ֵwwסg`wp۞aBi-Xa,Wqrʡ[!Ed 2k5m %âŗ礼9U١]:p9,vNCj[B_Ox{+r įX,]Gh{hZq59á{"EcTD//?.,kA4Lh"執+$+"+Q' PeYffǧbZJ #1读yqV4gy(MNY%KZac) c~9كF3uO]ɘfrLkޙ#>oK3j8i:ظ\>7蓃ؔ=/ T;̽y]Y1oT1.%Okվ%A]nS`08Cjv$}5nKL,'cR|D;݉G3_V>`=]ysA'ƛ߆q"ϵj;pY?92K"d&?pG-אe'٭b{rg:TOWR5lFzwK&17Xvzu~.`}1=`myc!3^Qk Cҹdžgzm|'^Tl{WLn`8w)F H2?Wwg!,}ѱw2*T{?eEQ)ǿs=u.߬aww$ˡьmJA/(oxq%H㭻fkooBL3p(=~_LOJ0p`e F,ۖ{wX6r} TY3|01x:gCpSo Ԧo+|NZ~X 8V rmw޺d٥Jb++}Z1dHrGb]^VBY9x[6&_QCP Y&p|R`D_6>|d|b endstream endobj 63 0 obj << /Type /FontDescriptor /FontName /KEGOIK+CMTT12 /Flags 4 /FontBBox [-1 -234 524 695] /Ascent 611 /CapHeight 611 /Descent -222 /ItalicAngle 0 /StemV 65 /XHeight 431 /CharSet (/C/I/T/a/c/e/i/l/m/n/o/p/r/s/t/v) /FontFile 62 0 R >> endobj 17 0 obj << /Type /Font /Subtype /Type1 /BaseFont /ZATKTX+CMBX12 /FontDescriptor 43 0 R /FirstChar 49 /LastChar 117 /Widths 32 0 R >> endobj 15 0 obj << /Type /Font /Subtype /Type1 /BaseFont /XUIVNP+CMEX10 /FontDescriptor 45 0 R /FirstChar 18 /LastChar 114 /Widths 34 0 R >> endobj 8 0 obj << /Type /Font /Subtype /Type1 /BaseFont /TRVLHL+CMMI12 /FontDescriptor 47 0 
R /FirstChar 11 /LastChar 115 /Widths 37 0 R >> endobj 14 0 obj << /Type /Font /Subtype /Type1 /BaseFont /AEITUJ+CMMI8 /FontDescriptor 49 0 R /FirstChar 65 /LastChar 117 /Widths 35 0 R >> endobj 5 0 obj << /Type /Font /Subtype /Type1 /BaseFont /BCZBTA+CMR12 /FontDescriptor 51 0 R /FirstChar 1 /LastChar 123 /Widths 40 0 R >> endobj 4 0 obj << /Type /Font /Subtype /Type1 /BaseFont /SQTQGT+CMR17 /FontDescriptor 53 0 R /FirstChar 11 /LastChar 122 /Widths 41 0 R >> endobj 9 0 obj << /Type /Font /Subtype /Type1 /BaseFont /MUHKKG+CMR8 /FontDescriptor 55 0 R /FirstChar 49 /LastChar 61 /Widths 36 0 R >> endobj 16 0 obj << /Type /Font /Subtype /Type1 /BaseFont /ZNHMBZ+CMSY10 /FontDescriptor 57 0 R /FirstChar 0 /LastChar 106 /Widths 33 0 R >> endobj 18 0 obj << /Type /Font /Subtype /Type1 /BaseFont /XYBSMX+CMSY8 /FontDescriptor 59 0 R /FirstChar 0 /LastChar 3 /Widths 31 0 R >> endobj 7 0 obj << /Type /Font /Subtype /Type1 /BaseFont /HXXUYI+CMTI12 /FontDescriptor 61 0 R /FirstChar 65 /LastChar 122 /Widths 38 0 R >> endobj 6 0 obj << /Type /Font /Subtype /Type1 /BaseFont /KEGOIK+CMTT12 /FontDescriptor 63 0 R /FirstChar 67 /LastChar 118 /Widths 39 0 R >> endobj 10 0 obj << /Type /Pages /Count 6 /Kids [2 0 R 12 0 R 20 0 R 23 0 R 26 0 R 29 0 R] >> endobj 64 0 obj << /Type /Catalog /Pages 10 0 R >> endobj 65 0 obj << /Producer (MiKTeX pdfTeX-1.40.14) /Creator (TeX) /CreationDate (D:20141001140615-05'00') /ModDate (D:20141001140615-05'00') /Trapped /False /PTEX.Fullbanner (This is MiKTeX-pdfTeX 2.9.4902 (1.40.14)) >> endobj xref 0 66 0000000000 65535 f 0000002046 00000 n 0000001934 00000 n 0000000015 00000 n 0000134585 00000 n 0000134447 00000 n 0000135278 00000 n 0000135138 00000 n 0000134167 00000 n 0000134724 00000 n 0000135418 00000 n 0000004731 00000 n 0000004616 00000 n 0000002169 00000 n 0000134307 00000 n 0000134026 00000 n 0000134861 00000 n 0000133885 00000 n 0000135001 00000 n 0000007422 00000 n 0000007307 00000 n 0000004882 00000 n 0000009697 00000 n 0000009582 00000 n 
0000007584 00000 n 0000012134 00000 n 0000012019 00000 n 0000009825 00000 n 0000013962 00000 n 0000013847 00000 n 0000012285 00000 n 0000014101 00000 n 0000014143 00000 n 0000014549 00000 n 0000015168 00000 n 0000015765 00000 n 0000016099 00000 n 0000016195 00000 n 0000016819 00000 n 0000017111 00000 n 0000017441 00000 n 0000018114 00000 n 0000018800 00000 n 0000030967 00000 n 0000031258 00000 n 0000038752 00000 n 0000039021 00000 n 0000052076 00000 n 0000052379 00000 n 0000061608 00000 n 0000061847 00000 n 0000081654 00000 n 0000082144 00000 n 0000093030 00000 n 0000093287 00000 n 0000100552 00000 n 0000100780 00000 n 0000108096 00000 n 0000108348 00000 n 0000115555 00000 n 0000115791 00000 n 0000128850 00000 n 0000129124 00000 n 0000133639 00000 n 0000135511 00000 n 0000135562 00000 n trailer << /Size 66 /Root 64 0 R /Info 65 0 R /ID [ ] >> startxref 135784 %%EOF semTools/inst/doc/partialInvariance.Rnw0000644000175100001440000002437213000201061017732 0ustar hornikusers\documentclass[12pt]{article} %%\VignetteIndexEntry{Partial Invariance} %%\VignetteDepends{semTools} \usepackage[utf8]{inputenc} \usepackage{amsfonts} \usepackage{amstext} \usepackage{amsmath} \usepackage{natbib} \title{A Note on Effect Size for Measurement Invariance} \author{Sunthud Pornprasertmanit} \begin{document} \maketitle This article aims to show the mathematical reasoning behind all effect sizes used in the \texttt{partialInvariance} and \texttt{partialInvarianceCat} functions in \texttt{semTools} package. In the functions, the following statistics are compared across groups: factor loadings, item intercepts (for continuous items), item thresholds (for categorical items), measurement error variances, and factor means. The comparison can be compared between two groups (e.g., Cohen's \emph{d}) or multiple groups (e.g., $R^2$). This note provides the details of the effect sizes in comparing two groups only. 
The comparison between multiple groups can be done by picking the reference group and comparing the other groups with the reference group, in a fashion similar to dummy variables. For example, the comparison between four groups would create three effect size values (i.e., Group 1 vs. Reference, Group 2 vs. Reference, and Group 3 vs. Reference). Alternatively, for the measurement invariance, the change in comparative fit index (CFI) can be used as the measure of effect size. In the measurement invariance literature \citep{cheung2002, meade2008}, the change in CFI is used to test the equality constraints for multiple items simultaneously. The functions in \texttt{semTools} will show the change in CFI for each individual item. That is, if an item were allowed to have different statistics (e.g., loading), how large would the CFI drop from the original model? Please note that more research is needed in finding the appropriate cutoffs for the change in CFI for individual items. Are the cutoffs of .002 or .01 appropriate for this context? In creating effect size, a target statistic needs to be standardized. Sample variances are used in the standardization formula. If researchers can assume that target variances across groups are equal in population, then pooled variances can be used in the standardization. The pooled variance $s^2_P$ can be computed as follows: $$s^2_P = \frac{\sum^G_{g=1}(n_g - 1)s^2_g}{\sum^G_{g=1}(n_g - 1)},$$ \noindent where $g$ represents the index of groups, $G$ is the number of groups, $s^2_g$ represents the variance of Group $g$, and $n_g$ is the Group $g$ size. If the variances are not assumed to be equal across groups, I recommend picking a reference (baseline) group for the standardization. In the following sections, I will show how effect sizes are defined in each type of partial invariance testing. \section{Factor Loading} Let $\lambda_{ijg}$ be the unstandardized factor loading of Item $i$ from Factor $j$ in Group $g$. 
A standardized factor loading $\lambda^*_{ijg}$ can be computed \citep{muthen1998}: $$\lambda^*_{ijg} = \lambda_{ijg}\cdot\frac{\psi_{jg}}{\sigma_{ig}},$$ \noindent where $\psi_{jg}$ is the standard deviation of Factor $j$ from Group $g$ and $\sigma_{ig}$ is the total standard deviation of Item $i$ from Group $g$. To quantify the difference in factor loadings between groups on a standardized scale, the standard deviation in the standardization formula needs to be the same across groups. If Group A and Group B are compared, the standardized difference in factor loading is defined: $$\Delta\lambda^*_{ij} = (\lambda_{ijA} - \lambda_{ijB})\cdot\frac{\psi_{jP}}{\sigma_{iP}},$$ \noindent where $\psi_{jP}$ is the pooled standard deviation of Factor $j$ and $\sigma_{iP}$ is the pooled total standard deviation of Item $i$. If Group A is the reference group, $\psi_{jA}$ and $\sigma_{iA}$ can substitute $\psi_{jP}$ and $\sigma_{iP}$. Assuming that the standardized factor loadings are from a congeneric measurement model, they represent the correlations between items and factors. \cite{cohen1992} provides a guideline for interpreting the magnitude of the difference in correlations for independent groups. The correlations are transformed by Fisher's $z$ transformation (the inverse hyperbolic tangent): $$q = \operatorname{arctanh}\left(\lambda_{ijA}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right) - \operatorname{arctanh}\left(\lambda_{ijB}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right)$$ Then, the $q$ values of .1, .3, and .5 are interpreted as small, medium, and large effect sizes. For continuous outcomes, the amount of mean difference implied by the factor loading difference given a factor score can be used as an effect size \citep{millsap2012}. Let $X_{ijg}$ be the observed score of Item $i$ loaded on Factor $j$ from Group $g$, and let $W_{j}$ represent the score of Factor $j$. 
The expected value of the observed score differences between Group A and Group B is calculated as follows: $$E\left(X_{iA} - X_{iB} | W_j \right) = \left( \nu_{iA} - \nu_{iB} \right) + \left( \lambda_{ijA} - \lambda_{ijB} \right) \times W_{j}, $$ \noindent where $\nu_{ig}$ represents the intercept of Item $i$ in Group $g$. Let the values between $W_{jl}$ and $W_{jh}$ be the values of interest. We can find the expected difference in the observed scores under this range of the factor scores. \cite{millsap2012} proposed that, if the size of the expected difference is over the value of meaningful differences, the loading difference is not negligible. See their article for the discussion of the meaningful difference. Note that, in the \texttt{partialInvariance} function, $W_{jl}$ is calculated by (a) finding the factor scores representing a low \emph{z}-score (e.g., -2) from all groups and (b) selecting the lowest factor score across all groups. $W_{jh}$ is calculated by (a) finding the factor scores representing a high \emph{z}-score (e.g., 2) from all groups and (b) selecting the highest factor score across all groups. \section{Item Intercepts} Let $\nu_{ig}$ be the intercept of Item $i$ in Group $g$. A standardized intercept $\nu^*_{ig}$ is defined as follows \citep{muthen1998}: $$\nu^*_{ig} = \nu_{ig} / \sigma_{ig}.$$ Thus, the standardized difference between Groups A and B in item intercepts is defined: $$\Delta\nu^*_{i} = (\nu_{iA} - \nu_{iB}) / \sigma_{iP}.$$ Note that $\sigma_{iA}$ can substitute $\sigma_{iP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. The proportion of the intercept difference over the observed score difference may be used as an effect size \citep{millsap2012}: $$(\nu_{iA} - \nu_{iB}) / (M_{iA} - M_{iB}), $$ \noindent where $M_{ig}$ represents the observed mean of Item $i$ in Group $g$. 
\cite{millsap2012} noted that a relatively small proportion (e.g., less than 20\%) is ignorable. If the sign is negative or the value is over 1, the interpretation is doubtful. \section{Item Thresholds} Let $\tau_{cig}$ be the threshold categorizing between category $c$ and $c + 1$ for Item $i$ in Group $g$. Note that the maximum number of $c$ is the number of categories minus 1. Because thresholds are the location of the distribution underlying ordered categorical items (usually normal distribution), the location statistic can be standardized by dividing it by the standard deviation of the underlying distribution. The standardized threshold $\tau^*_{cig}$ is defined as follows: $$\tau^*_{cig} = \tau_{cig} / \sigma^u_{ig},$$ \noindent where $\sigma^u_{ig}$ is the standard deviation of the distribution underlying the categorical data for Item $i$ in Group $g$. In theta parameterization of categorical confirmatory factor analysis, $\sigma^u_{ig}$ may not be equal across groups. The standardized difference in thresholds between Group A and B needs the pooled standard deviation. The standardized difference in thresholds is defined: $$\Delta\tau^*_{ci} = (\tau_{ciA} - \tau_{ciB}) / \sigma^u_{iP}.$$ Note that $\sigma^u_{iA}$ can substitute $\sigma^u_{iP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. \section{Measurement Error Variances} Let $\theta_{ig}$ be the measurement error variance of Item $i$ in Group $g$. A standardized measurement error variance $\theta^*_{ig}$ is defined as follows \citep{muthen1998}: $$\theta^*_{ig} = \theta_{ig} / \sigma_{ig},$$ Thus, the standardized difference between Groups A and B in measurement error variances could be defined: $$\Delta\theta^*_{i} = (\theta_{iA} - \theta_{iB}) / \sigma_{iP}.$$ Note that $\sigma_{iA}$ can substitute $\sigma_{iP}$ if Group A is the reference group. 
However, there is no direct guideline to interpret the magnitude of the difference in measurement error variances according to Cohen (1992). A new standardized difference in measurement error variances is needed. Assume that $\sigma_{iP}$ is always greater than $\theta_{iA}$ and $\theta_{iB}$, which is usually correct, then $\theta_{iA} / \sigma_{iP}$ and $\theta_{iB} / \sigma_{iP}$ ranges between 0 and 1 similar to a proportion statistic. \cite{cohen1992} provided a guideline in interpreting the magnitude of the difference in proportions using arcsine transformation. The new index ($h$) is defined as follows: $$h = \sin^{-1}\sqrt{\frac{\theta_{iA}}{\sigma_{iP}}} - \sin^{-1}\sqrt{\frac{\theta_{iB}}{\sigma_{iP}}}.$$ Then, the $h$ values of .2, .5, and .8 are interpreted as small, medium, and large effect sizes. If items are continuous, the proportion of the error variance difference over the observed variance difference may be used as an effect size \citep{millsap2012}: $$(\theta_{iA} - \theta_{iB}) / (\sigma_{iA} - \sigma_{iB}). $$ \noindent If the sign is negative or the value is over 1, the interpretation is doubtful. \section{Factor Means} Let $\alpha_{jg}$ be the mean of Factor $j$ in Group $g$. A standardized factor mean $\alpha^*_{jg}$ is defined as follows \citep{muthen1998}: $$\alpha^*_{jg} = \alpha_{jg} / \psi_{jg}$$ Thus, the standardized difference between Groups A and B in factor means is defined: $$\Delta\alpha^*_{j} = (\alpha_{jA} - \alpha_{jB}) / \psi_{jP}.$$ Note that $\psi_{jA}$ can substitute $\psi_{jP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. 
\bibliographystyle{plainnat} \bibliography{partialInvariance} \end{document}semTools/inst/doc/partialInvariance.bib0000644000175100001440000000270413000201061017713 0ustar hornikusers@preamble{ " \newcommand{\noop}[1]{} " } @article{cheung2002, title={Evaluating goodness-of-fit indexes for testing measurement invariance}, author={Cheung, Gordon W and Rensvold, Roger B}, journal={Structural equation modeling}, volume={9}, number={2}, pages={233--255}, year={2002}, publisher={Taylor \& Francis} } @article{meade2008, title={Power and sensitivity of alternative fit indices in tests of measurement invariance.}, author={Meade, Adam W and Johnson, Emily C and Braddy, Phillip W}, journal={Journal of Applied Psychology}, volume={93}, number={3}, pages={568}, year={2008}, publisher={American Psychological Association} } @book{muthen1998, title={Mplus technical appendices}, author={Muth{\'e}n, Bengt O}, publisher={Muth{\'e}n \& Muth{\'e}n}, address={Los Angeles, CA}, year={\noop{1998}1998--2004} } @article{cohen1992, title={A power primer.}, author={Cohen, Jacob}, journal={Psychological bulletin}, volume={112}, number={1}, pages={155--159}, year={1992}, publisher={American Psychological Association} } @incollection{millsap2012, author = {Millsap, Roger E and Olivera-Aguilar, Margarita}, title = {Investigating measurement invariance using confirmatory factor analysis}, editor = {Hoyle, Rick H}, booktitle = {Handbook of structural equation modeling}, pages = {380--392}, publisher = {Guilford}, address = {New York}, year = {2012} }semTools/NAMESPACE0000644000175100001440000000446413002107075013317 0ustar hornikusers## Last updated: 20 October 2016 importFrom("methods", show, is, new, slot, as, hasArg, getMethod) importFrom("lavaan", inspect) importFrom("stats", cov, nlminb, rnorm, runif, cov2cor, qnorm, sd, quantile, qchisq, fitted, cor, pchisq, factanal, coef, uniroot, lm, pnorm, pf, var, fitted.values, nobs, residuals, resid, dist, ptukey, dchisq, na.omit, qf, pt, qt, anova, 
vcov) importFrom("utils", capture.output, write.table, combn, read.table, read.csv, setTxtProgressBar, txtProgressBar) importFrom("graphics", hist, plot, par, abline, lines, legend) exportClasses(lavaanStar, FitDiff, EFA, Net, BootMiss, permuteMeasEq, twostage) exportMethods(show, summary, hist, anova, vcov, coef, fitted, fitted.values, resid, residuals, nobs) ## ORGANIZE BY AUTHOR(S) OF EACH FUNCTION export( ## Sunthud auxiliary, cfa.auxiliary, sem.auxiliary, growth.auxiliary, lavaan.auxiliary, clipboard, saveFile, compareFit, efaUnrotate, orthRotate, oblqRotate, funRotate, imposeStart, loadingFromAlpha, skew, kurtosis, mardiaSkew, mardiaKurtosis, residualCovariate, singleParamTest, wald, miPowerFit, plotRMSEAdist, findRMSEApower, findRMSEAsamplesize, plotProbe, probe2WayMC, probe2WayRC, probe3WayMC, probe3WayRC, reliabilityL2, maximalRelia, partialInvariance, partialInvarianceCat, ## Sunthud with Bell Clinton and Pavel Panko findRMSEApowernested, findRMSEAsamplesizenested, plotRMSEApowernested, ## Sunthud with Alex indProd, orthogonalize, ## Sunthud with Yves Rosseel measurementInvariance, longInvariance, measurementInvarianceCat, reliability, mvrnonnorm, ## Alex plotRMSEApower, SSpower, tukeySEM, ## Alex with Corbin (and James Selig) monteCarloMed, parcelAllocation, ## Jason D. Rights PAVranking, poolMAlloc, ## Terrence bsBootMiss, net, chisqSmallN, permuteMeasEq, twostage, cfa.2stage, sem.2stage, growth.2stage, lavaan.2stage, ## Mauricio fmi, ## Ed Merkle kd, ## Corbin Quick lisrel2lavaan, splitSample, ## Steven R. 
Chesnut quark, combinequark, ## Ylenio Longo htmt, ## Ruben Arslan nullRMSEA, ## Alex, Patrick, Sunthud, Mijke, Alexander Robitzsch, Craig Enders, Mauricio, Yves runMI, cfa.mi, sem.mi, growth.mi, lavaan.mi, ## Sunthud, Terrence, Aaron, Ruben Arslan, Yves moreFitIndices, nullMx, saturateMx, fitMeasuresMx, standardizeMx ) semTools/data/0000755000175100001440000000000013000201061012765 5ustar hornikuserssemTools/data/datCat.rda0000644000175100001440000000207613000201061014662 0ustar hornikusersn0m'Q=<nNfJ[ꐶpq* .1L/qa[UDLj0.(Z{h<.9-9||}Z~Qַw]1%.19[IR]p$ }I05䜳E0BIvMrîn/H`9QFRTf#$<`s"a8tm{@YڄhS*粅ȱ<^U/)5УsCj=WVJg\fX&ӏi&ꚒZ܌1ꥱtCD*eQ)OQu^%agZ-C?3:2F_}\-mB=i7f(M%jǘI9ő˄Css9`=VFOw ?'s=Y6#JnW;+2,CF3n$HvK#:p.O|[[;0^k%5vSJ[C[BcR2OQt-lî{>:͙)Hlzx\(ۡ?2rEROʹbuB _Qg\T 8\tz5,y2"i9!semTools/data/simParcel.rda0000644000175100001440000003213713000201061015402 0ustar hornikusers]{ 8]ў,mRQ'EJQmI)Rfױ113ؗofu5L1=&zVGGG[qL-xxX~&DuAm"Tzꁽi:MGa砊gCENhBΪE0afnhYtPwWڴkH\J_.^aB@>zLgmq^xz=BB K)īΊ ^a}מs][tLTV&;ķn {i_6cEʵ k~;,m#4> Tۧ 5#YiU,ҎNdC6M"`0xt!iRI0.jB #7>?"Z_\4}lS_Qc>zj6̔<>5ρaBo0klM[`R轰eۛ^@mS[Rv{.m؆jՁ .kյi(qL :(%8Q|H,k޽|勐tiA2jq7wsX b=$-9T@f= = I_˞J¿B@ײ +TyJda۝J_0Gv{6%ÀCکx],OEjݎgp}XeZ{<}?4 fUi |X fY;f&yuYgN//UMrM_[lJgl}c!`\2c06l].BSCLwu+N|YTgCKjHL- /ԉ*?w;)VwD8v Q=nC%f3 ]zSm,BlRǰWq0xg޻]@ۃ#8"V nFo?C,0="ӼİOK,Z[j~ !]޶'@x6َ42v ̢Y~~(ϮK~p[4Չe{՞'kAohLso^?}#mDESC.Bm4'fgb`4j1Obyf~QN Oz7Sž6S%X1~voX#)/[o.&rgf[3mLqg7{F5(k?=>~iye8Vؙ _IRq Uf fK8W㞝0TY"!n[CYa0Aaj3lZ ~3&w?{cᅩWkk=r22O&-c =÷Nϱ:1ym2s Ntz:;](eHkUo_$Gυzj?x‡^C{_X&]~P mF 0hƍg6՝ޞ!<(i*;cmP0ja[QH>sAט|d6(̹=oE| #!3oKK zAXr(ĭ"S${yհب}sJ^)Ty 7=v5٤E\-ߤϺ+W @'=7tΧ=Fx3 qݫLȉV['%ECC\.cenoaG(bGZذQAiY W 3??3@Xǎsn'vjkK\ vjҳk(b ::nCrTRhGL꓆;{B'SCcaϮ<(,5_>sб *3UcX0Vm:\9t8EŶ~@=,sq@`7 V6*60}@Gͻe*0 faU,)*ɹy|:nVۑHڋDsuː%/:K8)w/!<,*@=#Nk?ki;A VJR;nDӉihܱZ >(8݋Balψ8PJkw7OFw_WZ ,ѿ8<ƓV8߆!*rϠe8A )SŞ ǙpoH#Pya)ϏP`iL \ˋ`#Cx聁WC`k|KL?-(jܵrh`9^׆%_É6 uP41&zƶ('c /gt~ثhh~!q[sUHCbǦ#_# :Jڒ@[G8d'H{5^yعzD9p 2WLDKiju%@6!)Fh` ^t\~!>{ *vZRA% `oߢO$ʘ֊ KÛ>5',!Cn-RZd$N]GC^CC^CPlgkB0Ʊ.b 
P`\X/DXğֹW~?3R2I"ԀWǚz篣ոB/[}7tc`}`,ZO7.*]9jE!.}wEtVV(0{#\C3' #e_PykdvȮuU׺6q50BOB3j%B9TKe(eᣲi.0d=V\86,#὘&0ki 3 Ǐ]{jvT6bO5+oZd}9XjH'ǖ3Ԃ+gbF>PU®faM%\ZzHx6|h`Ɗڍ rHZ3p(+ պ;'ҽZ0)ФLm2g[7?& @΃Qs`Sd-md!c6"Jopv=_;;UlF)X5i| 889,KtpK-NhqP&IҞoب:g7 W|ՌIilԳp`d0&P z;/]T rbś#0Mpƶ)GбH[nVѶQ{*v("ݻz/ֈ zOT+ %x icH"C7e1' K[C&0|/I J}yr$:ۙ5e?v~XX;y`A']y),+}ΡPz>̒SFBj薑y/T\)?U`jRd{K[BOkxaZ<g%t {e*f2]{4>2 EV9@֨σ?.'ܪ;%.RU}m&п;,FzW(")\cf޽Xf|(oFE*9䏵cݬ 2?M6H_H8;U< O*Ys{HwcQ2JfX9g{IIki}s`Ǽ|[Hi%R"m .v0wn=t(p[ P~Ta OuJ@ y۝֪(xwrhzdmX)=ǘ_Nǣcg0[ssXA"Pb#)N̖?)r:*\|>.\b yߎnr ~w;~m)Ѕݚ"Hʂ?lZ7a_hهȰu.Vb<nd:dC+[`T׆<86˟8.8 m{]IboߏufGvO|歙_]وk,y[~^;"?<i%}%Ts 7KjbJ\I#OugY =>*wMŧ& Ѷ|R-$is_Юp &]^[;ZBo=0vPW "@wU_8afb'FB,oq}n@~o`Ԧr|vS(*U7{?,l*GF%X}Cb Q&-s"u|jFCAVK5b= U,&o.oCA}exc'6$_6]CH \] s 2@l4/94>zWV`hꕐaj5J?/ʈpйT 8Jc??^GT9H I-? q68*&:xvWvJd)I =;ͰoN&ީ}f.gaM<%śE%qaM5GQa齓2Uk[3㦪.9hyBz\mh7{p*?Ii"9Oȶ@y>!8X1o΍M n݆l@Tqs_)б-# ȶ5Wf}]7ӊkP%dž9 u1 ]^1pZxNvԭUKvʮ6%vp\{_%ķ24Kêm0N-@:};H#6:Ce 9%} T-icZAJK5Ҡc `ɼONײPq%6ޤ_|4_GlYեVAPSaݓC*;&;|(C;PVa'Xf>x~n$2n4r-."Nv@Iڹe!HKEЯ!8ޛ#{.~D%HPc4NA3b:E^f,c%!mϽ -rHhM7hn;N?ع׳qlÍ{A6//|v>t42MdִFqK~ӗt?}@LNlC\K˹$gdwz yC$gri66! }l9 ;[KdZMgs./[9Cԇ-V yuHX?n[$UH2Ԋ w>`yc @NRSGS^Ȱ`mejN"|~qdtJ} ).$?e-bzykbmLgFrBlq, 7ï: S0a~Az킷"=GԤ@ikre$#G QES9Mp( 觊;7j6mۄWM ߕdd h۔A_sLyMUgk^Qy)vX@R:TfTPE ^-'3gf3]Njyоc躯+h;M 2ViH:`ZH|߇@jYm Xt[+ FT]N{ޕ'Onacj_P\ä"'n`ܤkeRaFH|Dhu&otzWCM, Zv[)>a X::7֛|8}s) z<3r PVv?1 >,u:e~!I\"}Aˍ.L4{gζwOF")܀:5J_]P exՙ<6&lb8*k|٦ʾVT8p6p̛;4?ǡY ged>p8|#wCG;o%Yέꔭ-[笖 ˋfڼޛO T y|@޶mqƯ CBuБ{wDJj CΘ)|y|BQokVqSUR_B?R/=&X>5tLai"QN oW %f_[);se":la0evj%Pi Ώ5(n6S_'=3_cYnAj" ϱ[0 65$knLwY*h^< ta^.nAϨr3|' 9V殉Gg!1Q$\}b ]eR*㓓߭jb3oV[mMZ!;бDyG pʱ-pfWn2]՘qؾN|ڈDvĪ'8'1"4dL;kqzM-S ޑ{яdqZ]Hy362-^xƊ:dj\fղr :-| hh}d} >v7[lF0c[Kbgn<[3o z@뾕9+bP1g:ozeߟx`@ ȞJ~q<0AfcEn&KAFhIW5jZw lXԛ =eƑF^]я!YpS0`Ɓ{Hǚ8Id}hҟ%5)q_Xw IG 픆<6aUo0NP9s3? 
?an&wYv+ W㡝PhqT00`x2C R*6l>6Fg˗ s/\>XTѻEf8ߖZ?GU"#Injd\_ӃTftu0_68~6*KHĸjt*;;x,Yr\0GϮ*H,OcvD~];%R͸wQZs}~K3t0In*r)phV6;.CdU9Fᚫe,45bH) d̊$,*O-Ħ~,o3kJeHi:.p|L Ɣ>#p\^ʍ}Qr]WMh28{0^ qYF]n!yX㛽,]8z* z} AuQ3 þvϹ훡ҊTm\xc&/og5PqNHX<*N Fpiثi=^Jtp򢤽Oo(>TZa58PHB1 >s׶A`Ay2Y[~f*I #֘UTBҴYLz 4jc uF"%:{>`sǁ~hQH2PEQȐHRt2ө; c lЏ>5 ߗRMElrh~ ML\6RfOLY}p4bD~0ݼ$qI}P:ܙa01/,g*#—GaȬ?:8\7Q)FzkdH!60-` gȨ.΅n<| utR$5 =\SB;-ela߼˓W,O#4=a%Gix~"& }t:Kƙǒ>"M'^{^ćtWir savPL.Rv7en>?Z&=LO}Lxzt:`ït,g1WK+[{ӂץi6$8Nx\`H2I+Wl &`hWҮy\pNq܋G"[3TJν`s~$F4%8vɝ;QC6ih& 3:#pp!8ҋ;4U$H*EP!7xyEq.i^{I~|yUbX.=I/z [HVXJx4uWdžsZ.ir;c\?af_;I p%gP&<9UʖDC*0;y9xnuląb=5]IwZ>,oo~OdG&2߻@D6{y'k#a}s) l \{:$yW Q@4T[%qȔve 4~+LXDZʊfEOHL2湮ߖVB;}v0.td ,oD<8SZM;ߞÑ FbM7QhcPG40 NRf&J ;aUӖ;b2O)IG'?.S]ZdpzeTxv<%j-ӵBA.v ְ^%E9E:z-~FҾt Gto됮z?/HmMjWپ;80xbP;uMmր&*VEQ1aEtU'Al.&>o"U},{'BT99mέfb@{:4jLnSK Ih͢(|;̧t :.∬9N{|ď re|վCG߿lipQ-A) O[@AFm ۣ-0hmgur.wmi&goDq.ԟ1Z|Lu`P&ÆC,=TZ0t޻W{&/wnn=E^@wdNC7qiLXv_Js݁]_^s4'v\J۾L&" }MN7u#?#C{_pqeC+nn{U,#l˯ݑ,Omo*xp3;$9P{x_?3Pf Ը̖ ~tS qM36IU| GEfv;ȒǍ9Hd.D{f^ϯ&+ 3|'UrVIҔ_>`CPbt9s+IS7ވZ7,r2%wAMʝO`1*?I|b}>埝zeP@Aט5F?O|Qs?-*#NJэo0wul,,k#< BۡcLr)]Up\ KAסVOY]0hsK KqVs.e}7X*~/Bp^-k X2_퇯,%C9\A[jV]#;cQ.}AzHnj{_{cu3Ӝv5v3Vf8^$w*>׋y̢N}!2}/6_?g[ʗt*Jcz[աFC3,4՞6[vo`ښ6{$xX(so{S/Aqn8EPy& QYo/AlcX&ұۧt5$i9{$d tPa}HdbA#{BzoT9I>P1qk6Μ YBҊJw޳>Ew~C -{-=Xj^72"S)PwWA~xU'?Fp㩼QoAɃ6'~(\(kƴw=ҮM.AkӺ%SfnoZ}KPG8^HwGm'pQ ~fD'Naa-g3 F;>3R,?R+Rm̸BG0(HW׍#8rCXֹEqm&A^JÅcN%ssc$+ hѾ6|}b8pI en%zÆPVIiEecCwA7د6m{;ػˡWwkefk} <9x:; Fphbel[Vcܭy(@A64}2~|o$,X xy\zű)gzG,G +a|C{ac89Lm8}`<^6|NNʁ $0uT/>d8?/k :7$pyXqi*W_S}JKp5r=ul)&0q*%FV'²nhj?gߦiA><>S+\R/z\1t9}ڠHy 7RqVYHɃN)8Qpc0TEH:,dǸׅVcY1VMzŗ#MsHujWq=Dk&uq5@LenMc8k'?aQQl0G$}wl&|x :"u{e,t8Z5O#<;`W@vn99wdO<F0PGNfxKYC-/*2ʘ*/S0}䥗Д{Ѻrm؅=I,B瓰GTFn^?ӑJc0ቂ֧ Wbf9}}hͫxM<|d\k)zLio!P,?k\43+g7:Ta[ٻ:&9-GpJ].쐘O^'x MI )~Kg;0!8Ns܄e5ˀ)6% rɬ{s}2pkPeX#{ף6+]h+.Eh\ n beQef^Pbb)X}|,4=?~; .6tJrFXv" ªem[-v|ȕjDk_u7Q egmzYo!D&\fYL2/iMnju3ny4~1H0TH7`煰PS,}!IU;Xj޽ޅqeo-)gzqLqL}{ zD#CftA~?VJPm*^=!$=BRFp z?zg׾So!bw)B`޼]ƛU[u7ϯbǎH9w 3/|Mбql;4~K \vW E 
_\nWwbv;[UWkh1,0\vwt(J\T'e9cBC/ ~Ʊ=v99[0U*- Zj&nIƜLYGr~S9v+?ђ#KQ0e$e3|h1WS+q`=8z 4, ݎ[$_ߺ&-Ang4/SmyRhWj&ٓEn: %m/%l?l>c'eYp:'>r Kz_Zc^ !t.i8_7;H:cUEZJ0}sO: 0tf_Ui.qv".:?C˨J0?s03&?]I ŲBr}{r.WtzNY?`ΦO@͔6>>W!8q˩MH )E5aڬ^Ǚ@ o4ajUm~۫wb m۔Ԛ{n0PK`}sȊ}8󫸻Cئc2wrTU S7>CTǣ˲1Uto*'P7oez^%ڷ2+h'{z9-MLI3BKrTxW  HpqÎs;icZb' $:yE1 CDs?B@ն0\P+ZT|{AGS97iV(KŶ/,omsU 0#s,~Lsp[[@ w<@CuwoE2y$ `ϥ㧠Kєo/d #KM0sv:LuU(hqVWax6&Le"$G`G j!iFntZݪCO olK0%4,Q5=6o='lni:Jݚ.ԫulorjCd !FhTZӶ򰚿a-^fXjc[rW*bb F_h[ ndY®F!0un"C W9 -4Ԣ?<&~?iK/ t@Q͊Xqvσ> }oڧe=PP%xcb?Vؚ+fv xALS1V&e%XOEp]i%6x89HV#XuB~zޒtCᴍ]Vmw&>)<QɍALaRygj8:-q(˕]4Hla $ >r_4LQ=W.fa:E7 .Xps\O7.kk!"`@/Sϱ+.y)@)_?}~<'P=^n0JKzaa8k_~i"m`u&7ƚ"eb?7HeyɰQ~(d<%\Igh]-6;C/cGWѻo9nBK+ oܱ"A%<FXG̼/X`aY8jVTllZ ^4@Kg>=g.aŏ/?[ŒIq;xfhf*A?&G7'_i3 j$Яqӊ3;)%WpmW{i2tiݜH23ϼ SUFbNn7w5:֕k6o^EZ힘R<)D-?SѤI gq4]+ؘ-3:;&p}үO"jqf0<\/7 ۟oaXuav<]vd;R{ÐZ06yW;ԃoff*W¥_1g:Am$ 8o>5˚"C6COFDxt7T M€fI־884-[S;M6!o,Qڋݠw×[TvZड2..~+Wu$k~hv0sER7 6ӎB oprAՋR!$?qE|˽pL-i@0ߌ8RAn#AOzƑf&_!5oVK:/y;mwulwUl=Z)$[@F UhgH^q5wǞ?2 =T!YNܟ6ϮD2s+9š͡C=V~PVzà׋UONdg{ɬ$CUM~'(]0)YGJz̦t'ugpp[nY;Eat7tdņ k.\Ugz0ܶqy?=>W]$a_┫ǜ%?u.uw7toae`S|5Ig> .[ScO(A{_d<0%;v=Q^o=)r]w]}N1U>g Ï!˱/w%:V]n4l]tR0 U~Mq$YԤ^T[[7a:%}90Oj07}oiybaT#;toGI:|EN w1W4 u4ugz-n;ϻfWNc+ey1fh\\&pܽ=uU6c8{Uϊ"tF%4#w !Bh8w.Agh![SCwʹ 0%?; }; AGaJr%m[u"II e& C??Hg1mcWׅj{ dl-p'/xhhL`R˲W?yt<` zޔ޷8'o a$!KսW;_J>ܻ׹čXyQAmF[qccY0E8+2DԭrRQ޼!+^M֣b_&7vۼ:~dx|fAR`<|,n?h]OzDKSzsoxpszykǏOxr6 /Ή/ <ϑ8..z`D:qwx6<~h 00NRm .F(];ܡS=}s%s4.ӟ-";tx}rU_gd{W7<C qB/诺{S8I { IVy\7t9I'롲>{SԲGH]8U$4 XcVv gm;; `x g;-2$wAŗ?sg~c؎zِbvmX~'0R{]Rv80{uuzm#im{z nܲPt\&>woDtq@Oo3w RiQ0mS SF{oޮ+⒯ӿ"%g 5^@1{B^"F "TkiQ`')|)S<{ZDDp>Y%&,yiyqNؗ{54vrΛ$1vB#Z Ry #g=\FF'ѸOGbag20Im[^`$#6+?u۽YH2sQ*9ifksK%g,J3 FB>}4ָʦS7~c_2 ե 0t/*z| %› GŰöˣ0dX1/ڨNX)}UW<_aꠋ~8mQ?NuW9'`ҫ~3 F&S٢#ngVж #ć-\Ւ˵vODž|fd>Rmamp׿bif7,mc{0$֎]If fWXygr*6{6UK¥ W|ެ\~qMybf,vr|:8>"%A_t7MRmMA8BwLswP N)˙o"I[I`3X-pgݿ6 ӀjtZRΆ>m G_g>jס:Vta\.Yf>%{rfL YrXk9@%_zdaPr.)fޝl,=>SږL*a/y~3eZr kb.*5b<}3n]rt$g6{ X037goW)^:N_mX{S` M{ Aowß0!0M8-SP0iG8,4,>y&O 
<IVQEʑ8=4eorQ$ꀣT?faחO}}ڢ:j ԳϚ5tqa{*`/ c ‡=N@_a.L?ȑ5q<vnE06\b"*'@RyM#$$g$Sbqr&EGdX둛?y2NC`|AݩG6+klj'ԥ(<):cLJj̨ N^fsn?!E=_aGK.lA0V/I>]ȗ3)iDz Lt'`_s؉ W;]3^ag^D.`_3}a8Qso_[j!Xx?@KA2\kѿ"&ee'}I;*v[tO*%K7Ǝ00Xnj-87!)HO0N(&Ol7R2 .fG2}'L&T6KeņBa>~?IfYa ;p6#:amЇ[\^N<ާӉ;[3͒D-gr9 |yJcOu\Ild]bI7+hG)v|5qq³0fusw5B^ۧa˧!P‹Dn!NTTv;[)]iWPZ'o66 _o]S$) etӽb0μm\ƹvс__gTX=om-b$TC7AjQn;qkV| vFjZkaVvBwtA#l*N&@~5 +=GJ}h?>S4;?NFZ$@_LH֧8!cuDa[ hMkb﷝C8'aZ.D3v|TB^)PI~qtǺK\@{]yz9ǽT˩aRl_ &D%wEt%a:Q3p4,߼p0I)!&g˽O].Uhʈ6\}YC(t[ QC+8"/Xaa0!|v(d--=t=mf+m)2 ?/@/[7Hʎ bH Whr2T|f(q?q𸻭z-ؽzz}};GlT* CL7p볩U5I`Cz2L|W{fUew0)LӃ1=&hb Ƚ4XYmMay8Lm c̾B׏wC[ Q&,ui oZ-bOebb04,Δ|qHCQ JXIXKV'pn 0_`sR@(R.U|s72U>hMIp\Dl,.mre9D+{Sf.u|!TA.ԾUGxBћsga֓ya=m54 OI˼Qu3Š \Plص%O,^:)Il w3cA韇q+Oe6(,l!Lv6('XE2qh2t[WZiw0<%:.&皞F%B i~/J>i^䎋mipy[Ǟ,--x,p:LslW#گYh~eb3 =+5ipm0O|l!g{SH2 b/){hA6ҋfv8')yWӝdz5v0F]K!0 ~$= 0B*ƆNcvzw: 9 GLC5ͯM'0R8O AyoHVTr? 3KטgܛqxL. M$qy\\E)S$׭_ H+_05܇8J'+ 36%&P }9 H;楚vfܰ̾]N^{RnFةlqOHEOLo٠ݧyopŽ ,,e~56ʸ`=g{*Lu SE&q(;-6Mvf2mRoI# Ô+t-F-Aԫ{^lEu~{ XK_46L '`uYPh宽MGt̝$ox*̫Ėgtutg^__w=k%(N6t\\WIn[d5ɰνPsku(\X*, 5d ` 53sÂ)^Xk;n_4?;$v~m^4O">K;d=0m= =A4hz)${*&S[\9v%R Sgo"-ْ0o_~=" +{,QMW|;p|1W]g^uMPL8r]EHjUE/_&.c"yԩഊm. ߣWr:}ohN6~-gWr_Lތ4Q)6xl~h[yKb̋k0f~{~O\Zb鑐+{ҿD/9wb?zGٮc'_0^\;̫pxw"}+IL?gaCO@ IXZE!WTo͟w3j"J0%"76J| e)k'mqto5$?~Lc.=IӇսwpǾk&a-5g7L-q>Q3aOy[dFn-AI+_dRl%~-&##<ދ(fΣtXo7>34-w̓~b[q~hFkt35~j&)'vM=ɅtOt ^;M/ I羭$B_~qw"nj3ԲcKfkB$eRDVȚc Urm6c˱nGjt0gMj,=kʼnw<2 <\|}S[IyqFu\f; k-sZOygI3\V|9 ߴx4sYHH1 _c%;C/B[0K^H0յc :ea™ v=|5&>n $XNH4')WHٺ4=bJ8Rns% }׆a=2_koTJ ̏q2x~h .~dץ1#0 %VBWo`3f:`LxT,i/2~l#Ǜ. [$>Po1X&SqH . 
YW,k]8kG);{Rm EkVHS^Me#羬Dn!IQqNd'uT>ӂx+S@L1{jo[pt=?-ך &Rqsϡ7{\ >[3O?zν ~j'}CMӱ$0UuʎOӎb>ֆ|ÿ )äi..֔JM,%e6t=]\ݷ,q+nO cl{ r`O(IR.JJa/Kooѿ $tI㈰46} {r}OML5!uRrgV۪P)88ڷe4[$(BJ/ _.H~u@#υrn{UcgvS̥~$t ٷ.׀icPU+#+=VXp¤ݻA$<_m[-8 k/G<+J 60[q2NoZ39+:qefCY莈Y:p`}ʼ+N4amb*#\Q\S%Qrwh 8=?Q!Wd1T%]M4N[ω'k8ԫ:(f$>49$Fm)'=~̛΁gْ,cp+0.{7vArs1f(K6胙j8+y-V)2͐]+Jy*O\%Yb=X\_3PipivuLpPNgwq&DAOkQr+8\Oй47\du=#VOݓV[ 'חaFk* zA\Xu9]9֎Gع´u-Pݐݘ~UW"`ϵRI]ѩWby37jgw3SXTپ|0y~k4ʣW̽/d>ubƥ^PLF--%կ5`vQ1HL%aV±mNhgagS'HK,XQ/{U2zۘ Z|žA`L]JAZC0ReiDi|a召24tH\u&5C,o?N"m=a?X[褡eK,ćqg`)?X&V5M*4\z8I΄>7X<*%ʪÊE}DN< %k}' ZdO\xa#AP_QN)q|a\K׈#ۗHj/K3i08C:]J;0#V2G3.t Д4dI!TA[EfƧR3CP9c['Av1٭4h<5c3ۚ?]RU$ڇzhѱ/lƺ$Vgt>,8u+nR8p\11ZD7U`1&yIZ!_K!,O3)-+S94$Ybު8/n-}ʷnzMyQS.8݉M/< 5AƂֹ5|Sco˷$`dɃ`Wja,JK-]EΝ')w[%UYr<`_yX,J>9kSkPQO}é #{?|G3|*vޛ“V&MbeǑm8`$S*{:bqf}8p{qW]Lg/eMޜ"&kdVsA9'^3 3U>KbvFO*LXҀ\g$sȻ<ʁׯ8f#ϊ>ÛJīe^j=ㅭǐsf\Y$[ NQ Kj(:8}Ģy^NÓX +rHX+_͞eژ>^v8lL~W+nlo FMp0~٫_X_u3'b؝[c'rh4.z w}+W/=nʿI> v*d_h~,5`cf4;ʮL!̛zX@9W?eP;7ͫS.z10qSZ֗ݹe/G$) 'b:ù {FC{2g/Ks=:Ɛ_icNXfۘu{v{;EA0wZq|9p(8X */boօ}U*|f穙 8WCuε";f|"X$crҳb |5T[ I푞XnY`Yf/n:n*n+5=3C cFLI֗ P ԇ9af/21߳L@sÂWm3 &̠@v;Y@P8eAo|\HF#I߄`)CH&.PI:NNf:/M2 C3a#F0zaghy3th0tDAC1u;+(zߨu?h:*{)L* }@_ ЩSˬI9vSj~$ӽw?I(Qt6XպaXR9n _'c68#(%̛87wt`A1ȶLDEw@Jﷄ]uBe栭9 UIΐaDlNS7{%uw ) -tz/6.fj!MH_$L)Zaё󩰖rz2Xxz*^(' #0{e(YtqeuZۨ͐xkh|U=<{k: U/=LcQXq;{$̩W6T\e``fnA2y/xx'I3 c{1|(ImYp[]z.,&LX8D,5U쒆N>] \=F9W`u޿ kS>tO6DS`x{'[u|*ߢʹZ4q ͎R1l?D~'Vo~bN|^%jhry~ZckHs=Ύ|qi'bIcӘ2&[Uw#*PjR8zdB2.ôk~afmj<̽Mֲ)Y,2v}}/X ~LZĖg} ;*WdvOk6iX_aNL91 Zc%>^@\hL#K*aN$62$;foq8, z) ZMAW̶uO*ƌXŪso>۬Oc %|6KkZ5IHJí$!+fz l-9ws$ܴpipBSq7n΅r3+Wvo{w:tuf :=%xפq CUj ->s j"QAl*w}mn/UF0e*RKpDlƞ4_\I{al(jcXϋM=nP~"Fw|TĕҗpMV{(l㱽.xM=iσ_*;N'Ҿ[]9qO8 z>zD7F>.Wl`,aK 2MeLn9R/~5BhgC>5'}2Xp4 ^Ke {`#3=*{J+0|D>w*&mlgd&Z G^CKQ©gy y$ewHVF]M ߑNڎ!={Cza /*qX`1, 3_ ա4J\yiht9N)BDBwoE&m)iiwݱx߹PEwaf4 ձy +=r%h|aa}1.rӶH!_o+E#\$+-V;~g 诞ƴ/XϏ.nK|CqgGte*=$Ywz\;a0y̽.;VzLRڄo&ڬm]|ϧ_/!&78|D=<7O"Æ87pvd(6ݞ qBՁP2l9]3ɷ樗*qe6hm۝-8xMIP&kǁ1#7#TbOA+$|: 4l۸/ JIo4zߞ+Hm[bqL~<UgƁ)VX{mv]L ޗNn,g`Ҟٲ6&.:S,&צSqϯ RKu]G'< 
W{)•7K/F[Q;B 1ؚ 0Eu(GRYuΊ(_M=}| f {5h- +( .^)B/댒RGL\`uJ ﹳMƩ*9&@# [ze\f'(9_IUn6؍ϓ ; ;%ٽ =s9BRn-=֪S^'UvԸc9ea0_{7i"g}s Fnc(S\.œ SX@ٍk&;' 5VKF$9=QŊ-+8|4!QWlRtT}%70g/cDuZsJahXW(0aF8 cVkطXtIكAeb`OL; $jƟiyCLe.y횡cpAEMSgiûْt-~T=@n*}βmv"L=+Ά= ZPQpZ+]B3}6Q&*>ɜsPXSLwTUqʧ>:E!?GZY<Vln7{7+P4Htqcۆ$SQzО;%zWCQdj8t(:%Y Jұ1n_d]W`bR UuJNOjWH>׍(4g~6?R+[-!KܤJB2*Ih"0}QwWmwI;[n%FO(>-j WT 4egh7`;EL\T[E YwL>%8/ZW?atUԬ;ñ#`,V&Sr,7qqPw0Vٹo [Rga,A30˱/ɨq3 \U0=K)k%{2_y=mRQwӂb/x'`kXh={B(J@mK]BXYP/oI*lO;`ݻ ?O78K[f"˺|!NQV a_bל4yRms['HYjc Z< jKApU셩Wt<+Tu f=f}fH仗j28%:Jܓ݃}CC·br8w/*w ݾrW tQOt "S,}I7+.i*.Z&KǼ7 i0wR=xY3\`%j[:ܞIc\Q, z*þ`1?{I2*29WaܡMenV&bɳk.ek!yC>^83 GN:HEc/ t;wy̋/#6G;>i.~;zg;Ez_3^+}jԵ5gJۘvy+]eaLjj u"XFޞ(ncN]mmatoɹ{Ε$ݵB0q5Aωh\=2 <ۡMrmN7pLUװ=_Fܞ}{-SiG{/i^CagerC-pl~)7%[좭$Nq[>Cq-% U\L|s;?.֒jqئcW҄i!nN6 ?r5 s_8"ma$=v_7RE2c(Rer+ɻi)8߾X%f.hRG,KhZVđ̍o߮s)}X\ z_X]6% ]礛{Ġ㴘W!}MNq(e@/B:{ul FK-_b.Gk \۹OYzBDa{ibvқ%pCWIѽe wFn@{s~NMQ4v\|p!LĹK(j/ Ǟ(udQ(0d$2&\aN0-gmC 5 `$1z( |x+}`>gY_,>#h99ala8.}TMfv ]~]h dqPe>lu kE'uo&])[١eS0F'@Xyl(A{$'WU# G(.o4|P]?nl۹FZ1+Mߡs_fXÂ8jz(|,]I630~7Po`CNzX?{e>"Bn%deBE¢nt?0Y%{7w)?MmāD;R_[=_Ϣ&ܠjnA1ouUb8.EfLe8cO0ɬ,1s"/X6̹U]f:ztՊ'Zo0I׻24uļ%6¡u-Nq&V6682 Єvg X:S *?kq .{|H&W@qkR v f&36lN޼?4@ggDiMu,CH]"4a1^} ouZh^m ԏuJNTWSS{f(|-2\-]oՁs|ENaϰҼ qb DPjʇbVF=yY!ҭ8vmH= wy&X:77vPT+ |Mu+8Ɩ_y=~8„7 /@NO?-5>g}*nߊ{{XAs֙ VJIh  RĕI^2*|*FA%&LuFc}S/gzkNpJ 8U)@ymI{rϺ1zS#jesg?cf7?lSp7&bb0 pn{V2 w;Dj14Vή]ś/PRS4,w|ZcI}%uHB/{pE|'<ž oٞ+B ay[wZ p"czG.ozkXV(M~,TjaŒ&hbs`Y&.h ֚Jv2@\J,N|te|b~x>i 'l_-{xs1o!Ry}bYqBA:wvފF-@@wuȶ+ yyj`z&.&HWΎ_:kk2 }6JG:|}M0Q-ϑ4cocCvdxT.)СzzLϯAeޝrM轚!xB2JHnɆ7[R:P;/E#La1 ?azd=^mS%{|Nc]H ++<Ǫ7p~k{ja?elU I/M,i' ,qAY{@M˰x3-}?Y&苚=!1++8tӇm!YFM+.pˆΓ0L ZVC~뇨W/>ZRtAq^Bɕ${B$%&/`8_뇇[׺N]' 0+:X'VӖ$STݦ8hyC@{xm.Vܴ7mo܎̉*ʇ/#@.s+ `Vzg_X>1 l):NOno:UځƘ@m5L\[Z;2ec. 
`mrcSMVi4h,En,E}!pZ>MV9K'i&04N܉߷kK[`D?^bp 5+ +!˨-8Qu/#Ap`W 4v)]Wk}S,0{Jtxfqafُ3QKO S!lvoқꬣqsG;MBf/ŲO Hzu'O7tV1",tkr/;d&}'(T#iݑ?'N^ )ք6"[kx9ߘ6ء]h )$ JnX6~_O0R_]IQrOxֶ;-W/)WǶ~tn 납ܓ`MJlovt{ R*`&of6l.u uN+DYv$tss`.ddD/,9G,'?8uG1wЇSt/>x!:K\p#/*6$ {|u|;.4*z/5˭\[kFnyo,~M2%[% P^.B`? aBf牤7$ӆ?Cbפk3g635_!ItO+`b]cs&csr$ޛi\ Jw /ozە!3<\Rpv+,=cd`tD&kʥt;t~ُ-'xٗ^\*$ȃ7J(Bo=8{O_tFcC ؛ ;fϔMW3#К֖Ź+YڲUM]+m{b&2Fphpj):pE_Larn2eJ"|P]H.Mb8!׋|'r8vmDMC?ovI-ƥ[kjN9_VYS%> ?rL9VOF-̴caֹO֝Nl|r}_C>vK Fpjڃot<:yb#qiDj`r*v!(܂: f_?&Yf_%v*Cχ'c'A(񎑯cŹ~~_[$zbOL}޹Kc@~KN>U СT-P7 ;ҺT]tV"EOpq4P^zb= K%h */_>L=T"6v} Fv*WLsD 񨸚-8uQę6z?_Re/{q{KO6\jx;A2i!&Nw_]B2aNm~* PøOa-$,.el(+Ĝ&lqmck VxKLU;фuV%hur aRB4b| ngi"ݰ?@rgYhxt:Ye?8 blP=̛}>gD'_5,܃?ozVTU8 O؊Q;,g'UgMet`ߋ8{GSN=ݟuRžu z\Hu/)8/Z+/T6 [ca(w)?ˊq_P.tg5TkXb㻜 %_#kﻩ43zaNSf <,CYexJ;?;>i[R;/$mꁕox,I:𫖳 = }"4ݵ|n ^WE30G܉&f$K/e7 Nt8|Ot}(FPM)Ls$J >~ancoDԣW v,.:(7ؔ5)׎>Q 5;OL28G^Oϵ\9ev } >M0Pm)?g7ZťA?toybx)Pxp8QvJ*x Mmg<ywC_" A&#ΞQۨ~:"k*z ʵ$ *s~Xnjm2hVy#"A8߱gnmVo~4&^_qyrL=tf+xnC2m__^j0$`*Wg0J^{zؿ1Fqs#L3 #()IL=Mu̜/`(FJ?x?6Ծ1H ɼ<ر:9Z+Cۯ>ߥBG} /ozavN8v13b?_zCӰ`-&6<N/.䃥e.n+6ri6&FG^H셚c}{S_+{bxՂ)Jtr.\%yhk Mu kPpDbncz Ӣ'٣K*:/9^'`>;6̆ltVL*7dJF`5>őyX1+']{`:^0K*`{Oi%0M%[7K,sˏ v%qetj|bkg79BO׆1/I'z0o2^'~I9cnbn&{Aӭp  K?.#snpLժ yC={^:~(qv?PɽyW`ud\Q/'[nFݞ]ALKh ֫s1ɔ}FM#JI/m^߳27OV?ssY*wA2r8ad0tzwT7M08j/U7W_A`xXZEt6 8`? { fgh%J2~q. E:]i2'rڴ_"aT. ~1ŕI!HW^-0)wbRfQI:_&KHv"q6gנXg)v1]Ǟz֭:'_RpS=E`S|z\ep,_ eaՁ1 6fE|Vun)BA[N fqHSQFR{35=@{ޙׅ`H4Zrh\UkSV30X"J>h<  ,8.J;)O--y oWVO*X D..9Ѱ)Ao7pbq\}lx1~M}1-Z0c}߇7ZF7*pP NfMwL!P=4XW>WwХX3zMƣ WNoћG>v;OW䶍>!A@=X? 
7 .$sFX+g0<4G~^Nl6PtMpzeCRw%oTAk\>?IeY+" 40{N nUa%H S푰2Dg&L6t$vWw8+WtI e@ٶ.mƒŴ$f5F">ŌHM HնPU,v ,&O ŝ14*?/9js!"sRk`Ay4#`P\mp b!?W!0Z̛NbD _&%WDLZ{8mv髒oY%b Eg`{6۟䑑ߡ׹E%qᳶ$N3,HnC'BUq*5ڳQoAjG(\z_BE&~Jp®(ӣ:Wɵ{."!>aWE6Z0ɧАu$ϵ;ec{ĠKvy%6nTtv-#SNbfY)֮b8YP:ХS1u.?Wʤ9{p=C"{<|5M_XB w{,Ky7 ["vs.Pi4{0UZWD9Y`rs5SaVqϵr7@2yܯ&ydgݮd┉ug3֋i- =ȻV ] |L$6S/ʝ{}YUFœbF̌,/=Vq|K!X.0Fk{sѿ78p3ٜž'vKqϝiwqbv/qe>;/Z'(ʍXrLBbgɴdߞwopQ'T!:|Ȅy,Wo*éL>k^ݭOwƨtQXyvj/HO<&q҈ơ$[l;^UƑ˳Udt "ʱg4nWpa Xe>!D=}, s3qmgg)hV0#z|}AOf'L_ ~) Cn!#kp(,/xZOS2a( /!鹚iLBRJ}@l{HN2kV9+{EdҾk VϤ]MljR{a}ץp4@ƥü#uќa#42`p{ӎ\UMY-JbwOq2TX-]ػŽQ$Uun%^r|HTcՙ/ L Y.24aH0Y]^kGrN0t?jkDCEzYu K^g{ j}!+elnD2f$(u&^ߐϬ }7{KӃ;ˇ t _k[=l.O6 2n3矆i{ $%jJMWqr*48m!a*վe3,GY#IfuG[?.a}B>˸͓&w"a UN*<Ko:qO0uϘ0,x{k4Ugf'nVvPxn:j|jw>Yٿq{q36)H^1Mz߬řV6_?N|RƕX:+bSYw#8(Aҟ ڜ מ}4a8c EM E'IW HDžGpJ%^!m'GZ4RhFl8~w7Mܔ儙K5C/=fs?`B,Oݰw^ ҷzs#B8[Æ%{L`NK.ujո.66M"ZP'8?]_+‹@-נV d)gg ns :\gqLCt(K28p=* ۢyȃ3&+B,8>?߉2q\20M g&n>WW/s :awa}7WwZ\x֜8&V~]edVapzq⎻2Atg^nfV 0XOnDMuU0$$b"9ɯ4׈oS8NVrѷZ|gIfၭ[`TlݱVKK޺)5^Wbl}ӻZwh5e%6;!bey@ђ#g(ml%(vnZ8UY. 
SƺZ'IZ]`g+W_7BΧ۫nC쯈:XAMTL +IX;?[//7үDcsν%H]\:Cw=5q{'WH'c_È0W1tMt!& vw;mcEHJT|,uv"ܯGd*n4A;Uhu'h`6(q3z|SAyX{MoVoJr|snx^P-c5BUŞ*B {mt"Cg$χA98oM.&B.^ >J+ś'3`IK-Ύ̟N!TI4Ĭ{(mKJ~XXSfI#I)ffN m= {nr,HV}:JհDغ!Ur q?ݸ5}LaPڄ?'RVρpqѦDd3;`}Dz, Op3EV_x{S_HuݝXlTl~{Y`Vпh'J]iXYޟS'wX}oSwܪ@Xf.+"5y[.fòSzUpOŜdx(V#wżyAT[AW儽/p X^R&%Bס$Ǫ}n4^kH?uf ͡G`faB/_m=3;w$``{!rM9s~其&07:͉o[ĮsPvܪ,th&ˆys_`|7'O0#eN<=As̕>8Ub2`TyhO vs|6L2<dܨT#ym=SoTϱv\ Keށ$h'^eX!&fI÷$];:Bt͑n L9D\ݛ0 )B9TDO˜pפ<#xNjhD`.jh/.$r;w5'3o^ K'+oʏQ!p6ǹAN=%P mTuF7͕or8t#1-W{}m@T IMv9hjѺϊ&zZT-0ƕHGAs$5+N@eY:$3aFTÅul˩-~sIOX<ŴQ-ƴ{4i,UlO"B ?#3Z6DZr%[j;O=X}u&aCx|cy86kOQxqs;Ht"|@o\ &^K],\1{XbR!?׭' #{{nFW*B0Η(`򄥸`zc 2] gC/+H7{DѿFX4> 7IEp%}~MdpxZe gӠ*(@.Kg G`خSï{7/{/&_!Yf^l_Ґ31$ET%nP{aNЧ}k9-=bW Wb+,7 sc˯ ^\$|ӯI k1$7INJ &EdEg*I6|6x=,t(\3Ox!.5}ۧ#ڱ7qC$7MVy)P6mhdĞ`O X0osgT9۰ŕ4,ç8:{KRTg];ƞNU}\2Iu4{S Ia05M[ ٫?y.rN6@=Μ5 _Z:mO}/ ߓP=ȋ+J%W0|_0 l)Q +]eU*|7h g1W(Z s"U+ HӉz`/ f-n=!)t}Z}3ZĸTW''m|J|*;̜XCOU7햟'rH%¯PzDt-:òX}9V # F M!ٮja6ck$ou9.ルpaMZXI2=*yu}4xMQFɗY i8 Cz<__'jR|&G|A6I.K2|Kw҆8o;һ97f237(ퟣblZ&"8-{f*5>.ukL@ ]8ܡb,vEoO11Zޒ<lf^ƪ^ź!S" O׮ \hMD+ yCG+ǬGa_N?>XwD!L|U ː2ɒioQ vFa* qp%bzt=28/:.Ǫ ~*w)+jL~?A0|n8Vs~5J0)M8~$9~CCX10ۆ$\mݾC%H.  
5Zl;; 육k4r{g+D>)Y I۔"q>{]MUrx0aOTKZqElY)0Q Ѻӫmm{{ч0 +d:;?`s!x J6a3mŁ=4mmKߕbW6/'[|oQf\}+u@2V:n'nHmmo#RK̜y>CF,aӍդ l%#>re'ti[4^d9YJ elNy `[Чηߊ"^92=uZ_z\>'VRaˌ>r'wD.FLW7J&L|,jYo9` 8ĥZC#xp}դ.Xsݻ}=/wh]bk8=v<`8Tw"{ttaTC PS..\b{ >r;" `j5ش1}9]*ދ΅\0)(~W$NUg@ݻ0\Wv( [plP%l7yvg%=-HVCkLj?,{|\e͖ #Әߓ]6kk$~u !^'D>Mu\Duaw{ş]lUS*&?OL[}&(1׏P pi8|ql7 ص/=܆F^šc2ؖ(;;(|q)wޯLXti_CݲW(~u}/Mj~4~U;c*`U*yfl7²ܳIPDhA\\rq`beNzaXa#ӵSoZ .H$aOVMy{QL Nn$bJ Ći^7$۳џbGڤ1cg's8Hn$;azGώ/YG=8*Nx0 N\zQ z ɐZw@F$Q/[=T`fsyh$c:XR9?{.'-ɑRƉÔ,Pl>砇<,d'ѿd\MfّSXe%uXӣ&eu`N8 ?"Bީ=p0 4 K HDkQ~xZf'BU22I/~'(P{{\|"Zu".JFUXJI=^QGjGyPe7].SC4_y% 3$u3÷ϔuƏS!6ep踗_ٽp慼ONnF,pƈ}%G @훔֕+8:LQ|{@Z=X೨ 7ާ9~lEsн'iQܐ矂 [axiL6=REkn<ϕJRNX.lNωn+$_@MS{g\>'g%lp[ɇGs^e-^Pɍݘj>+LV'[3`蜗{Pqy:.ww4v𑊽vZ}`UjqXC$q hv[%ٯL 'ewo^ owBW<]=Ql `mI8&e6.Nl|5{nϼC?6=Fm7sjuρ5c8V5OF;5п5JpJZ<~R@09Py~{h?7GH&_#ܘ ʡy0o9.w7`'50U|i+\ I;D(:0P˭uu짠Z 0/g-N2(44#->ޱG>v^W~7w4\Ec鵚i?&̱qk,t`-p8 bCaLͣA06s^}v/*cEwh*bIuF$n6mll.d[ ܢϒ 3\oQZL)}F.)nOn/ ܅NWln2f+Mo>g`̳yzm8pӒVmcc}k`vey!55nM-_1`1@v3ҞR{Ȼ5Hj'vB'}!^e^/ Y~0l0(#3j+%\R":дA]b$Aᑰx~ 2OQ P#U%?N?t% 5O:.WOV:gcK/!mA/, #W0|r.+'N6JY`ƙǒs6:__O_ `Ŧ v5>fݘ|D.Xu/& 娬, ݤ Iv( yI[A}XI z8ǐ%R_FGHz|B}b}.-f8^3!+0c\?%]ZoWCV]) < =kjφ$KaD}i߱Zr`[}6nhA0> eEcX`ԍwz5WpWcNa-݈e, O?f\-uƙ_|51}u\nɕjUCvAu#ɻ{ë$~78:C('kirYWo o.IЯ;-)В\nkxLY,& /nuNfl('`/x&l/p4UƮ)0fB?/swӷ0rrj|{)^#`'9D#ZҏE[?`~w8llG2{8&6+DMNhO?K, &ASܱ=\& .&AH99Qqӭ =#4INh2s, (&dL%Z usBJ1T%{8M?+rGNy,4n_]ӯ:үEY5&N(~[;*`WSa;Xvxu`e4>Baj)X"vs$E/*'n} OTz܎';q)^ C¢e 6{Ty[p}0FXƲ Ln.}6=ܰfe *F {ԇ͔ps܉v;Ω{$oaSC w9֔8E6N\/ (y0WI8w^8L~zr+GW LU9ﭻ=O#ҡk^7 Zk*@;z.p5 Ƨ,kP\ 9#++hL*M<$7f\x2y$8ٯ AG~,M2K*:e脖-^m4v[SoMsXzh9*{s5ڀ.N# (Hv}&|u m%??~޲gq|tJy²o1\ ƶpb ֯q.mAS*4o/ 釡텫ln= M9qZӘ'˜)/xt[_eK21m*TWa5}{Y^وs 7籟?akr= [®1?!X)yU3>}@=47 :e+<ԲLaZ̖휕qMAU®H#4 bo.躠!@Aש(Ĕh6Y*ǯMa G66h7/^B6p2~d nx9%BS9D#U~Pnv TIXY+p`}N3˱17:n՛׌LO^Qk5jv-~fZm_Tru/*CtV]l9y) HH5Ƒpe?t](6s4v$M\FV@ v?8/mX mM]aڴ0t~m w=Osy)HI}:IR5Aߏ7H]UX:%+TC5YĠw 2^@KirX}t*kgiրY+[*?墰!s3.=Gf~S\ֵ\E?uguŋd,);uFw>H +62DM t/&RE\5).CV%: 3LTfW#84CmQo*qbK͐N!4y?#JBr7׼8R|f@յZڹ!t77`rI&H~s¡'@~94~>՚rmcЦ޿{wIōjr.4pP 
\fNnAOEw8!QbTf(Zr>4}X(%SG:}LqB;4L(;I1a`8#yqHI-9 㲾!\>cSPu(!mu'j5&j[6Q- 5+>8uu/np,'^!kK`lH1w:9IX=a~ThymjJ$E%앂+X7NN؎XhIGL ^=ZcohLfyWZz u_k?.߭C9B? '?'9G&b-Gq+=\ZzR+ `N"󣯷 W7JY6V>} ,6A=e~\si?b{dcpp;a<ǸZ2g%Xs>6t>#Ϝh20uKab\A 6CkzpYt:N|z3xK{oVu1 9~UYGlv}0D05%R^nO$L0VIDkPcm8N|~2k+6*h&?xaIq''{ oXcX(E4;.lBk#6e:b[*I~IRNP_+ک)$JJN]B:AꙋLon)ʜ%Y7Xcj}Xk.rf<5YD1Z|C`$+zrcRp̖njmE'%ȷ3R=#E}./X/3.ͤ8Ӷ+ER_Ɗ_)1QL}_2|WZŦRMeq~@Օ8fbpY##IJl$Kqi9+cvjRl 9Go;5@t=RU֓,#pU.C7ع6yLQmY @~󛻈gąsBK+kqj{_t0w닕^EԻ ?봴#/0\zJI 5ߏ4ڮӒhN|ڝDGKU2ABlws/j4W`ܻu[aN5 ?so4Z _sj%m:S w%- z]z؄aUbQ+M\#ڄ;״xgwô`bgh~>&Mr8ʶOY.Ҏv[뙷YWp! .+ҚN wj ?crjegU4>3&97Cݝy,ᵏ9: l-g>]:3YSMjެ#&Xt`@-I:WRT'|4rF?DE-lZ_wisiD` 0Fmk5ęg&U/oa{%Qq\J&3k}Uqti ӜibÝ2pYw&3<2n'a Sa0Wɇae&K$M7 &K ^z _+9P`dL];K#kO|8#E'Z=O\MQEdfy#GR ӱT%J\9r2uq=uj?9nDCGu0]q31U:[1a$Śf m%g|]߿~a{@ՍT\g[ t[ߞwZ!yvܓҟuͱ6n*mZ)܆$nq.[n%AÃl=7S?Z16O:\賮҂OA6Ek8y \=409{f#.x`ŏc7JQvY=tf%vm@=h;,c6S 8«}"= 0SM,NHu+xJqJ* TEjG2ޞbۢA{H!ul4O%jN>{) WcӞBכ7̶ْpU /ϛ2U I= jgjczJP/YBƕՋ8V>g.>nJA궁=Koa?v hO&aBQcjz0\ ( ̃8?*&GlV􀠋W|Z I6.]Wfn8@e̛d-r2ӷ@'ws?/,fu|G{0\c7j=9үC{p[јRFa_]|># k0Ţ$k}i KPjY ͝ 0F@HYĪRaЅY6ix+%\8o &68:R+JX᥺"5"f|>qƅd[&l3h[ t<⬂ WLA+S'_8LX޴äEj/H.AW/\ I[-W`Rߨ,Z;CpcF)t{4Eu%) JZKup=Vlt)ܞ|^,6t)KE$Kδ[GnH[U>ffJ*wA5ô 3E㶟;+]Ӻyg?Y=@T㫣!tQ:duݲK~޶&דGؔ_#}&r^$r1f9_ 1:kB}QNS$‰siV~juw۾.;8>=_iK&${?NycD':5#V_}v61e rEE^l:C\[Å{J%謫ƺ==UY@zޛ>ȗ|*v#†8M6Q^"oE88b}詥(d9(jυh/sWv'I/6y`u͒^sgaǶRhXx t33ft:K)f=xL;Rm;Mҥvmt19#q2|w 3Lh7۬Xb`꺠jKG8Q&˞|U\.kNon.qWu.kb~e2Hm*P@Zrvfcol4ќęWg> W L),e/qo䳛HI2 `9]YtgIs=3zj-pbL,>Kw}'Ku[&&MR呁pO°0_t:wZ"*l)P Ey{}PoBziH,ZRn:G3XPY]pL>eZUSy;=τs="G9À߮yC\?gH4VL:Yv; \1p&g :u&7処5n[eU ϕRqJ,{xMy{7G"Xٿ=ʽZ j8ks$^"9˺cǼd}1hJW$uXD,O`zxP}n ُِcqV(NYp gpPG >'o 37P4JMx3υ]۫4f}Mx^_c9iwV:i1ⷋ%1d)Yn ?f,q%JU:b'lv5Z~d:Iv+rv9H={xmWɸ(6+"171u Vh~ɾbؓa\{Q\Εnvݧ=3o+^Eg.Bnr geN1"I$ǰpE, *XDaC>ux%2.Z(U[gk,uZ8;u_?K'B 釷pЭXY:K/[zex]v1vù9($ldn=N=V圙L#JkL$mbZw}:9 {c>tZԜ@7Gę¿wGio=N4G`dݳDu-a'}Q%"Ifۏ9,^o]sKib u)Oa7<}g3i,*n&֏ f|<ݧvy% CTB5X?kb f{I?}sXg|?cѻYGn9󝺫>$'|(50{y2Lh_>)u.h|`Q/L=ο2~oo3_OEG/=M-zHݧqnv,m)SX?5,x ix{eL3l#Z}®*bq+>|]J^X4Iˡ/SNąVm̻qw{sh2? 
:AرhyngI(iMH>W?sj.p>om+I-+OQ}OR.JCPc}6*3X!#H`J jOejӣ>6] I|0h_UVz5kEf6MEy0KtVX> wCGbq[9'y,ڻ]D&+/q#By'= m?>Ttk<䂓g*;s%$cT䩇8,6& h1-V%&NQ.-<" !RW\i.5~'0Dsn|{ Yzg;lRA-TLps1uF9>Z&w}cU˦w"JӋ8"WΞy,sOe0Cr1֧8yg&X }UOSaNWCm(s4]䕅Gm0|fDP|<*N2>6Β#5i'ue5l?<wyo A{QЁ MRv*v60k4y'vOu1Ц?/HB.+ =FВU?omdTLЛhtF`v?7Zc)$xkW2b8O $yq>.20tβ tߓ WThP <4=R[C;`=ڕ` ҅ǶsPIAtQC*`3SK渳ӸZwO;/F@y۬m_qH>nK}3X*&n0 x/Bgᝬ `%F#`^ObDa{+["Ep:r&to`ΛyA]jw_i+[T3hcf9̏rqwF>uu0%Iцe0^g*M;ÀOLVvnݰ~>'o5ƮNe M9cc s,ł?ᯉ^ol\ӶyE ->Sڷqudg5k~>%YV/[׺punS K,>]qP9ӐHڴa-N2$7Vսhy aӾ)fKY8xٷK `ع~&N?Xx=j}`r Xդ`1+sFxBKÙ ˜u!HΉ=q*?BILz?y]e&e)oyǟbFv"VyFRsmU/oq\$Xm1~X$vvCOK{`[b=ɲ tQrϯTڮRBz7eqxõ< EȎ5yOnd=~'W§Ӄ㊸uFLkv:sμJ5l'a27,z(S|!fFرŕ?"0nN#gpTs!V-o$Cj3㨩r\ah};<ŦR+﮸0.>>/ViA2~CZcF7yp\u`/|(<*=t w%EIAp9{\{ ?{x<̬X;ؘJw(?=55~GC_ u3L0?қ!,{J^&KX+6OPwkI/쏻o]ȿl"(0<k Pc+/7e"eȕr֢:Ne 柫셯?V`VGN'+3>̉ a\صQq*3.zG=gI%bE_QK,-Z8VN|8=XwK08pm6< }Yp?_쥷+8!5;֍6Ѓ'Y^b¸4ե(N,[&xH.R]1;P?9ڰԙ_)$g8`^.0;}_n޿g~k'8I8ꀙ& pnkr1{.e^3}G5 Է{>T>KRȺՕ7drIX{[B ̤ԟ!t`;,d'XX䃶_ 0P`5(a3 k^cWSPVRuG)hg&E웨$Ǚ B*YPx23yKl|(mACI[H;1uDʼb Z Mnh g]Wa΋|/!7tfXu{}Ca$@rĈ>L:Sqb3!_嬉j#ZpL (byS1+#[N$TCZ֮%ݐ.UЁ/֕˛ʪP Y#vةշc67oq -Rq$r v}UWd7XɑzwSߛ fYZoW傃#*ṣ8>%$$I},Iي$I*YJ$ERdKB֒>}m`0_sΜ3}_3gֽY,;Lh^[k hL8ʛz9Bmlp$kK̰6OvNeq7G0zXfa$n[3 x= <@9`$\숝9)7٨ZMR] Po|,OJ|6=\ܽG2M^jl1?U2\`+c( T5j!]g;NjBeB6.tU#k<8p:sk/W 3Onwl#Rhc\9W^?˨h,wIXld;,Mقfg'\p>l8,\m]OZ0,(| 'MfC<`DىV.k?@][_/UIJj[{kq+ܧW:w!\'|w= ho{AryﻍdYNȒ f=;`u:^g!k3OQ/5Oywƹ؏[o~)j/Tzq$w^[LO=i+SXsWUdYbm)Ǘ0XX9%'Zo[A+eyZQl'<8by7XӮ'pt{חabbi :sj+ +<E@WuWa,=jzwʕ䨦&jqW ̘<PUiV tdj}qS\ľxÌ_]0$FL %Xn.c[kX{`Y}lf|R?hf=M*_f3慔A 딹B忯GdldR~kEgG*!Dc([ho*`Fo]X>f%^)rVSfɴuʃ|<&[׭`svx Z`@0p&R;ޞ20RY WoTMja;;[0+IgDC3OO!50w7C,$XS6b]ʻIbY6O{/m5GjeBzzz>@?V !UpSl- ncFSEl+{%F;Ϊ7\8Hp[ɦǿ. 
Ck3gF}Tِe 5U]0#9άe~~$#ݿr[N]sk6d+IF\gjWؼ>BP-ؔH'VC%#Om(xȀ[:C1LZ,R a҉ K Xfq7-9|pyu=&C9yplbƕy\?402/G`;m"_HnS:LFN=ش g%2 ׉á&|q~˂27< QxaCHdұDž40+M)b,:qڃ[:H蜻}Xؤr=㏉ރٵ~hq7Vr6tg5\P.y$ӥ<IJGgArMoE4qyD5w;M@y3r3װVm+Wke~^d`Ŗo)"zY`^Bt4HK\nuPpZC1LL9QFz86l|uđd]x͇|ٽY \y}\ɛht[ҋ.hsJ3.{SI< SϿ}%E'n:Bcݮ$u .8ca}b~غ߲^Ph*!a!ζ`,gE7Ax1GށKy8ɠ3w$ǃ~\>% BW~[o!X86O_6 4ylX='34ߺVZͦu$GXUto S:Su;n|6y!+qeA~|NY6b2{{צvyG(rVdu}o=hpܒWg>v#~HԀE&ǒ 8wҵI1 2.GK@όKi,smA/ \?g~1ĩ2+@׼fFl^;GYf'ׄ5<1/$@oޡtq>3a=xrzhd;72jX o…,l@邕7tecJ5U_7'Qz\u s#eH,zQsi_a9Խ=iu-BS.g2e*hǚO?n`dt1T9;o.wn^= R;P+l+ -`Qӿ_PIV±h1{} ΰ\д =jEI+"jv]~92 Rd|If)%u\޲d:z 9f%fP}70Ψr~7B}?I#v^БX-(CQ? a~] 0{J|i0+db}KgIg+&$jOV(T/%F؛#8kߠY6r! ֳCv]=i'L74k8h &']0v姝aGZ/,^t҃:!*n%֘P}gZ5m^vBVB0&RamT*'io> CZ>;' uc ۠6`36(T N>ԝ}ws<Ƣ,?=-5ND~  1L,hJ#n7$`~Te'u2%:0aϳ n%a/@r3sa:{WqkIG˸Z}chd8e` #&z&I,HA OXb+j~dO</$KY(~kEśi?3lI0 _;;ygOO̧ȑl uG|W'rq MS ?C4M#'1yjwHMğî+w^܈bú2YaVkV{SM<7ܪp禧aL/Bi=x܀NegH?xW{Fq]S{nB~TXo*0uy.@zҁ|0ПI$˫ fYP6V V71+,w1!q21h? a2p^];t("S ҕwqp)gk,qֆ Zi!+H[P#O: _7m Y v߰CsrL+gȁEKPѰ2'N/V|{8%cW/h+{: ypeܗ+{"V9GhHWJ쟩xz|{ry36,z$X?{7 ƢykkF(i)lK0%Z}M]=c Slե~ у!{n~M-a䞖K6E.sz&^`y( MK3Sic>9w:Jƾ'vQc0!tB56Ï$ޣnyѫ0t>|喂dmDjfpKBW,#y^.U zd=Nf^Pr yG8rNX?aJLPBO jx.K*C8&~N2ާL9t? >Hɭ0FrH5|-~;}g(~@;,ėVeK9h^JMm"aqzI0wL;k~Su0IRE{mTfg12W!2N@h,zw?\`H#@>`=չS໭k63NF1<6!rM~I{_HlԆSO.][msZvVe8]4']nO>,,R;y]/LHJM:o}XRSor>hC#N- Pߏسu0k'L$UƄ鍹aT0K`CTfyT+,ޱm۶l|5BTLΕvY?DlBŽ}fhFd~9)ܳ>|ֵfW3kZ|;MGe8Ȅ;oTox$rJtƾ-3}|_k}аeO!d+>4I!x-;G!{俠E핌PǮ~0LM qNŕ܁=ָxyu ]NVyel2.܊bF|7 tR<ЬT$[L[ Ƚx3ߛ`;DEN:'풲O"[fSFP_E)A֜=8zv(rP L=Z07k(\~뺑):z˳DACE98vѯw}Jmq`p1a|"ׄo8orp!j>h(C U"$o^/KꟅb:WoSmH;ׅ)-Yl>1N QLRyTZгD ~vTQdsd~+cӱARX`klWTnopͥWP=X*)*܌"n01|m׻yRcFM$7p:E+Yk&.<3֦$YL?Mr˷IeƐ!9,}IM;$u'/bP&tJñS3a G43U wVE-kt*=jaO.%>p~'%/A1[})v\37kY&w։!lWիL*3Y~-[L{}!D (<έ{&‚Ll^fN0t=. c!#X}ۍ)*1)Ot޻/q(M7^<~ ?u:ᷙI]BF`!s?͵gg]'q~t@չvVs \vIJ4/krDً 0y:LuVo6dTمۅcǰ2COᗷFj/[huNN}gl}5tZzn`I_P+C^iکǧMS~{O0eyfiN jnl\cҪ5;qqHc0\ݟ:n1Eo0LYꆹ["@hվ˂ + J'GEErL0.\wcwr4ճ#/np6\]fYI>s|_=%4>^8;7c7 \V̌su?Fak[1P[T{qߌyi P}ESm qN`\C"rSnz]Z]wG]bש΍GKO\]gk[?-ێĎ/±|. 
榲a"(8dztTV DY–u: vQ|d+dܛ\d.ϭrei8\kD+u$dO"\Wj2S4 ?g &ar`/ 4ڞ7|L _^ (~$ v$ ylDxjGw-ɥg(Tj}j#վϪ|}f>  5;aD (S;CUq?a&2[!O`hٕ}= M N &g깗]_'6m׮Sr9 D?8wvH ȭTN[ɛRDmkr~^\;  7`]rOД|,H<) /`e{g2qRÈ I\Y0"pK'P.sk^$myM.)lYoV-oD*z?{f/f7'Ӂ%^HŁX/txNQH{Z*y00so;u7~K2 WG<GL>,w4:Fo$IA[#~Jrlt][V\w?ZXFgU$>.NnzdztNn]?cty?>ag+2{qz^v 7P񾊍$[߿af|fi-s@ -l45{o֙0/I=~zֹ٭d.z]r]u Wˏ^sB4 WV,Zw%Y`zǵ7s&:*8vYb_EHc׏gD򰍫7(RBhԦA'S_O0b=.؍g0>%p%`eM:y/:i”Ѣ`EL,OޘfeeG׊:ۻٷ{.¨\-7^3818{wz9\;W1Q >Kq\bW`Jxts]wXڸ:Ү&<Ä&\*t7v).}cV8)gW9ɬZԈ=o3A[:* "AOymMwd혨Z* !ϯhj:nP"|IN*+9afrj?}wLUVnUȎeqz5,{i2>i$Ut+&0)(8o4TȟYP[fDR:>7I)t53jW2qS J́Gg ӭW¹ bEc0Ri !+JV-˭d7m__M~.Ǟ!ib6=~Mz/.P;FUDku]ё?>{,wOJ:oexps4~eΓ8eߋj9_y*u}<̉C7\ = ~FFU+` 1гiW ynC]&}ݔ/7kS.=68wZj:dI!)/= ^vF)y Iu!0(8kAY饴1qť]6hK8GIj #22zEٱwqfOc.dK;76 j۸&qagoXlؼC̓bpQ`S@ӎPkO=2=R{qC;Z洸ۏ O'".]x/L'.ޑg*.-?zfnjp9j%]l Wk*W^{C̚oRnX@Gr;K܊X3n KsM6Y@tR%Rm1l>| G5$?wVB_HT]pKJ}?ELky Bg }>]uj_#l1Kq8xH;Id̎^º38R{H*;ljIW E0ء)%i`;?KRr?z{t_>]1{F_aMԯ$N X0% =D3Pa%m\Q;jpjzLunʏ#-wϢ&[Y2.iR$˴x{ktże\y$S\M5.}V{w)sώVNJsːGL>q?0_}v^JC,MI&9MBUeűPu~y\:N=lcӳXSE{~@߆)9X_^Z~h x{Q [rfAhY W/R!y+7el&6/J-2zk~Cˑ${LJR!,JohLJ&ڄðU#f+&XڍO{pfù?c[jcLh¡Q}!i  LʔlM $/a3ח wv" ΂78p+Iп9Z\sGHcN/%{4a{Kqn'] Mdnda;3J[v15K.ԇ;R瑤`1Ρu8AQ?&EZWQyhOSK':j1S/w6.f =;z#e0Oɇv$t*`gg"dsС䂾%ߜ _8#ڍ`?f<|[=2BLKŕgzqsWaREVT>ye-߸;˯AOu7LI[0 ,?f>]]cQu!KOq;\yKd>So,ExwbOU{dtf! 0XJ [ć`~u]J呻x Xgc2Oi=uB3NVLB+ [Xcd=,DɍViڃSo۱M qy(M CWrG;{^mf` Ģ8Rb"<}sؘU@7i~.El ٽtOE᫂\_I ?~Y}n6oΟpɇ /g瓏,oF!=xqʼn7pg5PziO%OM?I{mH|vּ)ct.eqh].,||_BLJ'qy84OsT!P5}# z}SkwId8;tL1@ ;/^ ;jFk'o2{N$t-XƱQOS ҇);`l!cYk\J u)[U`2}gyj]2"%5vѷ-(0&c)qx|e(.:@crտc)c?`]<[&m%Hm<*؇]%T@#Ւo\춅8mÂ[05ZOݽ$&PΌJ~dj=nx*PŻ/ƭoK>:9?:^K|,:AպWqLEیklbY{U(P&xfKo<ɾ o$l u_|^cRV]Mku!,? _R!OHnf6珺/ 9-Z㰳إ~0c9e0;|w"Yc_?ʉQVٖ߬??\~~SJ~z_XM7ٿN|PZ)G8+anV#At]|~z\zHM +eye`.sYR}?pz|eEӖdB5IozSC{;,2ڇZFp87<"-[j\:Ȏ{Hg*6xi &c`G~2/Ks -ؙ''V~=PQ1"='``[M#`*p^ӌY7H˫Nq7k^[Ywm_ڸ%e X6C湊Xllɺz8lj7{f'z~ .?jt{"@5`^o@iQ'E sgaUl58O۱|cw4 'u0%. 
ܯb#a̎kԿVaeOQ@Ĵ٩.'ҞV  QE ,H,t]_۬Sr"9F^P™`8>){Nrmޘ|"T_WR'}ݜ9O0}sv ={ 4u?νec<[<_A× +ǾeH,Cor~*R-jzMyTirz5~o"eE/.À_4 w6)B]0%|֝T&3BΟ2nS+^֩n"NjuX)CHL ؾ+kvnƺ%Ӯ8/&ix{3B+q|tG8s^ɜKf}ct<Z#aK0Qou-C۽o%lqp'8rVsMۓ# ؎cGǡZ5VS/z<ߑdfoVc!.}W1δbJea&E+6U=X82 qwI ).~j'>T=}>tkk1ܮ# KImyTpoxh1fgԷ8y[7;%ok$ /?ivF9Iv퇂CGUWc. %3ͷc_Sal\gBg+9uZ{gj$K3@BE݄(et~]ZE`#oYJыKkG+ٞ͢XҫRg7(ġ)9_-N}Vu=;Kd`CCAУwPC$G8\='hovcD|ch Uڣ/_%#ɥa, ^)YŇc8m-l-P2⦔G,`:^RFwEs'KL,)2lvG3Ңwg `]JlDR$,y4Ɖ&{ ⊲P< 7"XcVK3 44KᢛcN&L>C#1%8{ơw,dʫL+y9fz;h`#%Yk eX^Úo8q$"/a08Џ/ZM'sIwfhr )$S)E.kLI*8I?.lĪv"`Ee ayɳrmiy.[S'e9g/'G&檝LkztgߐƝgwF;Cx@ufW+h~//N&(:dk|upxErڈdrwګ/^!.lzhKqBynNīV9z/1#4I5w~87"ؼr9ZSZz@}"ԉCvOt`uخ7$(@~t}:s,z=n߁y}Υ@g#GW;,jxt [>kma .nKZWlj8G;L0WvJB ]=g悳`6p,zKnNHؑI5%9Ql7)G27L|j Kt1ԧ?Z l#:FARaf?>fl+cqe~x'brO$eQA%g|ɻ+Yxubأ{: bнؠZYXKثLmU}gNͨ:[A}[I'ca$Yx]he_8UVu;yu=ssȧ5k: ⵍ$'HTus)UxB_@ [6LN9__Ŧu&lDy f4`QjI>imm>5B+pHtgdtEz6s齁Mޅ™55waF]$SEU7߫ܛ #g);uƉ FR8Pz֯_  'R?hiIH >Xx.G6`oӭ(F;{ ]HbiuW HuK6t+Sb\43K۝QT]PR '$qֱ4y3 S\~-j1=xtfdOӼӏü/0qW!xhH?+;ЃdNl&қΌu q͍WIGNP^G0qmopxm "ra ^7Vyƴ'xořo.b˖mqCRA[b YhXzd 4x]E+9SػPM~PRe#-Cx,E~/VpaͨGJRI.MÎ!}DE\+yڟ7 #۟l~Gsy.NnQ'v5߰'ήPf[tKK'>YC71?fʏ X-))}=DtF/5bOt\]&\I6ݧ#mBx:\\%iuSg_ƊV EVWG`0wt:L)4VNaZsz~0sOe <.zg)| cv iC]qKU ?ծ/,T/B/H.i"֖wP(Ўq#~tyߘVoRݺ-qSt8P -g\':>U;'nf?Yѧ?v+5`lP9ssĀ0cM~L lU$\yCW_I8/оB?r& :\xA9kS7L_}%N㼜:0ةRJ 61p^ȯ)+\̩,-pZS#w0xJꦹBVt 呃3UHUu:NݨNPN 9~iX2) 6885Hwdq͝.l?#7rz<}A#gxv>/Ilr.ۍwKhOCD@=34Խ4łlx\k]:#`/x _Ůڭ\5Z]tљB.0}Jf8_Z^J.Nvg0Ci{Hfζ `(y-I2LdG2%р&C`X?x)xyuGOn>9t97(KҚlaqsj6noQ, NvqfېdXb*"7 ڵ%y޺Qx2&Qpw Z(;?1]{dk(VI}NpS4=y/@o|$XJ G6j*θAmKUUn4&n ՎHu{yp 2tk( 0n]51v1d %K*]}c-gK;9wZVNC>]?]0Aԧ.qt_C oC2p[~=_ڷͯ[Aorfs\hᚷ _=?\^:^yU(4m.I%WwK3fH/є]í\8*ߞ-I%d'U_3Pk; SP+&Nz Sw5.߿N(iΜ򱳯I eQ}0,hq!q[,B=a/:݅7~2V~ L$\~֫8w*Tǘj!^C8]yW}/Tpܲ7/06W9Q1?O4TCa5$Dm: w1lx/Eɿ~Hۆ/ٹ ˆ{,m:{g|xh$ }>Ptw;#HcMOclNg7|} 9^%wTG0}2җ"I t:]/H9s c6Nߗ۹`:Xy F> 8<΋#cQ0X/ Z{Yo<|v;A-}LX'I92s3W8ɼl8jpj҄wl J}աg/C*FN҃9BoY8;:+`/ݟw>-G2ļ<8r<޻*oWg\ {T bIr0z b}HpIm>\Aɔrw7zҧF% prќL v[PNXݟfIdoPK gV;% S? 
s9;j;aL$ƋRmܾ9Լ {N X8C9v]wgqY‹ # uh'g>넥qXT-W`9$ ޝ\a1hCQ>[o>{V+>e=V 7=sT_fFe02.-&9w_l`[!y} VBߪw$ʙ&84V*Sq,]~VpwC !f@“%o^ yq$?s-oslkN.Lmg&hӉ |z1ρzy\}[ߎ‡V:MCE$.j܍[9(¡ sWg'$5Jǘey j]Ŵ'P*>f\Gom0/#]){Dq|S¿ʸ- |vHa[IQ}NUh#BXαtڭCu(Ry8T5yyD;|Vg 񗏾CjmͦУi,[Ե8H}!횤 0)V"qpw;Fj={tdgNҠf1 7&aNEMWHD'y,5!aL ?;䖕N|LRGFފsnp<ǰt)1)YKbO?1ɿ~XMq[?)ot:.F8$`s`γm T@~A*v+m7LT0+EGt*To(^ȇC%Pkxӻ[_>%6c \4Gll(a5I` +ɸh|>Ο[iސJa.8brt_;&y`C_|/N jΩ4MjAV뽝w8S~ cj?*}ɧXDV|}E<̙dv,4U= i~41u3aՏa}X=)fG7և GNXm?Rk&P]95|Y^é%l.܀ųFN]apʳAhiGYU6l'X ux3BH]g]ulCϧ}B'ߙ°!y]iOpw?$SKlXdŽ ,>H"iS WCz0uF^UjƉ~lǿu,geq9ܐ>u-kk7\'׽L⇼Ρp.kFn؋H`mY,تӑ`9]sReڝ e\`M9oMp=N{ܹ -anChU\q i~y-gNdJLnSYLoIbbxfckqU*<6s&V䚯*S@򟜘5Dn_{c7iҳ4%y'J?T>žE; 코[sf 3GWT˷9@s3h&&,2 A0Âu^j8$e6f==%wca MR? ?y>@@? ޑzG RolDV1 {tcxEo~&NŤM'aKw6 Ĝ6rEq=L[m3MGU θ *3Zkc}® .bAeJ\4̐]oO7'0K6uʼn7šBmdҳжmz۰Hp0kzz0}PK l5P v, U}pJ0>]gI)>g%sF=81rfm%V+_ur~K]3ᴮ3{'۳@Z0c2MÂ'+U-#}F_n"MHĖ8ή8վd%?xb̛ gMe2=Gaxo`Mq&网Ǡ_B}vGa 1f`j=$Ol@ê8;s;u  l=_~@719?n„^2āƫt1d>DcAIzu 6C xYl֫VXw7;EoDEl|pa!sF?k[86F#xN55oA eCSt=.us lڊS efB`soQm0V)hQn#V ;SėώJ߹ *Ik-{I-?v_Y#Et*\!bK40(O]wzV53샠u[?bwg=r? G-~0j.X,aw8yKߞ/ZV?nF4Nm'&1WFIpl;(Jލ'*$0Xp2|<|sc!YcoG_~l#{V}2VOE:R _zջkiK|Q\1?Z0vO>[F˴رSdH*3.Uh|nn e;ؐ~d8X1gAdcB`1'ٜ73<[y0';_qz"xw}\1;Whr;=(S~=Lh'=pƾIZ׈7Bޏ>j{\RFv)2uĐٱy6 ֙V(L=W {7RGR6 ^)$󴛩/˖i`s{.qaڟHJzJNJ\# hX-=r%߶PݡbC c#R~mdkxt +{nDA_I[fz[ݻyR|bҋ߮K*W;r; r=rnY5?腑06u $չw==q'reaAUy&>V VY3+-_XsvZ! 
߳ rvdSap/-5|G wʢ}\TS0jМ{տPH0b <vCeP<`iN՛"WyTYT nŸ4FšLzdܪ~]õy0#c3:eGI}Jo?o%4w%AɯԜ@)\,Z-7|7|i7] nV`rԏ0CV@ˋvjs; e۷<oʅheP˳0ܤ8Uc#\&tM;N`l;Z#bqwp\W0pf 7L^;No8 RvZfr2`gF㕕)E5Ь(8F`zוkAXh|XCk<^.b#GUx  j$I<7'8sҍyWgx!MwluԚ Y9ɗuX3aAֻPuf V>hEĂ7,։d37ۑj,~>]6 {3m o횴*R~)rM@oG\4憐_ϞS2ҫZ\~.3,<~O?Kg:w2"cݺ@R5Wy}n !Sqt}lh=0e=zVML/7ݐ ۿ8ޒ694oFUS~qlSA+:$ű ؘOZLPzʑ nCqLJsXi0b.0R-uCyyPV"y*-%vTBΆm1/믦噸(Keށ?pR@sj*oҕ.ǂJapodlŁ[ X`|_ӟ\vؼQǶ2QvC]ưAɩ:"Ň⏃$DZ=Ԧ6b-F09՞xǤbJNEpLk 隙Fu & glΘ=b MaYEffNg; zgYW.oKW%3X.SzsɷYf|r +ظa/4 +LLm˿^ؙGsяjoE ҲB  L!vk\<+tj(U0?$pZėPA]`AY?oƙˆ#KiW VqqfŃdapJ7,2Jc xÄc0g]H{eC ז9KJ/*HFn9\x=uisox;yx\ƉG'K=5$jpskzIQ(,=΃#3"vZA]mR27+%CMNOsHvͷ=^z.IH\gˁKql9'Zdhr۹-b[=8&,#,:*6<\Hm~$#B*46\HcǧU=ɔzVЦL3ݰ.\0 d=3fpۘf@)S&,y?&Nuot?iEgU<8׋i+vTyysssѢռ3/4`!R졖Yo!9wӷﺥA D+3b}&fsdNVNG:}qpGmɂ/ƛ̢洠^Q vv!0ЕS7ՒJ:_dHV|,,,ZϖF,#ٿ>IJjҼ|`dhc/+N,ACNCs$p< r5|2-HW4rRGXo4v]QL\^unnwm:uAO2̐28C#V RA0~bKr],:g$bc|֎jm c;'u*)xEIۇmW'XȤ)l}Dpzs.mPgg7Gn٭9qz[P#ƃLW5|g? aMۅu_3ȣJv&Y?߼7~ =G抹8u !=^:KAڰBa ?EWkcw"8eמZ3ÑU89k%uǮ7C̄_#*h5O2+ǀ'^cgVd?H 0~N4}6nRu~:ܦ&R9yݚ'ʞ gD# 6͚3aH[檲#̊Y<)bd5{+/d:P2L2\Ѐy/Ws˓wtێHgTjl{[xz"ҡO7߸:~w=YHq-Tbѻ"90i&\:Z= I>q0>w1O5 +ijF xHG.L$] '[Nnɇ!i[uǦTbϤ~̍#{0/zfiF$7ސ͍;Iy"ʹ8Kn5cɗ&o s q#)6Nr11x}oבnjԛK8`y8HD_:v!2O=,o̥47!c?n?c'>g=H7s쫈C;[Gc{d\v4 ^PDf&|dC$cR}LC c~1+h-$GyG(]2ː!V Sh-Lpi[cS>ursK_cAAYlS?pw hAuw9ꥍvdfJm5/xM`KjUt=S~vþ'3(Cgx><7rN[IJݱf0%"S>f1Kk,l75P`J|\^Tۂ(G`YvP˱}H͇ꓶCH^ :w:#xvB; 7Hه/6=MCdk[a5wmaCD oͷǿzaM~^*F:?(r_.$#XhMRNzs -~ I8ac6-q}a&󞡔)߸u>`a@ BkKpR7.>lMHRt?_KrUR B;{} bM_#EFuҰRd(4Nz}:N@ue͝I:g4sr#6=V ŖX6;1Iu>&}G^!^bμct,ׅ? odE>1\q<(uF?h0%{Jعĸ,3vrevfMK5f25rNz6m[V1!ƷGG:B29L5|/&(7A0ޥ6t]f~jF6gI֊1!qwt:q( mC/jQHf泹ꭖW8% 9U93Edp'K0 K F뽦.qUE`[ xdoYoׁԲiIREo{JgG 쫊tqt՟`Zu8>ygz"&}^9f(E0e uB:OGvҜ{a:#6A*PeVMHԞӡL;恤qjyx'z=9  +a팋/^ˮPezuI5zU.+. -3+v6N88>+sRKыz8!=vY.:Fh!D)n8U|.?tz:lj;$/TFY!U,u1h4? 
_iB!s8X9t+.ń*)u`UX 7>u#r:<42$Ԛ(|4fZȅ k;I.Y>Yz<߇hlla\/XHsUoe|Q q8@Pț%4b;1mg`φlo6%{k]?$b.+@ݽ^8j}dǂ=RUXG/M ؘkWd/=n2EZ@iiq%${`bcG"1]"e ÐQObW/Nt^aO$i2Q_Zr㩞нC;Y˪E?}5>O`N=W>EOх[۴.&)B1rG2&*=bzU:Wǩ&d-'hC8^V^[⾢PTC2~|.Ӈ#e.Bǃ]2pq'qKv7ˆ/ۘR0N!c IY%}1;7[K?u-N{3@0gGۿ$ťC#l?U4.r}fGh3|#C0.Z.Z3N/DqڋwXLgC_cH*kf HXۦ!Sjia:=qYY[dֳ78/ilr{7otk[^=<`nr&ΆZ?,}ĎS< TqQsunF"Ԃj!@j; 5B;V?r3'pɾvXC1*i$ECmZ q^ r]Y{#qh..K,x숂sVH|Jy KLaSЩ&:I0>4O̠%R\ .~HSV۟_ cbbn3Qa<6qJ˜z&tg@C8A:_(NɎj=0&(M4GIg)ҥy}_c$)Crϣ,ᗟW4)%8ʲ{op8bwbt|4w^ĕ- ?̣Зg'{VXrc>.s@V:qF k$rc`JG//_UтVç#Nk/.r@JK|Neߤk7b}(sx_8r/ f2}JHz",J, YN+}E69'-Yðҥq6|#]*ɨu]D<>RۭCz vPaxeKp8IXV0bx#k 44%ߍW|d8'osA0ˉɸR_9ܹB`Q:_2biPw@v6Y| RAV&ǥ_P&N5 MIUnLyLYOjJ9m[a%_0N7?<@FK,pfPz/O__  ر,,'ogmOM*aМhs6"ODXny= nJY "q]}ԯI>Cd X@;V of}IR$A@P:_t7'za4C`o:dIKbؤrKEgzB'.4w6"7ai"賚|{nQ"+qCݟ0%E <[= N+8Ӓs8=Kj?[[?ivowg#iVى7辠WhD/ X\ӏ^gc 3;wmLjnW1c=,­% 'jmd9ݞ`1cԽmZҔ_aEїUļ'.U[Mޢ8ï笷&)P_߆v-yHL]zoB^@!O:$өtM|m<|S&߅_cA2R}*TC/FBGp:YN)F@d@ *yHt\9S>kG_[`׎=_|ցqC@|?Cs-\v_&Xnlz2 K6vZ0(T523vSvc1nPR'LP艄i6%6kYf+Q]SA3I/F"6ۺwtEjw!tGߠ)eo Ă!K*fk'BmEЖu/xe{nb.ZF=ɥ6~uq0{Mo-#Y"e&HQ\1~v0֗2W}xTe%3ydϛ۳"%k9DŽxHAM7Eɫi;LUT򓔌@=r|\V*u{egG2xggp? k2e|+y*qX;Lr$t~Wl(rx>N!9C I6y~2>I0lmyRxF 4 .\:_=Wغ.Lw~śU'?caڻ:W1oYS]7(@XwpQ! tӯ0Kc.@U\h,QI!eVEgѾč% `Cn0XtSڨ vLjTC\eUL[gS~܇5y~V*z/=0{gtT.TVdئQZWUXd_x{{+c`+!+E7>; NPaזTFjHH~ =Շu@߻LχoWTr̷ "kqfЉlCWǐ&%#aTc06 9qn&>' 5"];p.8V3~.c+}%͝$ˈڏ XW8}OY =K zz\;m󛠦:ʒvRaT&9޻X?*2w%7C?m۪BB4dd>IJYLrdX&),Wna4JúNV4g.Ķ{,98m7DU:9=0#nG~:7}9g0԰#$.}-g=S*fucPR)^T SΌqqrFfl/: VA Pرc-=g_IlՔІԋ֜ b8ze"p=S= )), ƙr\uhr)}yz _q`imk%XPi6KpҤ~! 0ؽM*=%pv]q|iR$W|dj Є~,[ :vwX0( y v7tvDR?\e^`;j XYEu-&j>Hq(U4*r$H@{iVr.GuH&YsHVm/R8s:k֘`@Au:'8 oRJXqJN-Y_0va|4u5tAR _ڦ> {z>wEx]ߙ; rp_@/Ţr>Ñm;5|>snP"r>U?xvvjϤJ֮m$r=UX]!)Wn[]NگۏX‚UW$$>H^9߅JxnA[Gr2n#Ӧ縄>#^(9۵PU*tY 4`~.߽>9i=ʂU[(/UOoc+qBeJ=,dYUØ6-7ISʌT葥 CI[qؗμV7;;|^3@ϖj%Yv;iuŊ%t |m1/[F2\sSơ+JvX?g>q ޸Yչ̩\kLkrm|^A[a>{{Ng6 3nm_îO;_9 S`OFY~vz+ɴ]Ӓ=;5h}<W"Wavͧ͞icbrt^W4+#j60fg5CPئ6^A3MXbޮL G[^]ڌe )+'lE ,ҝGŠܧd$΋ ZKaD׾ ;2TlԩU)l^tm`0cu1ʢΟND0]}P˼뜒oJ2$ z{~&<=cFZ߈#Kq;Ny%tUM'/"$jN0rFL8yRjI.gdmf? 
`HЍ}IR ,3thދ95/dauoDg;,y|#[>luə]؉o &5qfn ##(2;f&+0+v@ $gj[b2LMI$ɭ(g7 gyYx) =w@!SƁrlڑyRX< #teql92%Z˃UKġ>1aC9Ǣ_aa󭿹p+ƐsVDΰ:O-fu> Jg ϟ޻+HzcaS +A;tԀ &'vQyqBc:Uȉ\ՁF]oݬj\ gT\e#ڣ {QyI`-7RQ +&߱SuS"jmt!aU O0\8TbXSң7rgle( ,yՌd+ͅA 98Sh3B 1u'G0Rі.M8h{Eɷڊø&BŸqRGq~©bwJ*"Ie=8!irq$:tfώC=$y'j-Mrjڈc\OnbeO M 8ꇭ[`^Ў l Bꕤ'=O#L `O۫/ԗA?(G2?$+Z~XbvC[Uar#s+ݎh*9ݲiPZ#Kw=*rbyP\_%/j!{`3"3T1CtYlA= =o ̒އ߰_9^;pGp4ތۊ8^=xъH>?Ǿ+Xs&Xq.-rꦟ"li-G ;%$JAZn9nD[Z>Xs|\M:)i &[LcFIL1އGu`N&n څxO,{T†G d`mꘊ!ҢFŵp.st7?'YMvV bI2&Bگ+8e&&kbksGu 0t~ N0^a9N`Ux9IНz8L0ۈ۸TXSx&NSW30IyzT%\ zLshҋ? qc .еTjf)&`SOŖBiKU06qp*8@{ ljc/gs:3t%vmt܁@i?8QPXTyj$s{ɐ"dƯH {5/c˧<`,{-_̒R90q)4I 틽O2*h A@w  /M/R `( (:p'J `#Jll,(9(s{oTBu, nFBcgAXR3'(G+׳xag"fm3ɵ> 3wR:tPQ_\~t6K /gǩХ6("Tϳ{ogqjJl?TB8?5rt#v$ÃҘqiU$w=)obN!></lSKadkvN7wm}+âi0_]% h I8ޅl^xgd{thI&LV6k I&}&G^<ƁOGc۽ ??FaI[dy9[:O,`H^Lf!B℣fFzrS/f_8~6d0;*/_V>ƇM]Gvcn؇O@su$/p "Ef)_mcK ˲An.gۉ˻9l}fq^qHZne(VudCpWgK2gt H=Gu?r:Oh 4ò 2Wy>l061^4>#<3PK?zۿs)1"orutpG=cCI2wN?{d ܶ8i\z<)QC| ⍮%ˑӑiW@2Xů ePCfQI2NO}{ć}m;^IU8)n="z,.yDb-G.<)hG(r J)j6|0+v׏dpz!$?xF L16)KFȷ*a2lˏPl?y?JT~="΍X0'Fyq#j>cWKx s"IxafQHlVr)wi]3vw->QWIȱ*͵[Ir˃xE⪾Jnj!C[lanv~a m<L) 7_E SuwGZa\拓YvǦ&悜m;5KE"&q"^rXjp 3-~3 v[Tr )k͒4x~Y8~<[T/Ȉ $R4[$8p{SA](;MRM\e`2l?~v~$TezVԞ ;WD׹8>*od$V-?;C;֋w` UjP&lǘJ EEf6A95gTIfPs0hc*AZ@w*dy7$mㅖ}4"q S:pر"C +J=lЦzhM}̴NRwb!׶Rπ5y8ȯ*; 皻075 AX9yd=WWZGp!nGȼdn㪵ycVV^$%'Wͷk9|̕WkmMNQ o^/`5'u%j#\1Mn6l Cq?.âQϭ0(sg<~ i*li;*OKk_('f >]{Ƣ?*ʬ*a$4{/Xx lXji1G~5VD CcmM\VBS\N6J-1DwRo>9 sRoYhG -axn?\O\vPąo1KĄA?u hq>etN;M=A_؀kK¹j3#+oiOAn{J~_LQs7?& ]9a}@ Xb(tH}< "[E112.m#3K=7źB3'TcNI'ɞ f۟`@!2pYE lhg'Do?e6f'PrDVz/tbC"9Jz~;V9D2sZ\$KUq~hdt ~q'`v#І!d(S[ja.WcMM4{[y+ND·>:ihMrO???朖#x1*o^:B{ὝՇy$m w\쉵qg)\9K vjᔻ<06Lw5xT $3UeF)~bF^9mh>l,f]]KRbJO=jP]S ~=/+{=ݪET*YA*`~( YƙD:?ŕRa`_hk<JKBʎZYk$s+>8} .V> /Unk;=6*.qZ$B+FKSH!D.k 'pv쩫j̢tl,CÓxj% !vF2 +ʪVszP`YEU?%4ℌD0Iihiڻ_7*m\'_p˻;i,̫(FGc&O.`cP^ .q8BeG&Z΋#Z>ę.%a #' ?޼LSn+̂&>S]L'1^/<70h{ുY;NݶWn8yxgr[2pn?ϾK1$|3PF~ ɞo!rLǥ>'Ղn\Gs[qUΗݭ$hc7uz*zCѝTik.7wu:v`}ĪN j\~%J绅|b~eSe fd }@a7@Q{߮v O/_o$L29V3VK’Beh t\1 
oQ$'}c$u3\a1{Kۜh{3l랾sh</{yx| 9s+F0%!8СyV Ȫ >0O5ʤwzNh.&ý3`i 7Z쾓%c;E|8f:YpgtoqP pŒuV!8-ذFh|$ 5|—3BҜض d ,Ca`#Qj˟?OPĩdǞ~Ģʑu0pYDE-++?01M6C7⛽2::naBx2_4}bXr jo-ZaD|I7ږyۉBw@MLTSÚ |o(VlvU̸Z~$T6}}¿9gR8N{eW.+eLDAeE,;w\)vym鎓o3ŝp[ϟ.3m.Uz}Xq"X[yzoiL% %*T:Oy)__68icҼP#N^r|y~?:PZy}gI?O~ 3tM=K$dֱ֞&^csŽ˱Gh/ ,;Ծ?y |i Eou!&@Eʆ –4Hv;^a_/_pqI4O9V8i3-WTy .7(4=82X&A9oW'#Q7xO&ko}f']\ZaZc%.lw[*#Ϧ;=;a?Thry(ebTzI' <`x|P=~z9={Y( ~Ϧ= )\ ڟ;P@_R#έ.T?WaбL5?AP1gl2o&I2W1/ύ4s oQ-$S~0NY+w D vvX3':Yh]k/Y;Fx !Zak7^Nc*=R8ݪlPS#7_6AaHܝdGsW  b'_ :Fz۱o3jVHifmSaWH gnP'Q(Alh|QSBes?0ap5(?v]{qWE?l憓GOG)B擢oxq^p>qu# >03$jk+oqKPuC+t|v*hl[=h{OY? RNɎqlŮ_@ ?|.b訞bܠNÞC쵅L^Ja!Uz7w3Eې~ o" @ku#~*"LDcXy}2=Jwt=L҆:{pʹTqqTZxTCƏވ; )L9MlÁ>s_K魛9S-Y韗':8 K lTH#6{J2{<;k1#̋:񪹃\8:`v4c.p}' N] zV i׽!LYްA* (8S )^:/ūL}޾MwXO^`;%d>unP2VOV۝RW)AG>WG$ۃww wl#RzZHu?q=k ]dx t3U.V 8ҫ>YF:`<_:KA /oޗi@SpZnoֽ}f 7V `ק͒V\w%%}M$a6 \v,=o#`cY un!B01eg{l%ncA ͻUU2=."Y6\ 3N9rw&%qWѝ$S G;G\csP\eԽ@5ڞ.4s``7ߏeo0-V[2slMHUTez@eoU[P2/JRVjaٰ'8cqQݬKr6ϻ<:RqBvw_ t;SOV`EoR(5^^;nq%uKAA @mKCw޼;g/tg#{/G4py 00+W=gxKeHrݛ%8pfL]Ҁ),:$ȞO?I&'L8cA ]Z_OsWG /zAlio ?%ז0uGaPfs} oax8 #O,bs{YOIO܋\GzUݟc/TGÀߕ8~h+i#;1E8nZPJO0}޻KrFI,pSkzH~o(7ިZCGEg3HB]H+0{N2'w"wX~ UE2K^h/S`{w >)(Yoi0Fzo(Z8OY, "o;.Z$EؕP9f&ary:ͭSũ *i^ wKqkL٣u O$)lA`Ep )uud"6WсVaBTriuF'XnJ9ז'Hփ'5zb/[;-@eJ>Y/NxתJ+@!6*hk|J,pG=v|=b, :K0]rrqj{.byj6_I!eg O&ix&+ o״9% ʃ[&.e?& 3Oڮo4q#@`3?ݑsO&x`F|$]Vg.j3iBFBqug>*͐ҌNIe%O2@~dT8;ChЕ+HJfÍ@Q74DHەTl03gIr\"X>? e=ڿ >8 {˵0R¾7?'N8 9"Mݔ΃cS~Ⲱcu >=6~#ۯPG*}K*[q*Qư8/¿aݕ~AdH(&i{3 .*~wKHFէ72 okQvI[(J0wE *ۋcen;V}ЍtB^7-H~؟'޾FP =:NܮNy)Z^\1:N0}PV^К]P h? !;c׋t Ozc3Y|8χK3Hƈn-t5i3;an.'-A7jv*>0ܧ< sҗtwwh"dSG̰?/@W|'^` .s pًUU3+_mьå[ S ~vl/깪d|/4- ̠^P~ߙ%7by3~P2z\gft=p;% sMcކ71|~jrpk"@Sʽ 4~jW>c\;0B~˶o_2smRl2wvgKVscӵP I8g(qmY?-NȷB_`ޑQX#YUZ ɰV۲xoRƿpMI*u? 
[= D@򓄧׹c{@3sLNcϘsgFv=ɶ#Ð|΋el4׺r~<8yOtY[y`D{SR Ww*̥).HZ<㯴zE0PflPߟ| ^ۣ{q׿LqY[|-wvX JFٔ; H&`iDkeC3y1m{0]+~wG)6+׹aWT ,t%J~_`<'@o< z-gloq O) >Pr,VTf~F(ڹzf V.i {a>'>ź:vg3>guԋ_Zt湘?`ޑͫ.xOߜ`wvo$`avtS@l$<\>`q[q(!3i8F1oWmju[cXp>%/Nc1Nj" QD~MfH'd(ug6.N1)GfŶBx㧳ýI~-gߎߏI<۝m03=V cĎAkj_4qƂX-_\tB/dfp!aQ )lٵ8 _u:@^c< :z+$>1HǑ幘s>'+{6Uמh@39nMcj> 9fݣyTIEa! -7L߇Ss:ɾێi[ƶLL%귚 y`unQ*2O0>wu$Y<$Hx1&+fIyײ/]|}FeSޢױ z_pa_geFںٓM- oO]TK[W6?g匛eM{U-oCp,@Cj),zإF }} a56>48nF꣓$顣FaR k,% ^S;qm̳rN˒ ƴةK.NTvBzɊU z~Z*_|u#f*$ǰX G?R7[V6ӦCv'Zwoz'c?רass϶f{-B.=.!56دsg1Tl<iofr&oU#3ō^ }j|'De| [mؿvI\>dAıfԳ78fBxvK>a+NPo$WO~+ E̺nc>S?H t"̊#`#9g$5CF-:cm`uֹ ̔/AmBpg4 3ckZ #΋Ljj}D`]̙(Y`u8U$Ss]Η$GEm{59`jn@\yARlS@sq[,яp E]{.Qw>ת{O^Y3>s°Q6.:{*,d?\TdV^3GO{u[o=OP,|xŏH]oZ6hGK2>B8cdwy54n@gaHR߽8:q[`3ɗl`{?17B㭽=b's䉧 Y7\^$ ̏4A?+(3Z7S\Y7[Y(@Z_v')s/m%-~ݡɁAS}NItjVN|tj INw#%Y&OR'uRc ݥ$E1!.sKI3X 5ٯs}ӝI0.fZ8Eny0ƴ1*g4}KjP#};7O2k).1NU\2a57yk {CRIkfЬ$Yrr>HOSA`N\. mgF 0M/p6s2l 쭌u@vh>^v 6/ܙzsea\ ؖ!;+5Q H ytV}M>@2rDj"bwccyzde+Ku y뇺z9vJH(sAI;!JQn ̝/W*| U|isY(k'pIenS?z,NIDY4!x7XKr:./yE *Wccmr]qܩGpHYӟXɛ8?|uŊ6;qu3> ,; Żkcf+^n'vU1oN!&#UX{| L%*_;I@b53@eWI{7Zny(?= x) 92X_)w06+6*s < L#=pf&蛼Ux 0ۇ?QK:( U@뺵tds^xGg|J+mc %%%v3ʼnϗ™bjްı-=g1I|f.3:o0lC2ac2ZfMm 1;$ hHAx<ļ:/=WNK}C4Csvy IijdyIBp^2D]^ռKxQ `rsVAiEs/Fs;oTo އZ|j.bq|,DDa=KN3kW:O$cOa܁cX6$6 Io%rLJqk=ma6CoɇM{ ̺]_q'=,o2vdtN.xa V̙~8},G1|jaOͱ rAsmah"}J92{l*PY/'^d|'\|OgFp=o?B |;D3HBz'HU8G>{i@0YgԔ4^žyOد535enVq {prc^ӬnHkz|Y)%oܥjq Ք8boɟ-NXc%" %#M,c_Kp7tT |¬4iH\\;xX)æy8#k4^Zx-#<W?Sep][\3uʩ݉O}Rql5YѼsQ gc?̜My|JpFAb4}* V )~˨ *]QvhT<^OWB]ФՄs`g ]ܲ U[`o͒)(ىݚq+C_ sl_C)h sLūKy17jRވg/JZ?$}V_`H}8y`=OÝ]$fkE*@ҧya~uژ8tz*mZX,ff)E2ylI[tـw ͮl%Gۇ=B0-!  $GD>3P}ܤzF־0iv;iŠHTIL^?ߔ$ )~d@2%Q1V-.X@ uwv;\[ᅱVz]Ǚ7 I6:;$vmǰ_Kιl~0yzn5pblmf.n[Wp`_2<~ve'U8}Nhb W O%fOEPT|UƢmU߻GIe %TTq[:wu=Evb}۠.Ye.o~Ar^L- ]NRu ܿRPɨ/,[mqK ~O89V Ig3')7RٸLTG%Loƞ\n@j78<9uYSN2{IXV{adѥ:? 
W%"vؑ zljC0hųiNJX W6r7^^lƆ?6ɵ8UylVcVkn)CcW8[ } ,^0Lb}i6T1%58^D f!Na3>kW)W+jb+;+w;1q@κI\nM)D0h8 5 w%ҿxű$RxzۦY:3lXUDf6OR!OgG.W=8=Kr}ڒ K=Oomm8GV@iZoY#?ŏB/7Ilk22O0Tnc- ;Sw)$ݻO+#H|`P;;zeun(>5c#wz!CwcژD}٪"ngNf,n0]\R&}S#|W%"~b+8Nb~F5 |]YܲlS&؈cJ^@\6 ,;%@IAx p$i[!:(gx%4q>ǸlLS8pB[==V]//Ɇd9?MmR>0rJ"=$=UKHWep#茘bEn삹w{qO3đ\&6[USoͼ974dnp'2uH n5& VɔзX7Bq2dk)>;s>TCjLH:q: Yd.%[W`i^]Flr/ Wj [`;!_䴖iW~{aDŽXiAiƨDmqֆ0Kg T4=ؽێs)l搡j7GX1y9Ad!.m=';,`߷ڠmCL ~ەM ِ$NY?pVd#x%[j܇q&8u'R>|[3#_ǥm8Qiv$)n$s98PVxH=|ī('kfNB8:zLQ%1㺈<גwAyLr/G=75/}Nӽw|iP{@S͊`0X~$E9. zTȶ?X`Tx$(6&m1U^#>9Lb𷛡NOM}MqԨv SbCM`!>V0wnGؗnY|L@w"gOhʯ~wt6xF\*5כ"N=PS`wgc7vueIU,- /4q>X՝+$X?LU`I2 ݖ{ Dx?͉rj$hTJ0$LR7V\jzY('$ o:mХخ3<m҆سE}|>ɝ 2NX +qK]:@vsO "Ӈ&ʋ85rk5?R ?o")L/BO䰁 7- eKHB"n`ɑW ƙr%:&&zj܆/͐zNz¥ggR^"5"[\8yO3t2/q ꗾYFsxQcq?bĞZ5W^S5Nb}+w*%$[!9(nI2Nݺ$uY[cAxclK B*ZKʇ-@-0aShDҷj:/JJ!$kGNbeŁ7n&R-ͿV)z{}I;dGmHJ5[FإDZhA躈ӥ;luBoÙoc;q*yaDWʿkd~ ?wpEŔ 0}\km1Loumg dǩ/^@ͅ@,N>ϐ#^yV{ۭ] mcY{\諩WAQsss}Fr^M2 pVj*&~ 7%qy8saLébBzaD=6-jp|{ҝTnVua*mrrK\7Ɠ:#umP/ ֡. /1,WU1GO-)~K&,S-'$Nmfib`懌U :)?a+j"lӶl,CRe1\^2WK1;psT?9}``ɗ&a]d)vQ w583mǕ4^$xzMh.jONUjq ᮕF1q0lbO?YW9アR@++ a2E0:`]._ k Y l:{H75߷0 + ׆SpZ:?83wJ4ܛ<8f7wHR~% ӷXs$+sl-|2Jpm͸i3t(8#O(+V -kg䆩Cg_bh[Z-]tc;A1B9UZaJq;]LH]GlkiY,zqXAQ81ֽ#o} ibSI_=;ȕHx4=IvӃ+<+ "Bߧc\T7+]aqjI3l>{x| &g,];OҙW^)5iuְ-̔`j֜NƼS;Sгr}X:5U[A[#qʕZz0~H0UT\^5ƄFPfyc*oMa'쪬RN2_V gw;݁7^C%k*~>5ͬQ`~?Ȫn'o9=G$-`k7FXpbQ抝j~OoFB!Z8(19wHs\(l.D vjTqc]alu@0s#ɿKJXyWlGq,п9,+1VDrїwwUaKzN] Wr?+>a. VSp= r\e{j+x/˹.zc㸪:zy*"]+㉶޿p"DY}q#dݟ!w%筗CU\L_z/PRFn 8ۧLOz`$b 5b=gPWAn'Mo1tS텗ƹqf==3or{ C]z݅?u2GV!Uan{/Xnvur6Yކ؛bON5Iy 1]GUEy~~̐?7&%88èPI5pmT@5A7М/\,N0?)[znk2IOvg)*6kGG90MÛ8:w[lڟg?%٩4\ܖ ќly$(L炁:@SU;ұ70” rZU8%8* uIW+$ήg%Уy?saEwna]p]?г˝c p]V=d̷Š_Hџ~KOHa''{+f*)놝'q<a[J>AN A3(2umqțpbݜ/0Zg;Tq.tGu\qsD*c-q6O6XH =T[ƈ9{uwڽ]3vX8 v/ B'a&!Ibd?P] s" JsP\h`W,jmvaJ4=DgE_4 sKe3}H-s8}83Sy,f+NN ߜvYkZUz1gk|*Mǣ :?Gm JE8Ssҗ tQIp!#;,kfƙ2%y [a*fL+0sEӁɫւQ8:FT854ГXefi򥄄ga f+e5v=\F*-8Pi0YO?uvƊĦr;Y!vsNZ~IEݟvmL8n?KO{򵇀&C@ 8F\qJ%m'WE,ll۾? 
˴Ta68ٳ'5x9" ; R%i sMXK-5-\BiP>F/m}H]]8v'/\fUNdEE"9eSmBIJd1u/=y`&5BD¸df PsR14`[}~oPm B5+.:ZѰ ?rϤ)ÿ ?89{>γra=c}U\V?߱}B uFlg"b`0ۢl*7>{Y3N01h[K!sm :mNlfo 쎲|V|9Gp)6Z ĜԤee{aNA,(=|葖*|8~;# IRR_IBJDC"2ZP  {=c<^<}ݟ׹;tnNb/OHdy]}MIJWe>^s|gV LA[88XG[R96^qY6 a‹v s"}5aNO/~_ɒΣhca /=)βJ;ڬ/ Rf˳Oѽ ؝N>@킍v$=)0bY8YK>M'X;NBHug`LeyӀ٩G뷼7~REcwNl,90*sZ .;)-9 |~úzN)nXA:W] Hw=Ӹk3[E.|*IMʵ7L7AVN,C1|klT,Ce&lN&-@Fu?A_׷} y>C{_ -\/m:ג&#fA#N~_xߟ2B8l>Q~UF"ҷqŽZ'pMЕeuCvB +f."+n/y8]Y:,ٝKG?.x߭OqS_]dp႗[ siSrFce.r9nߟr:*mL~w/Cr z C/:&Vx`U͕Hٸml';֭%+P&6*i3Zk+8\,hJ yvt-4hwE`ޫSWyFw3v 1ԧ_QSj'+%|sגXD+e|뀙'Dj<[1+[=PoqhEO xF\bs*pժΚ'8wo]\J:gW M,k%k̅맦Rױ\\޷.Y+j^I6S˷/z&DhZקxxWڒ1W zxޝb}Y@)ԺIaK!O=+;\U[/+?AZuQP " ,e?ҟXORtP)$4:Ey$oxQBmvǺ-fwl >[u up3~wk!.g>Seߓt kC\;ѭo- r n?r?bq 1yR9?LP~@R,k\Ӆ|ǣ7ϸU%Hm_0& 御&X[*MW(<.X:d/g|}px^TST@8.q⿷uYg8]ZT721k撚 ]7F7"j߬ ,g0Tb7 ~tž v}Ҫ`8؈;S'U.j Z.MCTvhP|N^^wפNgg̐v-Q +9ɾr(0ȹ986^;-Fιu!ѐ(tߖuh|33vgrMFBoJ*Rh)};865i+U){k-FThWo ]'>qH@lSb R &ZEl^# 1.sRJq$WX$Od!+e4Ʒ!.P9S89tCt[7+l:!%Z@R嬂[,e/lZVh-;~Gʮ&tKD}AsxĜ/]-~u Eroo[%k\`.zXOӌQeQ-0-qz\l,DŮ͎?an2+7fa `X$ezޑ8#0.A}z 69%Ip}udLV*n$@Ob.lRJsVZ(/$uߧ˱v0ircNF-_9ACS\a+nKz|z;J zb0Yagƀ,:zI&q?:$N<fbs":-WPTD=+`"o9W4SO$/\՝>4gOCW#5__+^x:6SGpz@SN=4o8jށn*0mupE <.kcOzNKA=3Qb?8C瑤}BhL;zo@vEcaiS ~3Re2F-mIW|/`G 8K>†p楨>A[˱,kI!qJr JwVz4HGb3(Ǖl /~Jƽu8!w^de/;ONa?X sI/bQ]U$%UC+{r T}We󑚉 FǾAt-BYn~VŻj=zAH `w'/Ȟucğ 2wZ1,ǸzdC,at oxGÀbqQU!mc'_~T%Q ՗uF&5{r&B`U=o˺_0;t,ZEz!;.of' \I|Igdܨ{DyKtT[bG0H̄ _g^HdĝԹMܜlƫae2˔H 9~:GrIۖB =eJAAΫ7UV;7[p2KJEfO8~턗8T =U]jQ$׷zKpdRgI ;LϰXo /]LBZtzj  TܡNnQ†HRf[!>e s%l}1`{f.MpTGi>̋-}/C¹fп^y}"5mu8 ?%OǬ8.h/Ww| zG?XF .O9!ٸ<)AVT6ݸ8mvdVmmmKw\_i5佲/a y\1.E\(_CM[}q/4q{cS3X/` C6-_e s_nCW5X{{"ٌ/ !u!"PIכi Q60ÞY"j?ݿGg('" _+ˁx-(8ѷ6?(i6ƩU!8f׫ȿn Ӹ~X7oH`ڵw #<~z +qrڣwQ X+Cqk].!~O, Z({$ȴR@WXb9jmI'sXky .+z2Py؁tG`ִ<~<:i 5%9W!>9rW`˪ kb{`Ӧص"64HPVTPB]u#׵maZdv/)$Uݰy/)yZ]~5HJiC̝J;x0){,'i jaw <ο +4?WatZ:.m5g9orİIYW̵u>MCf̣AlsbN)jMl8 ME (gsg.HmoJ#5@~w;uh|QXJ|Ɂϸ/eȻ[vXWn" SnqU>r0+5+ i[d` g;|$צxzXnBL7$?c\ [yێ,+ծm")sTnPnkTY͓߆whYh{ dPӶ8-5.&+Sw7+#w4/ߒ (d *I*?݇epYR^4(,`]]n4u۝efɒC% OUI=ZܚJoKVN 
+Qq]U'{-ݔ}aK릇}v~pÜi7s^zɠT-Clv-!aShVq ;mƗ Pl}ĵM,N_Uvi0@{fH4[B/cFXonP4 GWAƌ[\ҏ}u#GqdM3b4R;0Eus} -'aD*,.ݍbkae=M!HKK3^߳O_/vMkS0lJ]NSզ!Ctf3lK)^^|B6"bT\RmWdrN _cH'O}bCdHz~1>جX2#'O9"fm*6(}x~_dasqPiv$H+Kζ9В}T ߾p V[8qd@3:upd=c9͠y yLx7"0cQ>Id|/N.!&x F^?s.| ǁX`+:e]mE2BWߧv0!1љIr};sc3'ͫ:0+Te|=Jaj-߷Ъz >gLv>gd!{b"صE!:-@ps'<\9+] uײ&94"yxh&V>P=%m g(H{d?S C㮋Oא+֛t 7NkvN;ʼsO ۉXjUIi|¹-SmKJ&cxƶwةU8myHMghW<*>nS{Sq#+/`u9N6_s `ܴr QWH-2S?44~EdN.6c/L tƳ;qjTzs 0kCUK"ilq~) =_7`=Xbo \K /*V @FI̱`e]|1AqPA_Z9Įf[o͕k^`K[UFIUȞ lk柂YsGp|y(?ᘀ{1fB{-,5zCvr%ߔlg_Az2h~[/sF!,mXJ$r̷ݴC &c5d 1ZYm`oc SK Wh2&<>v6>a|f^JMB9FzAv_A;eJ Cxj),5~L4*v"@?#oȞN $wǥ83z[`oH6G֠ƷAыA`-:ݶ<xJ wYiPdSҕwsy7cFjf8!6Yrad~cp fVRƙ{NA8H?Zzm\PGtZ#Y`VkUX;c#wt m=npy8o5t}xW}R'~g`o0#lֺ vYeob!x -8)wmI 66ԙ%js0,pǷf -néɮJ0rh_Bf,؁D8}0*%;,K'Obu[M"֐le+5<沛ɳ9sGvV 0DU.#wwޓHx-T:+7O¼2\s zS#`Fič{or 8B seۘ8֮ ]VM*v1"πw;h^i;JJbmӇ:g1m=1*zFgY''tZ}YWhwˎ(]/)夢͐Y㇮t=6sYL{z(nN*׍oV'!n–W<C퐋h:adpI]5b =/Cb"a 8zlFz+˖1)tO5no9}t Iy bNCͧG<hHg;P y)e?Is#ٳ8O*&9O[Yq"PO h KH.Z|ڔ0#R 8*3j#aqйqolf2aG7ԕ.SԬ3%UDWoy ÒICg;б&$u$\4u?)]b8LhESsqw";P.ZSx; )F9Yi;GBьFE>xP'lߝ8pሽ#(8 ۅC_ξ})ΈQpQtN tq\\:'Z ?{\_)O2p?V])r;N&ۚ?~n(5^N`ާkB:&잍<+āX";v\H*tuDZS$ 6 ?!ueB֮禰x5*uI) ٬ [qBȪM~ : #)oڗ//#>l27 yM漟<^rS]aou5vT]3/z5l?*~C{`en<:Qչ[_ ~KdE?Vу,T矫W^B kGyO$۟ Nv0zV-7U8rL!9d Ŷe6)럋dYpk{'hPv9խ0Vg6n ֓&rAVj>s^QqHNʱK1Ğͬ/ é#69/~_'(w/j5U (#5۷qmd;m6߲^)h- ,;ϡoũWuU輛f\I`~[^9 1SHb? E]uF? 3_B45woOk(N172"nyR!Wt{#c()Y "@&噬w2i܃s$X޽h$W߯mq$˹ghuD v)| 7w0|߰T{d-r:*GTviTn_齷`~\L F5t6%ٛegu-E.؍ o]0k8rn+7O1WX!Kɢ&qqvv1d9}T/N ZGtaldv, a +(\?Vށ_h*2}yU܋?wgEMu[u {Ļ: ˮϺd(8>_d\pS:KXn~pcoUb\qŻ ~DTkHxgZMl`ַÄ6: JLWOPkEr/%{.31CSpEr4+zPsqGen8J\|;Hv7q)S8P>SW7]>D͓G7.5A.se-;YA'Q Zl+vf}c54/hԔVF5IwJ.%ߚz8Y3= ^GbzZwFO,V |o :`ƈW Ej4+y~Z +nH%V uZF]l9'ࠇ||[%W =|M}j7~t>BX׵*}p:8f/]_uJcgW^{}l }ϖIJ 9v^3pf-i[b׊ؗ1<@x2E֝6º"R{苃.Pwi3moMoՊX!w/xLQ 'ꎉ-X{+Vߜz[)Cj'vd Zof@-' @jjos׽DwExX[K4ڮbN #)C\A'70է8tQXƤ2 VdN <_ sq\h٭{w|<f3In:%ar*e""Vr63dw> FX+=Bmahw(08 qq+ܭLt?_{外m%oM UܹrH<;GH)<) C9x'kPWt5hʿS_Ja#dB1\逐̾ C8xoHj]?d3n. 
8<9i1G䒴[)KS`6cL} U?T-tgp;ڳvJ<{j^`)Tm.ѿC^Nyr?`uE{BO`;<}{aԹ#riw&mM엲0ӷ!>NN:! ^WIq&\[u}L9k>FYLXk1dlxu#ܙ)0LJGVѡt3uzf}+P?r5ƒ4-57k~n.?,2[34hU&U {JCr?{7Lx(ZĎ 0B۵~݂c?g`Fc P ]90dj9ys: #KϚ c鷱a6W/L=5}>ò9` LsJ 2UT 4$ʺaKz# QA2T~M6+&KoF/_a2yYwgۤl!o>!umދ#Vsרsuspg ʛeM ~"݆έNL,=P+}qc.z<,`8'b1.04Sڋ v-}A<57"k۬?f?  _h}|+j 9Ԁ鮚Jx3Ε֭_.WIU.{u;=$ Vxa.$AAϣOp;l.1ąë&XPs;K0>6S vQN4K>sS#J|glNЮ[.NJ{ۡORh)OW?,>~g3j1NmvU]A_z۳I$%F֗9+@[׺Xfh?|2~}dہ}_v+̗ ^_vSZȌ;|aZ,|lZoSBFmOskcp 4m?q2='l6~0O\z!4g!LkKHK8I$G3'6Hi3iiy+"L9m@TL:/`mK[ϝ[RbiII5S]5 X#rH;o ߘ60f4%ve߷zsc$nk8=;wZ];߭R)'B{ٚbIǜ*7LL  #[)uq^]H^g5oϹY 1Xpn^-'Oe TIhy&Z>ݣ@Ըsm0eJ>|iWE$ݚ)X%4)qB͠T[)C%.NU4gpC;1!*mF['>Pdu/6&|/tЙZa|nj{7 6GZPw%]rG6„WN@$w95|΂˳{JA'41݁Y%Gdcd&)[|1LuU7:o?}Kۻpj.jvFl$ /{&kۇ` !qCvM2M?ʗ~ule[(HP4YJBcJװ,LJV`LV[wm2&cμZop.coJw,sgFka&9?rem3 xrc2Ыfy/m͏Դ&ثwk`-j <;;sitPsRv}\Sy ձI;5x:v^uUWM Hk"I%]8tPCxqlrߝ۹/z~w ;iԮ.,3bd:aB!ro{*zuDOv<'pyݕz%"ٷ leǂ.ĤIN%_'3NC߼}{1 KG n?s])Z>Q; aaA Iw ]?l⯭+u_m|3WUn \\=M"iޯx%{V;H/k,@}Xm@N L$9,Sn%XGS]jԦJy0:M_X4Çz/CQf-+XSg`#/_E:do_k]0uSr6wt[|ˏ̘?*0SgE^Ě&x/xh;l_,qT.ղWFC<9ϋ_R4iG~dEv*kODC]GqɥBʡ%۵+ni2Hs3m-,N6$6j(3۳Hi){:1A-wl&h9+.zI`9)#'͂^HoKՏ8Lj\3L{ !bՉf9Vo=Xxk9 M/=ީa_IZ|!ٶH̓*uu&m/[*9"-L~XFZqzn` &8?⭃ x8&LJbCK75׮Hp=vf/$(?mjW+aR u7XSN'Q +}x\9~ݫcǍᷛ"| ||b$KiC8V6Ws =͎UTZ+  tT̖б.bȣ{lŋ7[~̕q?c`3 "|eZ)s@ScQ7mP;yȻIy_؟@lNW]/E1c]_8۞zǗ$E[wT[u}&}{Ntzܳ8^@Wx03W~y>ryRakZ^}EX?`;5=Nnptޜ&Q)8MHy=o3#zOH;VS,Ku cI&k8Kyq(ٱv@vY {AoLΎ] R%3,l~on[l;t9{݀}WZ ©;LaՊJUP!s\?>l/{ZP  U/BZm0z!"q?홰KK%_1R[&+,m"AR|Qe΅o,?\Ϩ8Չv`;veϐ0p#!j v uujwN-%8$c ₋qYMtU&5mأW+nU Mwehn{Vқ;,#)9P]M0枛.m,@JRvKd<ʐo-*3fb_į_ `~?-v׋(F2v׫QkpOsWOr(~"N'HQ!x{H>[Qӡz*3GħZÇ"Q{e~`Q.WYuEV. 
{ P vnl?QG -#y^J,aF{s 5GB(I[_"|x!'$ނkNXʝ7O/2 /pN :^o"8Nw)ԷyI>\RZמ]&v1_bqP:ko !v}s ~],ap[%ePI >2J̐|@%(H[tclΫ(EFt/mINs-M~{c0?EF :sOpV1\U*5$gIٶ_տ&Ȗ-ZR!`%a*m|~%U[}<+nanjL*vsR/k lǖH#_$$0C׮<Ͻ_`gz >X,CU[ SK>.,bgMlq@}[UnRx9HW8ܩ;2,ctNڕ|Yc|AҴW R%Aw7KeˈZp y9]-`4f.ӁލWpBa{,UkE+/Ep\>{S~"6OI9kDr#>UZ40&&  ƕ.)iyfM NԌ.cv}y $G\<H\U+eLksKŢ8vÄ8EK֙[y*= 6JogL`.ڗ,I^Se>NiK2Go\Arx}")7pnt$`Dg`.r2 oHʊڦAƅ]=H;k|ʾV12C0~+&{v¼S)eUr3vjA%>Beض˧n9OF, []1>L1^,.]|_3>l'yr:{ކk8H͌{$+8ty5XKXE6uofεBcY_ C`Ys{ثq?c{%L*+]N_×{|izؽ}tUy-BS4wT" g߬mz L4RoK%oϵ![Zs@nc+(zeN ؏tIGbl/Y  #&^J68ci%=8wpl_v.PÔX&vHJ%<5$6Y|뭱j?LzFp?@EluRoyEᔭGse`Z|[ H۔ ArT{5Z`$W Sr[𪢛3KRf cttNLl_ vmyF .l)Yկ;/B2yRH0F[Ҝ"==ٵ f @F{9DaelYwLO7\X ?}qT́S\D&}C/.;$74Ě].n^8_AF8oIpS4d"a0/% pW2j2z0 ҀuV5 Fݺ!}XWtv0VV%@4;̮ERNdBF%g c_?)}Yw.a|\8-}/!d?_0Ae3vt=|Sd#qD)'VBSr2UumuBM8~ 88~ _fzx8.ڝ*XM ئqgNYU;:9 c}0myj 9]=Nu $58AUE'FpVծ$6.&7a֚%^=}AU/sւw0Rh$ TƎ_6℔j7TST\4+wUqp#-vgæ1v?Nf>t>wf%Jc6MHp'E{5V1_@х…Rt4y{kIl2NVx챯>T+kzpeH=};嫂pfwk]RQ`[LɻdÉqmŖW"1JbR~~HC+6p}88mn&S)/@KQshĿFFpu~Blc(pD+FϾ֤2`br*.e~̡{f6)!DRm,(Fމw{@M2Q;d&.ѳQCZjKh(Kfw<12͞N+2<,ɱL/NdyIT[ѡǞ̻FoPyp#A۝ } dCzDžOGH?h:uE/x Y əS'k+2Nmn~ǎw󽈳w{Fn\ 0O)amsU]=Oiha߷@wQvX?:=9|eipBf}Zsag_.LJCU-Kwzt_&ϙOSu0pv?Ljͥ=\o4ŎrCo^.K, 'W11Ll'3F 0wEa,F nI20cdd~z+@$JPˈ!I+bdaw\1gPm4/( :_H{/+V ,l0x B;a9z*to QB2-qR[dLsZͥ?sbѪ34Gzpgf](z;.ĘrY;=zs 96"Ar^l(WMqϝ8dY̕1k&qqQǮc5^7Wbka TBc`OO4_^7ux1[*t'<>֊U]IY;ol"ArJ&Q]v[4~jzLsqI[dGQk"U҇6ۤIUdiz2ɝfW+.A ܤ1 Y#9zkf FB 5?a<6}c!uU>>O/_o@J3Ʊ/2&8_'ϲN⺈t[OL~a*%0ǾݰZuѸf'jE($jWvB]jQ>\lEMkXvHC}:"l胎Vf_I 9E'°ZCy[5zOJE[lyO.]%7;pfi8Gg1XzegmqA՗2myk{+v|uq_毜j 0";Ԝw ޣpQm {1L8>[bG78^Er<&FƭHoԱТğߐS _( wP{ϕYޫ2%ln{,v^!,RF/m̈%m(h-"J˹{evؓ7lNRբt"|ɶaVR[_:0Qܕ/ a9(`igiOWӪׂ1ϗSߓawlLQ˲7-yP%mW cGaXR*l0CK 96gC>cwjfm_g;;\%kcW3Ԇ0,yj6Ѱ6Ȋ#>8w@oj0p8}ktȨ/mA keg{.CMϼEHkO%" 3"aWn!80wܱk:c~{5Oկ8mR]s׆lH>: l^}Ň*/U  bTo Gz?QG.ILj,u8 ȎFd[ ukwKpps1K]R|jmqz/?Cn#TWך@2CE`2r씨s]+L`=V4q ԿB[=O1u>yޫ` , 'Ĝ$IhQ  ( {g>⨤hMPZ3-!P'-`%?IQ^ g`N? 
.,:#8$B~T`O$tRe_q!#;qnئ՞гO:a{ $ F쮅?f+qdάViY!a}XRĦ[9aV!{@aCX:^np|5lcn YQπҹMv&W;iKkIh|^ޱGZK֧0mHFܨƫzIZ쎪׺fُЕ8q {%i.vo{=.uVɓ뼮9Cv*7:H>{'qQN|i=cKHlP)t2\~ܖ^FIo,ֵoױiW RwPVw<}ܴUS$sx|'O(b ݍ u 8ٝ+#CHʼ$,q{OTT/ߢNcLN] GW$W5[5snڭ~<Db NYHrXL|8Ƨl? $}[3GOTP`lɗXO]}r@7fLdFGr<οU]/9KR|&Vz}jXeH8Y<3"/=-jz?s/~e{R!$b:0nZCCБ)GTcJeb{2J]yKI R~$rFBmG&?G* -8fK\d }ҟ&k4`͎76^B l0͓lsu`Wx,k'/bw0>:-X?r<x}VmN;j8c6yEXop) ?_fo!W/=z2{Obx̼mB60ws"_?Np*Ķ' ),h_g3$wxs}h}]Q؀'/SX{6Aۢw0`nRpEVo l.FhJ L]ި R|g+e-10Kgnw0Y,Ap%0f˨:R!@<|Xkbۆ5Z%x;gB?cs#l] r8FrVhcMXQ-\8=GcbJK/+8p5o\7ⳅ{ϩs.uZ؅sC=SlLW[-#ep5+KS|u'7x,=9XE)ҞW[\NZg^ e|vM\JC}qGFؤ#L:Hіa.y1(NjdԶ:`AW'E0UKo%0 ^{9Ald9QݶwtPYJQhL"u$d~Ua#tK",pld6tȄuJGaBn'IJwL'tɧRW+h$Kib88uBv'==;߯'?m!{(ȷM~ !>ygRD:[G]9` ܙ0djF0׿c@m?4ɑc;D(0?x"NU|d2CkR Vh2 H=s`2kbKޯʖ붏ڃ``V_\/ )kz<뎲UomhX0V m|?y/AW~<ߕCHњ4N+n_p-<8ssgn҄Efҡn .OR+u)5˽I͊ۍV[ bɵ#_'Jg~a 3r%-3UY5+~Ħ RYz@/+{C|Y~:} .<t)GQLVi mKhPHlNJ9MZb87L^X0II᥏Tծ[{K}2"\2ZeM*RyKV0Ż}j {T"0c<+fm{_m#x{3 JV,`=:Z)tkk$' & oo-jCcUsޣ505̓ߊ.ҟqUAB8 g&z|Kf?(?z(g{:ڒtgص  ֔ FSհnjS -`gRQ}Աddmɵ]H,Ry4]N0,[KL6<({<i10}D6 7ܥ8-|t#Tg}Y6v1o;.`ZzrǤ R~°V_1l>5zk"jy[ i?uafޝyzJ j|0v>ɮii{( r4't0Q'fk݁:5Xe=0"LixMr*㇩f38+72N۞L}gC+; f5YdT@ܱÖ4휷QL}OX moAt_OPE6ZN @W!'-LZ ӻ`{B ,}`(ؤJpՌͮ~.ѬlRQl-ެuef[G%l{Wd>\R" 6J'=le˟ܰǒhh6pY[,Wff8(I_6qqy@Zh .{xt;u/AnmPtA=ƒ{ߕ'CK?؂Ԁ>;qp gVd *+)$ʆEY6Q2T&7]9o-&HlK_i* J qzi* Ҽ1ԋtH)~E*,ݙ`r5Ͷ Z>]~ o6&CU‚]ݚ22W)FVW6cmXy } [_!wK#ɓ퍐ݗgsUNź $`M$1Y2b5Fax3lw>Ϛ`@mлNrVKp޻ǿ[WOz VG:VZ: F8>oui\M/¡QlX \43_oN챗-{jKw$q !ɀ5$iN+\9w6LRWϴq:^.ϱzqS0(vp!LaoL*p#8&-]x,r艫~u1] dRz(/AP>S%FϦbKEI'84Zo^2g%*`wzM"H? 
.蟩'٢_i‰D`^`/M[ψI͹AM\s0$9mzp+Gt(8\䏄m_g ~ZxUg p}Ck~S@rV-3y{U #J6(<:;w} WKE3uu2ds1[T(ιQK|/ al !Nxl2K_gAVryv ;},Z 0(;YS9 h,El2,ժCVo6aeӦdH_3GWX彪f]6pڼ]<5C'h>@ ۴>?{X*%_GHlO*[iv2oyIRxwLbUNsUtOJ۾?0O[[RBy>K>ZRعp4;bBj8 0z'I;7ƁkC `\1ytW=20y.A7,x"50qVn 3ujSXm:<&?"3qb9?@zobM$svq!=5o9{xV;mg|Vӯ@ltX\,+d5z7)M )hH![]^w?fH-G {;QL}v;Ѕ=NC3KSПSC5T&V8QQkLU?[_ٚ?F0x7]q,I^(*̈́f{eIN C)iLݣ2;Y+oh}s:B0!xG=G;CD Hjam'ϟ8#e=BE6Mͦa)ͯd*XTijzIϱݿ8.s1 h|4l;[&^O?@P?.9tԨN\Bֈen/hS/MJ[L-lcX)kJpoq~'vc(ݙmX Ho [,̩(9\-Hb״i8!߅sb,oL@ A$u7c38B&Q $η-^3W8ܤG<F}|/9,8KёaAb[0c`@9jO߆6_ԾZK\ Ȃ2򿤴a1lMR&uPq&gjX'y3ԃ q`SOۯNCFc5u0_Բk_ fT1˻c{dw7t`.Y[8lN߫Cdu׽y jk$`ƖL?}u'OUE}s3:\5Zt^Su?WK@0tRlV^zٷuV .#)84+yIXOn9M[9^?d]UtƎߗ<~5nt'g}}*\+:@҅tvRM+ðԿ8UTxqi ?3g`^s#vuxfUS&A?3jq"ʥjD&@EhYUlCmPiw6UXC'2 u ԁ5c7L]O+ԯ Ly9ȧ+vߵqsQKq>8{ѫZH~Ԭ!gM{`UT;^OP޾ .Z/OP6=PU)rS: ,MBm/Z 6lrH[|9h2гBB5G+D-{{C;yG7'վ$(݆1h@i!\4<9mݜABܩh]iw9Ș0jBC!o89&kyOqle{b%2 Kz ]OycHhHJRI]E}$I%(2BZBV6{{u^ZNA^dfBb"؜D2O/}}=RjW$9m:$gAyNȐLg+ߌ@o_R \?Jz4@;kw'/'l1?ֹCfPqbx(?]ՙ*h!1; \iT `^ƥ޸>j Ufd=NHXX%X@zŅס{`_hha:U bw&Cp앣$ӝR!0E< =83ӛHe1_a3aϣ'GaRevf+l4fcq0b˛* M-w57CpNi 1z#289%.:djړ;>Jte司:Qq9z&fbDHc܍' I6?+00:c-"s> ?i](+>\5fR#Xq`~SSS-z~~ _y_1W13sH bc=W:|'PY.NN!mrC X{s-Z?X5`M#q97ebԌ4M工"~g1*/_? 1 J#XP6G_2}L2BZ0!UKH%?0{&M>f8rmKcQăz+k#6z# qfǑ =| gz8ߡwޛZĩPr/K~'кI&EfaA\*;y l#y@G Nۭ;Kl.ҩ<%eV0q=JG_y&g#0_1aRe'ZvTaU;?(Jl63s:F@ͺ`K%_ JM^ š8]6 O%ޖY_"skJFo1W.X:/L}tI.[l`z̾'ĸ #HhimXi?sdb4t~E}+*&1Ylza'UэsJ"NbeRr"vG;51^ ɊSmKpH/+ތNnx$J ۡ׽{{&b>d3 q5g+.EPCױʟf.IK@5 NnGa-㩅v7|¯$- u`xWEH nd;mV'dYMP`{o`"Q{{qF\ٗ Ƕgϥ|#v߼k ْb+~!=TKekWK<#{.%Y#^NG(59աMIUs[Vz֛^WHs7@ "u+Fq:S!XǏe}y4jWZtV,0i&֋?2 }&]B* à@NN8_z^7j$)E54xӊF@{ ! 
DW1kg}@D\1\ص,qLr«xd*'`dۡpߨ*VՏ}NpqM88GlTcouR/z6zJ?/ES/& S*p"|gmkU8UTsf"$MpG`%9Xi.X7d 170kg"vw; ykRDUڋ#Ul\"uj|r ?|\v7Co1iw[:[_O0`=ӜDAvm9HE}pd]o+Ez;{ͻ`/>.%Z a'H-N=H ޶ co+$Fky zr1&iJaM$Zi?sgoʐǂ[T:rT1ɺS>650̿&)jtg%~LV8)o`Sh9du/mk7;^W=߯#rC޻B/ɜX`P s<E8uŦ;dlo͇ c߀ڻ\D ;v= alV_R lF*)'j_TyM蜳֤/JH\!)7мtiRqAnQS)1cawBw)w$#}UH ş"v{Rz"\߼0nU-ZoDA`iSwa$h X(bYwH Gcb_@V>5>aL2claw>Ƅ0GI|oS/MnشI0 N9)ù&mZHX-00*سێl,/Üj)ɛ(z}-H씔)/Ѭ0I Dm+Q@(ՙ^dӊ-}Z\.D5?Ʊ^6)h,Tho+4Sj)J|o0Qp=ms;&μdJaV ׳0CpTj=vSyZ)/*2 <֍z}FpWiͱ3j>E@0VpJC "#a^bvo7t#Z] w C???ŝ޿`Gg+ҫ/phO{FUR Jv(%4f? H7I0{|"iW,.oy tRJڝe2 .?a.S|]#;$|~bD0W|i {$HNLLVw;TޚٶHm7 Y0a9/mX:L"ZKcI܃ TeBޟD9}i[ۋoQ" qH=:CԝJ!MRR-v%?OJ@z;K`'fCYX91ۙ`v tسpn{CT*k7BLLw`DžTk0pBen?X+!q\1\qraddL]8ӈn4&!3olEϊbdO9d%핮oyKgBa&)+W@rv "0soE,|Hgx!Գl9`(c\I?,^T׈]*͟OHũǛ.~Nԫ(G6ʱ@DΡ -8+yZpn.|iS4 $(+wᲊyo: SGlM[w}Ɓ#s0żE.3tn%`d;)vu>~l|Lf൛x92W> ;~/24& ?z*LLs "97goLI v!(<~jڒ? WvfqSmuR8%ٖa*'vr𼵪?\.hlӅ&Rl*Jxz,߉ 6[]C|0v%Y%rjd p҅hv@slʲ-X.!m"tvD:,~#gY%B;;L'C{b\28uin:+uC9 O{1ۅlt[I[[ɦAL0Xm< %`fXؐJ,~a1u{/Op_;Ɗ'?.GwcH3A\>0V )/} W[[;l >D0/s-4 2N_9mKsoĵ&MUS6VW$d:akO>i 3c}G6i@? ,P,'XYv1*} &TĦ1"f$]M|&lɾTXQgJW ~chd>+ı׽K@tqAtKcX̧ő´'BUբ üOm+io(>$`WQ߇IKv"kY%L!O=Ckj2N@i~1%'oזwf`0m59I&p G3X`p+=lull?Ep|ݻGD>޻g,qxWX z ɩ᡽S?(Z. v<03S`u>d [7g,c Yʑk-z-ÑIZ$k.ۋ`b;m>SFp~an 43%_{w ߝچZL´2GnQ_g7Ib,F_ٽX{} &N9'OZ)cjsv]s>c@ě eh_hnazAO$/Ǻ2z00fH:Rê 9ᵌ=s0H<"2 G zY54~w[ดg/VbRǃ pwj8u9L0R[ y"Y`\ΜgNY/ArVܿG f4OCkI>\-l8b=w[Dq|s)seu7.]|uI.NP7[1OL9q3 ]e+ +S1f`ȝXĎCHc ox] Ǭ/+byq|Z=>X{PU<tȵ $36JE֓5kxSI 9*dPSM5~y_D{˟Bܱ[07>Nz>Z~L.%3g#xJzl[ 6Pd6M >pTW@e؟:  61Iң$dշM @~QۆsuL$vTdtsR4Yɝ)\N68C~ʯq\Y?r8@8=ȱo5H;~g{8з)}=ӄԏW"a>CIV?;d: [.xOs&RShm2)vtp, k<[<'G/`N/,u4_8KӞ[o%Kp,:O`rӥFU%~z=t w ='@/Y? 
or`JB5 ܴxT֊*/~{#|jt/m6uiww{̧5J?_€zF]^vAgCoΌv0KX,Önuǟ');bcgr;[Y .{=A}`ZIoVg6$mʲ -;n[pԹM!$[$0!zycH#40z8b(G:vBݓ>Fk/aWOܙ4nl m|ژqhsCL'=>E VxlQW^A߫jAm=LduhX߬6 <[ |xYI~R ^d);۠1ս}&)X1>g fnA\}vnf'tg#ظH,xG;n!됽ݠ0vi=-y{+KĦg_S5\knù[{8x$ѩ4YǦd鑩TY$Ll~Yn9V_\wM<-~ԣ:A2Z8$}:g`2*K2]^e`},&bL| >ߖ3m@ ̖qcc[_c^\Wm+T?ǜ$}  a|-QGq2n'Pn ~JYwЍfh.U !'u|KA^^MU^L9+n@ڱL󑡵Z4N;~-JBX}qЌ+hSVܾ3!U~:g#$){'Ss5Nem.HG˥0Ǹ[p{C~}Y~)'CLBV!4Q-|w8XT,Á+Xow"\fYr:Dk?ajQ&Үn KO3|VϗO)2u8yHoszkX@%EN}2 ?}}N-┙mrđ6/V=`X+^jۡōZ>D3P;j)h2Q%=f%þUϥ8q>J]WKN |8qB>L;PpF e!ϖq_)9H0yBRQy3MJqYf!ZKveE?R?ϓk/4ethyO_ܼ 6)Uz)O_fEGa4͖F5RKQNF݁aJӸI2r*I4;` #RuTU6mG*wCGV[ֳ͆*^H*si͡]zv5R' -Xx{$3vwY˹0gk1͞"m@[1ڡƷ\7  :˭#-lL Vi׮~&caEz㩉jԻr ebqiݲ]h.**&V.~ŁmayrRhP .[/ڣ,*g:ٵI$_3^FAANNr.rtI^ &.+/vxuvKk8º!u^l>usQh#Z?xL|%"<-v,y!g&X,w>^$}TC͡K9WO2+iT❭6S9H)4Ů`sF0jGav[3`=q׵^4ј7F<~h%y-ܪB +G]O+VAeâj1v΄,ϩz# qO-?wDy>U`{j-}?Lˮ}'۞Op}\lj_/@ H?gϚouS뛰&Eh1$y'qVHAXa_pR',͸Wa.3O1c|+s LXaP` *v_p"$+]"W{lNai5L$xU_zƅ\H!A@_f`ZѤOph !m0q(KÛ[0&ϴ9aҌ䳰;}JljAM]$qCߓ77!-;|o@Á.kfS _-*Boy%h<"Mcv | zƉ>I#76jL}p폐3!B>6I!b`s`TJ9Ot)>B,% c[K̃y;6l703yg89s =Lmm†ZA0OUS`?D.Nǜܕi76ܬ~ To=ߠ,g8[ ;}#Xk_ iOG^̯ξS 2/q:.ǭJ^huϜTpn2tҪR$\qBQ070,r0,+}" tv#I:Ca3ror4 A5R'f6ʼtIQ,*G'd(8n`YگP`Q\r~<]>KJ6OFڼ G[Ӧ~W`!^ϰ񫊻 lޚuZCPpyK(`z0{X 7|w+ H& QI~{c_rƦJՋ8z9`F1Lܩq%wX (HiX0[Ě`eB}fPeo9[ ;F|܍p[ú@.7Bn͆̕-En:Cؚ1Vy,/?giU_ %5K$[Cq%jH? ]']V1 &븮=:kBX y 8aD5!|D)R3Cحv}Ǻi#hIԊ[zoӝgc_1^yXۍ L]\gqYs>6 m+rxH8$whP|\ 5㓪]8{,N8R׹j(spzki줯y; ml VK85rjȝqQE"Y9Xmu3:ھ̱%0V*v*,'uE-bݫĘHp`zTo.'/L(h)Kʲ#|8׈~6?6~ANZGvh@^ -=m[qen-ףLU4PYm`gBā;<ðqhH:W5d4x8 lՔo0uWQߝ/̾ '*%iViO~S do'aZoVCꦯkeV?G* ˪+?n]of6XJܣE/MS)HZ~Y:q|fVbq(0ˁh=KODlC5[vAaOSPD6 6詅qbP2%2o[]#yG/%([w}C7|NEf'[6+MGlno3[BHʿbkr~'n+ye}!:}qPu[+O!)W婻` ! 
'u:Os-a_ɣ:UQ?+>etVNX}^d .0CIpksTX4 ɡmlKV:f/ өTjh_IҺ'M q`+Ki\-[Y=?tqǐn<@ư|R(72V,ߍ5Si߀}ŔC5Q,Eaf|K3"u"RE|=f0yv,\ә)0H<ɲYUnN&)|Qaz = xElveفSqkq"mcm\hk4 N~P+(B0PSP?wQK&6vBnJB3^qbP&\qA[LlaJwIsl6N?xsVj]kGELlLH̖~B9vN h[ل[9#-W2?j +t}b6 V׶(}^*إo85Hġy,'X-X5TRuf`ul|ےi£W46bN$yY9l2\i;?67ƷƱF#9cW0 ~#MkY?,=FnG*P jNF[c#8vp#r/G0 K#wPveSX(2zʌ4iA ͧ6VmWGLj1G~nה7zտ$S{6oe޲"jĒwB"Rkd'\:y?lsi{#U<`݇!G&̙ku[]ݞ˜ېCY=N,<:)D{ݙύ(rrK¬RHW'ؚBkaYARRM7&0Mf\pՈ;r43J%/ Nr췭0-=b[lf$ cXH,sq9BO kzH\|f8PdO#T #7iF+@O JXJ=Az?ES]#Fì |ø>\ya!JdvYZc#csǗf7L*B_c_u$t\ع~ÔWOi`>SRdr3dt@xNMqhF'-+R)M7 EC;R%J9ְLjP3T=*ecACᠮ] y=Ɔ-ߤ#fIk^%m,^;JrfVg$g۔GN\k!HyMX/ 7\Pͫ(A2v/`QP?aoW6㴔[fnR s d J(VU߯pL6Yf a%EبkҳWxμj@>|$LeG =_ q'BӍ%/kw~qt&iށMFX-<ˆ Rq`t)ɘ>}viO-eP,{V1 "ao:b0\`2 !G8\l,WJ<2o=ῑ;F%4DFC}#~8aq!|5A}b[f#Cr&ؓ=W,q\YWC@+ zL>pBTRzzU _~-2oCz8vRިZ/Q؈`r<A eN5/?ˆxS嵿~/M~v.7d{(%*$d|ms; &%}G`ԕ AwP#ZqoDs4n7<ͣ8z=h=0f3.%p.*Æ]sSax84mן"EXxul){ g%wU_w[ղ#.04lLym{_*m0kqܚoji`2y2|3*VBE-aEt@*O5-On #ѷjp.X |T .gd0 ْϼe.}Hk3 gfzQC\dY LW}_hg: GlGoV`ZnNC-ة+UZ[YocfoY-5V!}Xzώ>' y k_} 3Np/9 -kN`H;-.aҏ70EG#Y. , WT}kUn5N2uT'vmYÛm8U*f}ybI t.zܗ/0zx8F[pD9Qq^s)774,)㠏 =4}i͎G\++V3&dj'abk(&'en7bbᓌXUŸ(ARʝxA2cJISס1iݵd?уR fIQ`./o^Q v#_`l{tSӦ⃌*?BS_z CqIDZ`"ܡPwvN| &i8r{z(q~ORscI> 4pfЛr Rm+eZpz=v4v5`D -MYͬט/bYxKc!Q 7q6J~6ɒ^8.,\p &{@So=4Vcqx$_=KqJ8mT4̳6wYzX~i{ syo:Uzk>?(<)|xT= E)mU`ѷKN<t1B~LrS4UqiDt:bD}* S.SC6)ChOTrٲOW[vQ{^W?e=l_%p,8 ʷkUr{q5oz+탯.3R C_Tb8oEb71~7J& Hj{νEw2b$wRNbλ҉JyKDQMH mܠٻ4M.vǧvVP%~@@D >q{YFNB˙pt6$}~"z :\VHB_olvː1= lBgyi|F7u~'V>At! CSڱl@*.Ug*&M8Q{yN߀g/}ى&6WC\:۳2vzRaRk^SͰF -*L~뱟~p}"+ u T#ʤ!ޗiI$!{ŦXyrV/tGҬ͚bFiYLv?EϑE>N'}dL:=b|腨'6Cͬ6Ә'-L 'X(?{s%zFWK_:عvur6-/ gX[ ڟꧠ; ߈$eGz$h(ƾFǟrbN3}rz3Q@#Q0z0$#,Dn(Zߛ>;NӶ3X~u4̲4^@{ǁµPk.*Gg VV;`4G n&a.[8q\(,ɕL}PV3mߞ~) 3%y᯼$7w [874!o167`w)yMйGO ~S _Pe{jl9wz6g$ˎ%)lkEq aw ֨odaG{o22 ¢%& ;zmo<Z;ulT4LunNMQv:$N+_QNpdѹ;5ș(lFT8&EŃ)!nfXKq|M#N+~XCVz_ ݁Kը%vL2˛,G /x/+wOhjvMukļsl=$ vxn* }M{jԬvY{rTgMҲN\1l9zƟX… c$`'L쎬BaYCrM|p侓v-:K7 W< '.X[=A,^4aE,Gu$WJϺq{D*VܷS 1ӡ!NLQ+3\հK"Ke`~$X @K!+0/stRO=dn0ŊWSa&X[ʜ,\^i{ZQsZ|2j_aa3Iip%m&! 
/0 ΞS.T9 ;qmyG9"Xn)Ⱥ /ovx) )[>dC~5#J0ϱ44Խ3!sR~=>14l>¿A G_˔1 EXHw2Xعଭ [7?\Զ yIUޘS91]6sD._S$/v V$hF͡wF畴>c C/=̋b0woCef,Rw#?I?Y|kSlՓ?s+Q~Q$1]3 /Z[?=L@֓F0oޓѩc"p ?I1T_+ rEE C0zGUk IIt.y3}/)Q'D$dqS m,]e&z{!Q' ҩ`CZ-+Lp6h,.k.یU&7Td]MpVwN՝\ 'tAYIY7Ώ,4F˶mlX?0m}2}%? 'YR70™\o1WTԄCɟӟ}\xGYXhnYsChD%c|Y2Ryݜ0kgq۟`Z;ջ #?Kk۾lq㟾rG``ivN8P ^JSyH)R?܋83ңJ:{Ebe֚W$[0N-x!ua(x:]_xz7G82;5 ;#$vlj?f X%U"}*ʓÂT0[wY `2cWnӦKqdq?J5=7 qæyg98(4q_0~w V,Z)vz zoYz׾ć OcȭaY(>]sv?},P(pٓg><\7]'kn'Ԅ峘(;U W\oX,,u׳@/~g?lv41 j8AT~ުKg\RaWH9$mzr!CꏷG?{7kSEa)P7<[:bmO (z7cHU`0q~GfN`_3XtPMMJ>Q"5(;SN Μ 9}ke@\yf辙'oJ n| 3A0A}~!\qcSxDpm68 'N/}7 El\Gbs-dH%yU{06SqOֽAoV1m#s{|$Nǀpʚ3 fӞ㨭ݖ )0v\͒yvV~8㦐7P8wRUb/I¢v,}5ڈ3>8Su{~t4LӹtWsq$ǭijKI7y}r5aʖP'nr֋v+7=J.,+"w آ<ܙ:XN:[t~Ta{f5v+oao@5a'uk.:+q$֝*Y*/sC۹$IfVTMthr퉼%԰}~k<;YRgbw{ ,)VzHJٯwmq.BT0p3^ Ĺ/v#9ڒyz(Ͻ-_oVZB y/#ˑo[_m ԰[asuv1zR&*OlS(NtjoYpf/L1_qF8nJ~\c%,E=!3&&R 42ﲛdJ{ -%<8>ev}'`O{Q3^J"mk~TmB yaiNXrBezouyjZ&ֹ|9cedU/5BQי?_ X‰. sY!(Wлʫ߲}՚5+I _ЏCI<28´5BVsjy`ďE jo =zf>LyX[!ݛs5m"X.nrLbMLK05 y{&o LK_ 8$MH5T]Q_s )io鄙P'WT/Z羶/#i5Ww*1[$aKA)\?1¡:Ag #UqNnB.ۏڹ`A5#GX/:u?-1&LkwbYQEge.]7Nrm?K0wh1)ߙI?^d}Ν @{~`^۶Iޒ$+U?.uU\ùwhEU FF3`yS۳;j wWvzR qQ>ꛦdIDN5yJS^VB}Tҍ h>CXnvrmb @e). 'RVN^ǦkԚ_Zt.B/2h iz=͋dHFn;]`=B6":YZ'!;n0[Dh= ˱>G^0aT#lm8{h`F3L?ne C*b(ن;0ay?kM˟uO`v޿%ণ)oKX8YRԑ@M#cR3̼M&hw;eBT$R6,s‚@d.^uD {7F(!qRP9⏂ V/zzB 'x;lÇ}Q&j9soAxV\v*lƹ]fZvb/Ci7 ̊YA_j_tI b.c$98 mҺ471kX4N.8pEH):ESkwu$xc݃*6oW6䖀 ս&[IӯtaKEF\֣ 8#"?FcvX%k,?7=^v7JIUB,Bɜ$T]HR\۰S77HU&ɽ" Dw$ `ޟ?NbJlY_lJq=rz?rJv`^ 3cBlqY¼8/-]i-¡GRXn&Z̊9'_rcsmS5=]0* z1 )06*779X3 D.X]zdY^y㠫vUOMރ)GLH&=忧q۶\/)=i8z**As4 ܓI[м$V=-c$5`K-4<܀]s Nvl&U:-B[CǭWV ??5xTƑ8t+,M{ڮ')v?vkHYqڮ:ܣH c*ƾQu"cIi7ٚ  @t<'{Fqq_~w-!y6 Еe=um*eMzB۵0f*AЕe5k¶!a~@XC |*:WGYģzIx2ڹَ GHHz˟Ykwݿ5ā8\B%Isu>B]*;^nkÞk޺TB5(,oeoKPFW%VahYB?bn2LrjOÑ=Pk}D46qb. 
iFʹw#0Ω=oj/PmA &=D2C;ʳ,̡4,0ɻqZ0E>VU޽~ĺhvwJAgdEЖdX| (f~í!G=DعP^3Ƒ[_ y!eg4-@uf6Hrث|C~b#>zX[l {!-#S]1濬b31- K0pg7٨=ٖ3UuI2̶T\\7}bŤ"J[`~e ,64X ^ƩMۖ`.K[?i0 O=L> _坄jU[d62 (\ڝξtDCQm Gݤ2HoP #HҡʄWWľ6°J3MRtX^mGplTKuks1gI6u9v]aZ} Xwnoy x=R$lǡ0vej.ԙ6\6CJ5E׃`aY0?դ(!Y)55R~ҮH?U<5Jrܼv*q&BIn͆7.Eǻ a/ڔy|D/,s:?P?n+fxq~Q+vBjQ{K VQ}1RoFݏGnˏe;ն0VxDǶ07u ! no0-75v6og/|х ?H0;/K2;l+U^E{a6Յxsګ`nR):S{-IArO2Dn=.{̋P9}qJ$mw( blv#[Ewj [sƳ{qzz0">^w͔ԋOvvT*.@ 6=Wg)4`ǰSwBXk;6㢨s<峣/Tyg'|3@Қ4W.le{]-atjn )h8TAwwE]E^ o+ _V]Bd?o~g  >>3\+mL8 kӰU1-xh¾ 5ӏ TΚ. 3S*V۔'1<ޜ0}5r2G{NF^&9iRtUVcщ\ JTͷN<[dPsn}I:ťHsm \_7=K0m?^HR|FhS{{nWifzr->پ^Td:c14?))#3Veau9X^x &f6"}{srTrmzN*KǑߋn!K_y''\.D2~ޭsNd1*`ISCZ#t~=ΎLgB˅@vSxmV\D*ʿwIU Bd|}Ѱ*NtdnxZW"9:[*-߮)CόOx[={Sy>6ı uE$w暩kI~EBM NFw58蓼Ub|/?/r!܋I . K6M5#$}AړdRp\8gǯo nU N@5NSIt</hIi10gzr;+v> =U۟#H]+Y2@MxWXMNdsq/٬j@M?١>|ƽt׭/ø3Z}*H7džtQ|]fOceeרף8uu0r݌C%-D16`s`yuW\-;ٰbPK5B/T{Ίu=# t3hrY] \[VZǗ[7'!M:˵E#f핪W"Ud_sd*Wݜ Jl"nd *yr6FÐAl@Nn7mq]uZ-NMl&mɟ&1O uC_n;߼tHRvr8 2 F^W\/wrq6#6k&j7s,"K`U>bNRyU3#sKue-s1u8>"Viz_O`gNyce'69˵$ ?]}+XbpX(:\^uN8ci 0xYo0rsQ FRvdmŃXvEtCKarCҚUiHÜ;P{۴ o0rnA N܉-)X;Url+ }qde"qϥ\ . j9iW*pbCkv_ys.L4>ׄx4kOW:NGxa~٫ZX{v}QմOC/^-~N#)SVl l$1| .Sntu ~PR= q`0xn h=Z W/лվzb8ĻH7ƚGT=:o?ʳrZ +oBlh*u f6Mo߀4#-$wp^DKY`P'iM`6It{]no}5#yߜHg߳!ϝBahާ-qr q I/zgI (K^<>gO36[ j'{L^Y h\ 2&uϙܴ ul" Su9`U=5k5ݡߩZ_3Ξ{e`wF#ɃZ0ͳ_k0. k)!$H}`]26_K Lc{P퇻<{8XTZ&:ȸQẙӢa[[yH kSm}+H0 ?DL !8LgnޏC:7]1֕ڏlHʍSU}@R[܊ą̢6MZ8$qxڏ?#d9*vO,IX&R04r2Z݅$0ӲN0Li aǖKÄO,fDZJf.. 
)o^l-3ުÂԽOǸŰ,x*s)6N/cJ:j,~ۣ~"$?9C[%4ae:q{qj@(:g0ϜTWO n:-gD  =fo`!e=?9̔Q6L=*_k1]҅n@S[ _z6-gƧoė)\8~Q 8ã7>^×.d|G)^6r ſxpT[5PgtFc;c̈́lPzԯUkS׾B Z@~ζyzBaAs=0;Dpfw=kN ;}ςGbQ -ށג^zrMc}l>lgCvtu=frP=`yYv_:sDqjL r4J&f%$qUܷ$>(9s{aZ얿$PlG0>Ij%<N׬~[#骉4毨ٟ3JCt)`:ѵOLs 6m3 E@K΅(9E ^LW2i81c\%{g| #{^"XޤPm/AGp&9 Ԅv6;eR;kSSA^ $ez/uKcwUr9{{_%m`7վpb,88WYĹ;IBeX4jf h۟C3kIʼZf>#W>*19.'Y}gӟY$_՞_ޙ;3}r=b0ؽ6qE G$_K#Y7U)9yzY~#}·>Q$荿b{!EiJ?{O*Ưv)ޕcE*Խ;[dA╳H;0/WzMgrMLRhhiJ~:1#籂?2z͔Hr]dm`EVF^ Q,ڧ;f3S~s ,^l1 /.ۜI l;^C`RY|7AsdR=srd[iϷ\h2h+/}V']")9Nw CUq5L0|\9'RgR e&fپ[w)]炤cNgN(ý us,=ټ/8.qȻNIؕH\1=ݧ#oY5K|ݻ<*,n#፞|+޿-'8+MDh&,{vkM',?sZ_c EaMIy,;ƒuFo>E I*@\4k φlIoYgSP0"9o5 7n(f[m.طNh̳xE'BgBcݾ{ϲ ,9lqfq@x68L0| Ewk B OAwFb.~]w+[]왼1;Yj~߯PR9U 2c< 9#kq0 [)'K—Y's2FZ7d.$pgݿ [T[_R]`H+x {rɺ_XRJBPJ.G vz{%!Ӻ̙;Z ث}JwËG5pYk9SSy BY˩$Ј0iǫt 28߂'~c؋mBImW+ l^CO~d7y3V]wo# ?vf{ܜ~3Y[@Z1 v](DjYA u>~¥.5a鯊i$t D*kFu.LüˎF\k;%07˜,30] g6$}eR&88"z<<;Ŏ6nுʭy7Y4?aBtm~ >]fE֡N䕙\'V92gl%_c'pwF xK\ *}].pa 6o9g*^j,V\YދstD*Lx;bM%Q_k *7_ڛP4dm[~ˍT=(8t"/L'N1Mi[_n"UKfga_HCA=h*ﵗ`鵬fs0F0Fx$OTv_0w[lk'NޣA q ギAWWP!iMr%?Tqx4Ꟙ.GΪ nB\賅П%o%?Ҕ+ IM=`ĺV>mSanlߞq L tۆ5wa8oQsñ}zK]pN< +Ԃwx>%/RKٚoJғoBE`[oPVw9#zns_-cӏr?[q#˵W=rR%ݾz.+(iAϹ l|<&\Pm.=@/W7A螟*M|쳻e5Lc:}/HU 9 K'L"G]`R9O-.wqKZx]ˏۅ3 ^ _gMN{lxJ^ rصIZV󿗮:Эa X4E[sΟ别 F{e9vKP3gBI5F`Xx/ҭ[0ڄǫa"盥tmG>_&Y6$n~sҙo~F_aew24I?HVQx!z%~a5v~13/Թ⋜J@Uɑ}WN<`Zu}|[R= # %96PSsۿeSAHg6e=]܇J="`OHe8zYOy7ɲ/1|lPYDm~ #1WqQ| Ph cyl8cfMK \^cy{#A bǎcp%׈ϙPURXw’0盟WB 7(6+ٳk(LGg3UD)V;j VFzqz|~HT]thNߓ6ALw1ftϖ'Sgg5[ىÕd::>ܒ'=TgoWo}2qDZIAwCa.(m)}YqljU#?L.2\.zbyclďo2I&B[̯3zw6u0vv >^+(OUicMAsV 5JtR9 q."/v73D$Tɉq{~G y:asޟW<>ybo5Ӎ6  M6eAeq:MZX@ռv ly1Eg߆i_D*THHE D BE$E*EHRIڐM~2M5~2sx+̰;Љ{L|S#)G]rܳ:$sâ 81w\pd4~js3ꊗIOT.(b H}7 U/ġ4ov6u#_8y[K,I)agiH,샕`4փ)n gw.|ʐvk6^Hp`y^ $<8S{4s־0DLr(SǼ`_L2ͨf o}`ֿ̅56XV;["~N WNd &MN{dGjLPjP fL"0':_hW [Do&-(Jܻ藴#u;Ⲱ+az8AKOot_DE܀,{!Xd2]'=!Xj+RObg {aޙ,Fv1@zVý4=k:sd\~6w\j'GݿGY|> mZK\?GD OW:Agʬ,.stR8(r?8EA@$þ?\ZH[<V39!uw2^eZ)Ws g)N@=Ty(fp"__ x&팑]gbrƀ];)y)Gij\ 0׷䃔$4-w$tNeBI&@ܼa&ymŐ\B2\47؆ۀ ~P2bH=dpQ\> ۴]t?%y?Qk4 
w%@KIߎ|{T6A0)ɉ};;$RZ%uBtu&o[~PqEZv`6G8ڹV Z6yu3;8d"5aC)wR7<_ߪR>\4%w+"t^-N3M|ߍ7Ä;8G=4g/ִ[{es]^m,êO4n{$CWKa C=u1{L :oBN}5X"4x,E7F-.H])k0êxfXbPڼ|Mq χ3ZR`+\wds^,du ]8$i`P;`%.J7,g~㒮1ة ˯`bӸ"ךJji ENrv=*u{}Hc*4>_q?RGjBܣs "Yhȉ ;KJC잍pqF60Oñ3/GS}SRX]$4L@^GqE)fp8 aX:9x 1 k m@&eꗍ,kvz%ry+;}$M&kW)P}+sW뙻;`,Wl܎mm N0߄fIƢ}Npn6nka0d3k{H2_|w{%Ma2Әo<tK[CW~vXYbqduq+ i\m$rĥ|g)K+uWa"᱆`n}w",/ QԳu0[7CcF& oGmȝ_mHL^ڗ=Q{7h2)k$V_sY)mGk.p>X !)c} F0V ̾{)Hc^˱rQ`jQO'[+jnbPcOXJҾ÷4KsЏ$Tzb7k;pN_n޿oǮ߯T$_$%~{$x足ϣ9`s=Yu6Sן4?ωSom\NLGb &`λ8\zIpN,m7|}N0j0 WWf+3V;3~u|U}Zi@Ҿ<[ cݬX|Ʋhɍ˺h }=E06j¯6ӏqrf64Qc[lʆ'y+~7NGbw`t^4[:`c6DP DZ|]/tr਼G4kmG[iPϓ$+C7fጤaHo k9w&G(=;Xs$L'PYI.[L"q0W+Ox)rt̪/hmfzou}=  >ykG2P0Je#=lIt7t5Vڎ/LZw`O9|vgL :2CUŁ ٧?k6bQ y:G׼g[ELq[硿TN8 Z,\瓐} f̻+:c\K֏X83N,:azc귤$Óe0x6Uq.Ƕ  CJp0Ϊһ/* -q2[`gwK"ئku?ث^o u[ՋZoJ!5UܪAR> cN[iAEŅas*_.,H[΋0C>3nX?+09[< s1=u7E@qcŰPL%4nzI0~ {C%0v⬱뻕pU]=r8ſt5ۖm\͊7C۳ VkZ`#3Lz1K7t_cz?{(3ɧ|ĹS]gp6C}~8̈ q+Ż`)gOU"dwSKl&tu3oRz:1ygQ|Oʚy%2V@HhvN,؊v5$ -r˯`M .2?{Q74wy5=ETbWZ 9{m^&=F4ΰ>(U{_C0a#6=7{kEtkjY m@(Oh>s1wݚsّLhwR72R7ڹأyęd>\gm4Rew鱏0kZ̈t gѥ#qn09[,.L aߦpEJ#/ݪ)aɷ?.خ 2A3鐭vГ4gvi 4NM;v%G;!xAW1nIm56%,J#mys7Ћ5{ -*EhL#n?W`%2QبtU)GD .?_*>]S4\So[ٽǃws Bov,d^K԰ﮒ'pjWsxhX^,?Iv=L٭L_!'fo$84 5(Ǟ[¥Pz]#M9߿ԪS8Q;A'5[ɬMX4Nhwj+;p;N5&䍜gbٜ!E=m\6ҵW"vX9% 9L"eYji&Dp6`Vgd,H:PpJ +¨RAE#AMy3zs>d'eSpt9ucKh  +fge}Ϯ 0#Be3|9伣`Z@"d?R 53q:tKc2zd t=ΊM+VqH6 f,6ƱA+S%\lו pF`qV6ky-1wבz6`00c~'۞/|y=#`䢸~\=WSngӣ;,m&6PַwCqYq;A2(-QCPk6s]tIaӼwjEŷW`u'9#h+٪rsx4kKܬr8`bDCOajKAʐt]$D3p/w;c_#x6‹J^uuf_d4a6,_p{S+o="Y VߓzQ5ӭq1WzX\5@)_̻^a'5>s 4VZL;dˬ<,:S +C&rݝ^=p`oF9v+ΡW8CK&θt)*XT\^"u֦gQSYm釾+mpkzoz~oЊ.&pAޫǐd,{sW)||{‚n[N#|gt˟doyۀ`ޫCNka@`=Hfi b/"ݫ>?m3O☥x"̙gQ251?V/.>aOCvQsz J\O)ƓksðnKQ(؜ +Blp]3xsW[ٝVAU!iJ8^مs$Ҋ6Gu1ilT=E H4(3u`y!DC320yY fs`jj;@T N<eo X6 Y8LCMEN0y;(BVq['tWD17z=/\G/Ti}'?0Rc9f7MwХN6 _\ q;]$hGRUBYOuFR\cNso4oџP-3ɲT&o |K& L5nzЦݠ˚++P͍%tb" qel fb$%  3qb}=su7s6fOX0&/!sݐr07<4z`je?x{>@:5.^F_~PISOK6+0og2촗2]}Pn^l%ǬogI/b[jI_w6s2֏|s@\Õ6;]9f7j]Yu6`ųI_J^j`V TLm8#U0'yֻO}gH֒@Xl`Wkz.s\2W}Y+έ%9޳"z65?Xa 3RUr;YGTw*)@s2ɍ8|S{b /hORмum(6ȥsi` 
&d+DbpFH:z]0d޺gߚǚyJV{W0s?2̉4Zĵ+8Ur;.>t+{Uٚ u,c-kl:kadn>.dŖcBzrw3κ3KTNYOUkw9cTN KBg,R8yg=4Q$SDǾ9l]m-gˮW77.By:jmV(n`4RckMz]vR{뢲y*0dⒿūcǟ wb8f+0VOE2l`׋cA94''蝙}/ % 7ԕj=&HsRӮb!T}k@Pz!/"Ϗ 5,&9 ߷/t;;uMP^m[="fE0)(Ws,[{8Gnړ,|a 踘 5BX{m"sh{&-w;Z q67K;.xҲͿ'`jەϫ!.m7j\FeCJ`R|8z&hq1N[5|A|qO|-q\TvB Ma)z]NZˡ;/gg0{՟p@`-ר)jLń6< -0t? q#2@:\zZ.'{Tj A2VoT1՗R\qy sLsaHG7FC-mo趿J]<9G;F>gX:\ssV]r]|7tl; k4`rhͺgSqہ i qRñ0397מǥLEljT#4lt"8wMêZ[O"=q&-w&YYR苃Бv[#|;VکG,\K"Gĭ~dך燦Rqpu j?o<-q}jCӚ`!5L} %DST `<3k4ZUnUqc$c1?؏y~ aU_a%1˚-ATmAjAxKa &I-P:Y6Tڍqmc:x#C'Cwݟcx|~ Fm@O2K`$Z3BrTXyVǹzI*l?Kj;lILw!֔x_QM]Z!!.j̿i$+nʶjܸU^a:ѡPSWk7q|zW Ͷj~ì鑇 1wm!8}ZO<9sIu#"%bW?뭗VxM^`h{:W}|Sj;gp4z)XXUqsL %ɰɚ_eK?~j# th N̒78o# dDe`!X.~:sznbއ8{I/@ܛp8m*M@Z>[}_E}wJIB OEGpتBxԫ`͖X( LjfW8B/-$a"]7ص aot:@C䞧(O$e>I=]Kl=NŶ.aZ79z`jZT&Bqҥgl2?{tTmY'՜$̓qVdhP1A; Xr*4AuG2 15 0r{\`d6Cr_ ~HpQ3K =+Ssq[ghc=uM mx岝8Jl\<~ {Caq)rT)}+qF{+5ajP<yqWM@k9?(pƣ@0CWo7=;y`8IaNwF׿~M9'K~*+Lw0 d$/ƛHfYtk?k2@wKcauga :Lc[̚$tq~uƍX>"sb/#61F{GN5Eu â^՛O'4M#1ٗ` ;ppCcD_b._gkh:xRC'Ka)2=~u=c"߃g/ _0f^MĦX ~!Vs;Hp7햿!M3{81fhUwܠ&s]>6BQFc:fx*"އceX_\W쐯v (椬sgخ;v;7n\F+wћ%ոէB3 lŊ23J@k梅fQmO:adVV #0otOd+ɸ%, tdCΘwJx$jX|mT{p ;ICJ6i1@ߗk6 ķ{p=#0 wT1QCwk?K[ Cs/h|v'˒z`ӺO&_m, G*C[Ygp;rVŞ;r,Z-O\NDz؍#z!^A\TLZ޺Ŵwm[6w,+jrY0)*x/i8͋|vQcK!Y. n0>qw+}ZKwq&q yࠢrA@7nC~8w2 wݫT\QVxjQ&kNb{ Xi:Q3o:7va-'Y :iJE'y'c]礯B6$^w 2d_ ?(g[7xnGIASVe"Z[svw?YzPרt' eܡɇOּ1Vpi=GpYZn .?A:)y}s V`CWT$z4 С>I帗}]x5}wd [65 K~#_\gů3``a@ΈI\*øMz|kE ~aşq2ϧ$XLI}IckǥP#h =_F|#1\r`\塜ngljz{'؝ Š_)4"]|4wab ׋܁]А*Z7 A^JZ Wu_{aytk8ڹ'An+7y_N հ*²UGܘIpڨ=:G0fgrnDwu=nt/=0i}j{SIu/#z2+ᰦEk5h+ ﵪGќ (&U7!ڥzݘ#XN6 ľnNkIa"-s2k"[[C@sYv1 6\ ݎo(8ea|VL(H,l<=jzI-U-j)n.}?P>XEm՗n`Ӂ}?;'VJTšcyY.PT K SM;,!8"` A?pg nP[{M}p3k˶J=:w& BOiC5 xɛ;zXlW,'TӧslY{>Aow!0!XGk^=ǿDZ\ mvLGir0~(>y͈?5v?͓ v'İlSR_;5^:XgqMq(<_j'vy;}g+; D3sb"nQhO?70q'$Q[?1u!gzp3׽=f]߱澮5)w\'gznsbA͍BF;m NtM,0E? 
+]ܷM/ZqkHL,5VK><`e/,€Y6];u .ł]8zxSWq 7^֮Ti}Ǩ1t9SJ>+ѧ@Ǐ%Y nQ{L;1$׾&Qde46׽ӪwGSSti2Pz,fqI^oO"/l  '9ɲ0㬘eBʦ .r3=Ti ύ "FqKTU#a&ire}%{(}~:Ws[KH7áu_l~c'XL րQ?XqT55` )oֺ00ȟd8C*]‰eb@med86+ '.|,{8ŹOߎg7Ħ"CGLIM5qC#+ҳ'TL-Py`9ANkp9sl9 E)$ZeN >l ZOo_ )ph)Iq!پluS^yC;kN{vS Zo%Ptw U}C' C+ b-WC- oՂg혮C,'ٖBY= Rٔau;ծ!e6Ŋud6*ihLn-!'Xbv(;o Ց?'l5?SN?USeJp>PIݗ$3!+ !pA }?yޭ=q-WcX_?e4robO<|~/NJ Kg~U%9Q ks g5=vPlDˮ`]&'[丵rGNU5~c$TY&b0ќ7'X &ϾP~xn^Jӌ+;LԂ0=9N޵JR"Pyi[$bpћ<8;P{99re&pFe/OqެPx",:a;qԁot03i.`Znʙwx/B(~qY*Ihl8KD{+4hDžsolN=Ui}ؠ{_-aر76M(eVZ;zI|{ω/򿍐~^k(/6&g]SaB0SiYHxYv7,z^' $}V> %\v$Y# euZLP!YS ~V8H+OX=ɰ9ϕx3cvA]bF/+Uv_< L߁^#zsdUZ)2"=cSw$ tv;mn$٤bWWl7o|r*/cXq쯺se̕uzI:Rg2х,/|:H}p)8\I?!ZUMpQ\O[\ힻ (B۹A$vNaį!Ke4RM'ͅ[l6׍qcW@X4}#A:\'[^rd@ rK+7ʞ}rfuYwFw(0(q2s<ҒH7\u\)QIu=Yh͵^.T9{KЗ_|c/ٔ EHfbR,ͨ`ÎQ qk=KX <ׂT eUogl3SHF3)Js͚<*Iχpz֜3Kw^Ǥ* yW>_:y_APtd\:=~yޚZǮ2i kdM4sC4n8Y,yhn= QS[W;õߝ!8ƩI/^!OO#wŵsuC0vh,?+\:NaIYԨ)~s;t`V2Φ(^XZj  y HZR'p]X#ej] Gǩӿ+M"vhq{;:qV?_#UVJƜրFn\t8n ₿uKz8;D?wjq0[5Nclٗp &6ބ7|)7T)pWTnw:HV0r+oMއEYqI|ƣVZ܀{g <ׇ+-8@~R5z$|<`-O` pzMÍ\q^T%(9w43?ʟّ|^"pv HF"۲}hw(D]lM8:Y*Q 쌟ĩ'GTkcpt5]W6` ʰd3~Rz }8^] ( }bP*2g6_M9Td%K6whT"byER-Ʊ+x/!GͪZ[IrNu!#ɕ,ör M돉Fbx% e7Pk}&)c*BtCɟ-rcqF2~ RO} ݳvᲂ3LJƙeL-tWS9I95526Cb\Xq}. ur2-w@_ueOX'.NHNiЗ2#LMMM"se:L ;@WYtv+~нL+yjZEǕvuf8q=?V:_sl"=5Z7zqE:EM/C덱2Ҩ_wI"LdjuSOD A;o^nwfm2.%dzwdwBO*Vx>/vq\OzӐ֗3t‘ɮ$8U{4!noKnS?Ǒ&>Pa-KcGa ΞxR\IN~q}@𜕓`9|>q-F,\L҄M~cݞ8#7_Hja iUڇaWSU0)g''`%+ 8Ŝr7jPAr,7Tf=Er7'tjwR/ü=7g+E5lVJu%A9yr2L}18P؝%2al ??xV ۸ @f}lڍT `oͯ=ճ$Ts530w|,[ aS(o-{`SyGE={z=~a>c$N]i <{\``zg=rb;qL'PwUIcIo~ iL^~K1P|<Ή;JOkH̓gԡgVs[N~^X'ָjw`hMMs~.mL{?X\{Eizv %UfA4Ia:&ş6^%:!~f8U"x6z#rsIg>o2pH0{Pv.RFmX尝EP_0|CY#{AJ%HNUq"}5aKkCxH[ >#"V\7})$Nʦ dю&~*- qAX%b~)SL`qxp;)es"|!dMS%qz kL.aqMO7,@9ԉ &*bN߮B)[N:vno sT(&L}./)'?9~¸:[n|8U2'hpВ ʻ_6Pfy0eRA;9>HxxA Nm t[j}=X__U~΍HI'^7C`8I!)ͮ_RD=#܅+9|$ǟo B$BQϤ+)I1Bl]9Nļ΋3"rCsfY;H^_- σ}Яl@w@^dVԺAXNܟf/[q~M# IY e Op&LwM8Wrj8z[}VX3*`kP'ocD2T1li"Yf +tā$s(4%4*]?U'p}$ṉZV̆yQN~C;ݜܠ3{AdرIN住 =,Ϥn,$9fp6}C8tz/U=rA[9B G%'q 8aʾ0ziCR*zMWevJkwv|% #[9{qJ? 
c5'yS[^a`rNc=S,.N5k%N,_FzѨvHuѕaZg[ >Y)4 +c{f Q}\L#n1ܵu{~0gfGA=q{oKPtR @ݻǂkyw09evwLe;iv]n"NP;se9~mdgu7NAjOOaGhi,L1.딅ݓ{H+gq*7+*.p} tƂ倥F8}@7JWqx/j~; gwR.wck>qo6Uq8wˢaUV[X_E7.i(bF:J~ 0ҩ' mكSON7rђy1s,0)d3OqVIz7>hec7ϟұ&=dgmcxX'nb }tZN '|w ]P9a6Tc;cb֝^}Bbkv3Nc~t&c|/҃1m@cP D[<9sYa#M8o qu aإ.e-?6`ě6 :Xa0?zQ_S1N? `^&.1(.:hӊ:kO po+ QA7s :@fQBO79ZM&BoSN…Y?i3n,au [Jk9i;B~0U=wGZHr~o˖â䉐L?5Vm{ѶW8~D c}{z7i+P)k#u {HV-0)P\ lJMc N Dz0i 0.Jpu3q?NyXl[, "/8칿oa(Q!C VNNP>51|7Z {%o+X5vNGaaHR(pD0sg8|weRf)8Ą!/|1YppHPpاP7\Oi;g`4=~댴.K999 1K*g2Ci $ Gl¬8q-DHٝkt#k}F {B.nf* K\'6@`FNm+6cbtMD;au~}1ǩΤw忛M-qq*m=t_ :3 64K=Q78 CEp㫸xdבhKaD3`y3^1"ÈjLmOZE 2/,1nVEf4S<2sHfOO'=_ӎN-műq.PM򔒴03S:UEI r4xTUGVS"ҴovWɡMAlܦmuyJ}mg&51+{N'm1a^8z/].)N8pq.t,%SrqXJpbFeXH3kI%s›3/{EHFe}j`R6aBhoEԇ4_Iݰ6̉;3|Fwq\p~!^sR 4J2ؐ0IyrD=zԉ׺ @=@Z\,#%dMW[|Xe~gȎC ~  NJS Mˆ\GP7=$٘5<#z5^Ʌ='ssF>3^5CN>5 +%|]1\O,%H rScl0e$Ά8MŒ+z2,y N_3MTytbЗd² u@I/0X]z)0+ܭ4|dNz Ԟs$kII8ǹOoQiY6гf}Щ6i, `s:NK$~$Y|8u`{F$r)-0Vz4Zg9u?оo܅c'+v-67p`5 z?hYH%,~\*\>/\5^72S23ENm,M-nWbuӝ:q~?aM{CBp2ݨ"\ g?oKzoS! 
ּIn4+h;}p^iseYUӁ2ZS81&qH\^E,71P*m%=8`$h@a%X~Ʀ1h Tܓ 3B˰*>̏#2<1wG<̤ n־'8;c")ì 7Mhck޾_:Zf`*_Տs;XY%oJE#v^ƙ?}HʀK}wN]0U#5/cD> ՍMTU0a}:88NUbE:%y˄`S~R=9F[ Bݾ9.OuwUO0G8`{Ox]&"rx,?<}mUO K'PwgfIM]쭽L0 _i|wr[\'HfŒ4^3c%Krz~{K(> 4Ж E0Y}y܀apUne&\/#Z=5uSDºïC&yWy'P_ga٬{7+]d*"%A\_YxM%yg#/i@?})%Yf<38Ѧx!`LúRO]+eȫG(0:a9k r=8=}ItvGLN:b]՞G/ @p:>>,0YoWk3>eeXT!+Y1y/ Fw=hم/*h00C-nJ"%n(2 +A֑s\NX{GBV$eݹx&E*ƙ3t`"F*`ٖd&w/zTr`[t[ZgX*"5"\$<5AnIOwW?z^m]o/A)H=NnfHyð(?W-ni[#[qC%Y[ 3K,O)A~3n(3\Hx cfпIq&,Vu!}$7f M{cI1 Bu=͇G/Zn|[S%/#u3=KXkLM[Cql׸պ ift܈t=JmY$S:$pW+ϵcǯfCYuy8s7!(Ib]][ͮK@bf20-N7"x)Lncib"R_κVOLjCŝMb0ӗ1ꌳ4 MSW@R1a874y%m28s;XSj={ BjDSp~h_=קw+-W= $>nZpxMq#y6qHwH{x)p9l/ܿrTT?qꢅyu:اs~L`tL!,y9safP>`kP^I)r0ףq0҉ݬdVg+Yތ%o6@dɇoVտvl>O7q<ŪnٗŊAv?-G&e0I|ep|+5ra8E鍋LÑceZ"nAߛO]uWNA5G-!U^Hr֢vsMWroJ6h%|uq׳᧟Pc+Dm'`Q>QȦ0i)AY=Sy*f;{\Єiڸ0o=#g .m|o&SQ7dyOvbt9= oAbE[gLs ?Y?Ev)jJ L\􂀀cXsGtS $_سW hxU2AW*H LFK==a~_ }qj "ZlXvJY >I:*MDñE;#Ht,r˖/N'֋ܐ:PSGڥSϺ-6n% 'o=&]l/-%sʸq8eǍ<KHM~;;[dmNyVgW'+<9=#m;cζHL# ljrAN /u >CgL@F sc..rFtn#usK+PV}Y~ H>=d|1[Q~G~w񑔂dϹGf$CA+Ӎz?mjX9Tswߍ>dk3Q{6"u.kxkR$=*kKL>?3ԑi#C( c$ň|&q^&#u֠-B,W*,_jR ]m.Jj3i1ٙ_ .CIV<)o,)S4ͪ:#tL&v+("EY0tG^a$tuv4_XY o־)9 GOX-Dp@ѕx3{̸lZ_!n-u:״y}b-Ag…'[o,Ιs $3P]<;C7. ϙx9w^,QksRrDž/ܟ.,[gd%/kQ}u4zOނQ|02  QdmU6JU#uB0T:Mи@%X>|/yaZin a\(y*+#dwLhգ!Z|-'z?SG2ZHVk Nkhg3b)ݯ5mm ?'QrsCIvL(5_8Q"a̡뾙8o8$`⎞R8Fw+(h1awD{)DA$~tM*=K\M؛6Yrn" P?}9~7̣ď!~ PRaUNR'<97mŗ~/1Hкg5Z9\`ZȄ mY6ǒ?mLHʕ2Q|һgƨRw\، u=[Kw3yZvzӏNxS$#|8v/$]=3ƼmLN]Å ^z ݶS`ӷd`tX)N+#PgmzwK"?`C*bN:!U {s?VZNp)k{`&۩R{,Js܏ήCS)K.(+}u'18~ Ӄ)Q: -[4CD;\L0y~}}bIN_qUR8A8;cW6)9,[l=GhyS8,ɢ=މ:ð=ӊL4r9-U8:h*,I!D$$RT*SBRoTTBE9Cyc_yޟuuz>BOă~Xb]H[).q|I$6}|# ra ?<_< '{< NAvݛ_ kǣ"lq>blɉ#.% @/yHuVَbɬ?Hgex*}+黟0SSOUb(2rI&誯xV";Pe^8 H85 J7 5.g=LP5fq>\ ]8 n+tO;Nް4=#8~9"~rWRwc/\3*"!Yz%|s濼M0&xg:KXO(tu7]qv$LF5GD歝@[b>FKOƶ$(3%'r{;FOCvoO(?}3 ˏ܃{+ խA1ft< IyK !/ZoS Գo=g}6&'٘\Pٶy[fû0>ҥ'؆ô7q9C8t3*=7fYI2xư'q9qef 疌89_Åܿպ>`R=; )c9Ӊ IDu(k`K`m:cS}v _M0~+R۲bT{+=ZWb{ֿq>{(dʑ3,ں7>D ʧtAwHW۶ϓ ǟT]7%3f?cCu۷g =akxq}qkz>HsM AەJ*? 
}w!E(f/޿;4ḮPd;BcF 0g=A0./PmUkǴw?Yٯz05?Ŋa$).>m'whj#)n#)!t4=F -ZBw_nAy#C3N[7e2.*+=aH^.8./2 EX6`_kiZ71_OŠw.`=|6lH2l]L׳|V(F>~Gq9?+Q#^R3SZqSia>57?Zq8 +M΅ Q&xm o>o3NJ 3v91\ЩQ;JB&Yx,=. NV "7qdņ@En&y3P_} 0`6AX2T[`a%",f[>k%/OT`z=̿y}jyoR%Yv3o]cL{8pA+)esxn9/ .}ٳ$a-e]>xuł;{bwRRg-!5m?2eڌOE7S*%IfSI.rua(9K!Uxf7 *K2% 5*ؖN1ĸ̓H_԰E߲j[<_yUnʨq z\pC+ fFzPNl .K7#س?L*cxEFJ? n&"z.di/i߷]Vm0B:_8S1WȄLWIb\q0*mP!\&O/7d=~ r_c/iv}zpA;?V[i u8wfUvJh9Ty%KU?=F o/?! 7 m!Y2}\Raa zǣaγ^Fܢ 쀽;@Ds!dicNoaZGm^XV`b4wYH9̒u1vMJO'#im8w<+]O2b!`.Wv":*@Z=|'+#mLgdGڹ½yYX. cw`Hz`t\\^zotO?|C!us@z>x2f UgըZw1> 3-lmy1w](kE ->LK\oU:l 0' g֓~~rNI0o/Anq.K?}Oo;MX=,)زH+F\@!O')y,k/9 K3OÜMC>g8+jݟ_k䆮[6>D#vjE5AnIO2UA g8K7\yXׄڥO mT9B.Ezӷs yjZ{ 侹pel7 aKd=] R93&+Fn_au9SL>2p Oiw>Oz*g`*$nb y6ĸˆkX!Unf xtmCϺ"R`r7~wu&:(m0\jU2 sK M, $9=:}̰'iV?tadi6)TkqTKJAl$6qnU̶"@fvk2W ?.5IW=T &Y؝$"}yudkg&\٥:Qy|N3|;3x\v91/~>)Ü8sxj`}[wi때 q.#)m|"`135 Y2 x"+{&O8ˬ"eNVPsO6Cjg T$|׉iRd19N`!n=V8u)O _+BCgiWk}39'ʇWvqųߎU螮,0.$Y;3g1W3l¾$]Ppgg{d)(O2r gq1#Z0vs@" lW>x6TKB\<]׎."kQX`jZBbotc^?[CrծP.T' `ܯLD|>gٞ چx`H9{'kmtuM ~Q/e ;bF;fo[\(o6& %懭{|Ӷ2 $֦&TKr^v߰Ǘ`97Kr ,QY} #hYz;v]!:VZY%O7#U?C,_= 60_NF1rVݧܰD㐙&_o~Ϛv.z'Yr(Cvږ@=q&f;,܏n3"Hv?-O6v߰aK0y/'g23L6C_ʓSXy-q O4v8TA ?ӻg8\8Ћ;rBj4nʇշ+)e}l1ر^~Lz "oǾ36f8)J&;˭Kw6G6\b[ xj+RYj/.;b:8TmmA+rÀZ|$SV=ۅzI6!]-lڥ2{;{=.Y]l%nN! _Y͍B!ЛI[ aD,8?(M3$4/emۀ~嵉luQS$}c,~UNO(WCĢkwڽ_ %(ob5 ׁ'8|!U7 뙒҇ R`+|^?7߁VI$20:*'>4fUGމZBPJRsڑ)Ģf= Y&8ozO/S$)EqY(o~w'^Y8bM*5{ll5W /.e=)C 'EHjNt >芴U #YO3.0vG,?f}9+ſߕO8{ufKLI]=F?}K0.bLTx88j k։cəǾ,PYa҃Gye*е{32N>aQV!~h1+7s2G]Ca2Έ5iQ԰o|ۆ'@uy#sZ'_$Ʀ_0lzRI0~3cV8Ruĺ/YBkqAuI"LelLCv͟D4A.Xȁ *֤cƲkd\{C ziϾJ0՛737~hn¶&G}CNF)Gz[= ԟ˺0+m*'Ji*f;.cO ,waO@kwиA7 zX#m~#-DwA Ѱ~:"b+iM۵6#}|-ŁX'wvsc统;*ኈJωRb\x V Q >cu[vLʷo?#R$e{˦gPtƀP|{[A?=^*AS"pIli0+$ql[:'!w7<΄bi{}4yߌ<)ςpѥJ#<ju! C8`q矤.EMQ'h9M;}hAfXgL[==/5Vdوhu^}>xO K<2&vbV"V SP<1*˭5m [J m[Ēt喴FðJMHkGj? 
yC۴FP61y Gߦq.O5>SXtl&kd }a=)G#9# ^B/9Qy:T&yܶG3%Ik=`6%;4^}?=ve04|T!M-|Dڗ_K>pguسecBG¢Iڿd+$/41^:}GNbN(2^QCKE-uʟMdWI {t`f}6ߓW`6>>d ('" j`Dzp"vn/tsu}#4FU: 3:B"iΈ(2J2H{{3Y1`0fr뱬G+-u\g^q䷣'p NdV7܆.P,' 6 RlqvHFAmօ=kWb.<#TF fͷrA;k^Y#ӾYi3D|AAjK yo ?#daԡcELM~"8p9Eյμ±^Q?3S F5ARDQյ[Þ% qL9K&ΐ:]_dP_Ww9O{h̩2^0=ٗ"92IFJ%c'ct u;EXK@cIN'.$+Iޛ<H&#TGSw)Նwj^yW6 KpF(zw]Y}Ap{^إ`~Ue䟻Z =ޝ`~Օ$mR9V`?^;ԨK0W'\i𸾤kG }E;?_FWz[Wu;X` 鸸N+rҪwPvÀ0ݔG* ᑬ.ܐZ{f;w'bC[׸Cijq2hZ GLutAձn8:TH0WgdU@dž&4hMUM{b? ~87⡺K;8Ru_B?Hhzރ,nRG'bR vcz68c,{ntl C6. h=㒻-%;ĦX/v4 n. ڿ:\>҇cM:\zs='rUE+gϒLN즩UrfozfI{rX|vLVpy~EkҝQ ,7k7b~瑶*+^l47ɐ?.sfP^Cr|#"~I臱K j)Жf(_' Er򖁺SoL7'dPJcMmdY*2uDliði-/aMt,*3孏AZ4 &P珦>V|; =\>`%D߲z`6<ۅʚ;tQ*s:q%Agi\rTk:_~!&+VE4ú$ 느~YO?w_"?Gu+`mU˟8SyZ+e`RU}!7y5磛O#Wk҃0E=؟LV ͣ=/;g&#ɧt|܁um5E-qUO%\K`)7X&Z .Mvos?_>sXgdRiw{NO\ v^\EADRZN3ۉ;@1b$g{HP,;fwgpВ8'>j@r\1Jc 2 N\l)y8I:$񊳽 znqY|u!GLB_$-8[*aޏtˢw{`нJ>}@|o|^`3wz8dx08`O}[~hU፸(7L6} ݗұ0fha탙4W-Y+|C~>T}?=G&Aی8x$^O>bHg0aO1_owO/=e]%Jksdz@9ܖdO ä{ʹ)oLW'"RK;qbt ->bV?= EzBStYq1%,W{{f%YXbm0>9>:j4˔|垩L )[6Bq>qT.Q5E>V;Q8G`Lꍒv ؟(yR3AB?-Vj^8IY;^Żlοn W'ȱ㖗3| + ֱ).6BIgƑ,( E蚋:@](L (yԇ~X}:`*3a"6'0++]T 牶LВ]ğ:ffբ9,g ʞœJ>^{k=ݨɣ]}L7b"w]IR eZ "5.іGoJCfH9rL}" nmKRWoH|&5ekxj8p8 6HHѿC0zOg.]\Ռ]*pe%Q M.>c Jfvsl[/jLd8:x ķ.'/xI{}cXZ=dv.o!BNk}GOv=L$߃/ۅ [wC{Ci.C$2q A=Y7)z['0"?­gVdH之G J-.`7KK[+D玵ivZp.3/wz5%@y6{]2SmsVNÕ 4˿3q< }]BjۥlOnXqjw6 ٺ)[>;$ƕkH3^:۴OWyGM;JR`Fs/VlxXN˅;ȹ 7D||xjF0tهv^s\ytV` A~և.Ǎ | )ηYPpF4 ]ߖw)=Ǩx;wqΗ όI=o$Eݺ e kZ4h#Az/ ane 2H0mkQŐ_ma6ΞT}F* }- aH8^?&gum6\OS@c"[ajw`X{uůR$볞ޖF/p#" G CLiAi#߾~k\9bddp1(7܉;Cqen6vlɁt"Ey_Jj܄F3\*ޘ{YpcNaӚDPm ^lgXމ{.&Bջf"pu'ihHRWŸNAywbCOi`}} #]1Z`Qꋟ$ۮFw uOɪl6˜Jq bllǮ w=hTӘ֐XBv^$&KMZRBaIj7μ_q1a' nww}*$mva['T'>u^ Κ);4 bLgKւ6ɴS v0a]hekdMXu_-xC iot֍ )}81O~3|˓z ՉKdgfR aglGuk]=FbwUߜB-41!ybٳs$q){mwF&l:v;ӋdAf{ ^$CIt{~Q6#^ZU CǝmÖ;6K\*{b:,_V N?1y-&\WbޤnYؾvu/vJI' RqƐƉ-2?66 B2gBs$/g@7s-Zr_e5qm۵2vn=_c38Ŋ]rje&ґ/d~x0`޵WޓHO"AWa5Al81uP(UǞ+g}Q[bXE9X 1'GvH̜ 9*r\\*quγO0.ڶq4|;a[e֑/K3J5ۦN^l"YxjyxXݵ不\$D3%ض`޺v[Z '_ wPK9?%\f; H.u[֕Iw]wL.?lw@,; M?azRW$Aa3 g2 >*⸗+ fѤ&!v$rt|-Λ3yfgzO-\! 
` QHͰO(5G G;'=NDˤE;\}"͔dKHg ,sD 棻]۝?8y~ir6#em0;7}" Żq]=;k.֊㱯.'3rɔIhy_ ]h`Wy\.Eҵu׾ ZDzCo ?~ [I6o,$aćg׼4`( {6Aasf}x# O Ȝ -HkmݷiѰh4$P gz$|Pq˽SW뤻/lix_[Cd9WZqj>>eUڤp3b-X9gѽTl쁋Xd{)bE__=Ni:X4H[o ߎ\,xE2$?=Ck?5[`T[8K >߄]qsbG(0ZwX&̟epM:EZ`jܠaY3G{)gi~ ;|*qaZFİOBFin'T #;'MHe޿~A!~KQjQ!B.^_=xI=9{v Qo!c'|*roaL0 WlyV:!Y9e'?X9wݹ!.W\m®p>kt҇0s(7ʳtea)Tgd?J$G__a"ٶ TlS#(}k9k 3t|i듋lWF5dfzg6> # Ba=͡$ 2X]QiF:N~\FjF&&wPyˎ̕jX!vX>";ʶ$X"U*+ͯW\w"i|`=wsrԞZ+8!'%\Uɮ[%oܐ`Xg4d}r0 d]YK{'MS=j #ȾapچĞؙ3"v6z&>|}VmLf@8"Inƙ_o_ ЊMe{|(Rcq!$'Za}j?_;#`xm]vI:<?MC\) 47Ӈ9!Z:%٪yS}g??iʈjN^^t`(L_R ޵y#m71v#( m wyÌKϐ~§]fd`> t^ w\r^;)xmg|7ӺL$ hA݆0k`j^v?mE`ֆ0'ڂ13|$EJaTvDŽ??niB2oS(;,ez.{Bf]Pؗh0 aͮts\W8C`AXR-1^vymMeˆS{JR:On?x][ m %N"w8Ě,:߾SYv˦N`6jsm}<7bJ]NhszV9E*9+$r;^BsSt.,g%{h5ѻa7˼[]|^7d|y}]N,`ӻ8CC6۲S Ԅ^—U;iX&4`-e>L^5"?I+ϸzh7ۥ>M0'H^ ˃>]&JҀ崾 &N méC^m{ ~9$3pY;;`{'hݻ 0FIv'^%#}u/Ġrx/[?6-[à)^q,XغuS.؝m7Rɿa@ʽ1o|q!4/V3͌z{ u3g˱b&OHl*:*,fSY~aFÌ>zH <3ãk#}i<͹CmL=x'lަ8 =rҲ{ xw mp% A=XJ&Ѵ;ȩћCABXvg~?I&lkf7Oj rP 9}LH5T`NN[<]<u7!y$D`F,l8cT~MXP\` } 'XoZo tl%Gq|$6Qmh'9E}fEvϢa/=^ZK)de\񃨅;X_yQ7dN^λ9"4F|")gC|UXhT5)ݮ~_ ]mg#`rf(C){lܔZ^&9Gw[#c;qXi16U˅ YY ae+W"7wr8yi0?dG X?~c V%aդ#EA 5_XfH?,1~J0<8f'-E/.SzgX9u\{ytYfWyboU K 7^:1b0snbjlHwˊsi0{w{ N)c-_Ypǫ#w VY'n踱J? DWDA#7ls RV\[?y2.rԶo=JKmI9-;41imkߡp/ڿޓdm{ ~0Ź%+)OXz>$OI_ܪƲ*e&RSX\A2%ά r+A Y99P[~6\J4L)>_o:a:D ߝBrh(_W' vN[Rr)ZB"m%ly6,T6 EǪDO4ˬb(?&y r-6Ељϐg'<F$s&URbcq\][LAlB Jjݑ悋foLh&oR-F`:n{ov~=o>TX.5N0# 3'3'1rp\ W[2Q{a,|Åwzp>Lu?6sgj2C)c;pv7JP.m.u~8&s6x#$(0o*I{"Mk\Q|Ǘ θc;Hެ_8p`ƫxXNvLqo{`8z=,-m:WD0~i9ϫ ތwonC㫸v oGv/ 9o*_M\[݁~୨j=_Ea:/C'Te]t.(=U1^U$7$[fUQpA4$I)Аlښ"`dnOmrjzXtsT2,ˆL3 uej˶۵O+y;c{A'2'IOcn!(/J~{~D"Ja YPgVu~KRǡ?&nJNU7>EֳVH_wqjWީ.0\6?P%K} ըcl!(n#-kKPT_r ɡ Q hG BFa|b&c_>cv3+. 4ɛ0s"Nur'Y8^ -m[ic~;}O2'.Q& Aa+W?`ahgw\?3uLȘ[o;v.1efʥuGapyFCs8ub aIZɬw\) [>˼[*lemسrfcB RՋ{pȑW_h4Ѹ;c7r:Vh/ժWE:GW!SXeOFz(<5ޮ@85FS/P~fIL5 jr>dzEk.ܤQ3+n.֜]/QXi;䈓 } \ ܞBZuH54{ f`ӈu0R&hb -cp[fOnܘ8e  --?bvv*V`14fϼ@Ph 2H6$os[pY^Mͳg[E.w\p]Vk)];2;1T] D2f HPmg \78$J3t|Pɫamg. 
̭SSPsI̔F>=,3H?ϫ}wn]˕xnB/fY 8m}일n`VWyE63/֕3'8!j T(CssN׶-zGft@t ?z$-a{r"7R'&|݃> IjntV-!8^ug^\7e#9wHEԿMBPK}|If*Z0ans]}^>m߶}H ry<1 tȡÜОU.5]T,k%(LU4 JKzeX)9dD0?20/u?v]W.ZŰ(&ͷ/bH/*q$+^ݞ ߭FI??3pm W;'L7c}ypg9g*_}r1u|FnD;N[,dϰ_\fiz8s .{%7DO/jPwh5`(- Í(t3R9cc!v:ۏ=[;绍Xz2e&_@17S Uk{~?.2ī"]/Q8ɹ%cgU4HeЏe5m l?7?6 XqWm;"6yO-8Z璡3CN4М7%~{$5NZFU';XJζO ]63CMa>>dZ%fn01«h没 ue"HvGq7Cjx7YDSb]~+"1ΘCPWRpbnĪu;v`pj0m:+ !5Ƣ9ɏ!H;~s3DO^~52)¢g{:!'h4?䎔]gprrpL0NV&vZ(aop>Жl0/v |ڢ`=J2Fܧ U.ekJ u$)ӄh>DzvZ/1\=o5s㕐#P?;@ ضSӄe f/^`ve[0fV|p/nMs\a&A(WP} `\kT\`p]7y+zTBUra}i23x+{ I}# 'LPXiw S |%!ӳl.0> 6BҕnXXM.p=ZГp[+o^^~ A0mT<*qAyFRBűPAY*o*җ[%Ӎ4 oLtqL9i Ow  Fe^2;Xf%h3NcDb{KJyg섩7)bE8KP҂K7lٶMG>\`{s91񉩽˒8;|`cHXkcpB;(GZ"nCfU;7s ^-=[l/68^da/Ѝ\1Ʉ2oO gq 9xCɈd z|c~M`{^.XԒ-H, c\ 8=!ӁuGr#te}-ffF ~ ]zNn1~;FЙ?́'Ymh=G6|f=;uG%N݊DsmG߸SPݫ R}G52fGE9=l_#Vy W gSU ͛fÞUﲕn{ℐQG.=v=|Pos&f{~pX|rz_ 0]B\TcVgaf!}fXs[T7x\VON w?9l1kmM}UB Ͷ4Z†5^%)NUQn#ImbaR_ucH+T{m>#?NU;ݏh&ahzr_AѺ+ZN2Ύޓ`/bH.<]b8>J'`5Ɣe(nؤ 5 PeJh+?xP [hX`e2SP\%Î]6Yl8ה:Xު{a=C(K NŢ0|\,N u5[: $-0lc: Z6t`Y[sj14fͿE<uyHuc.YclϱJt n\Ď)>5l`1v힫{H*+O̰Mvhww:~ruEkʮĖբJ0S믰`Zɼ&f^1Y.{BEK\o<|^:1$vqf?ft1,2d%b &G 1fſJ|2p㭙4X-Й|+ 3$G`tSiX%mɽdtR:N3 ಁ#?ޛv}A6˥ۡ~hoT|p)s Nw'oI2Iex'M O+w:ҠKl*ڎl6HPYDS&*P'y?Ny9W+UOp,_%sށ%^E7X}tv\g/s%e!<.d 6R}G0wsvu$p@.AU.`.yg4n]2\.إs^8NRy9_G}cfz 1QH[gD ʞU\ŲƦ;p5Sgyѩ1mXEH4͢),ŕ@{?qd |]P8` uңel ~X cL)`MR.q/Ji g8=MSL}Aޅ8~N&RVn4ݛXWL&,T'_U"BM)$F'R]8ocu{˼̥'p[^D1{~.AĐk}Ҿ?;vP~c=fˣ2PwTe O nsCEGf,p^b4m)`+Py\zWxtt-oJ^fRZ{u<3u q0'vk2C4fz~ ,Y:݊,\M %_{buXmZl7ԣ)vR602-\ޓuŝ]sN+m>8$,)P{2=7뉕] IZ0' ݲ Y.XZ~b 9VReO{8"yG!̶ʹJk}xr؀-ea>8v*|"tN? {, ȑ1v\Tȱ=ӕ_A\>Z4K`yX#A+ xMŖDYSky}Rb# ( }x7$sB[Ō@ {fL]Ad20i{CH5a1C%M h'6K S4p!GYKThP3Vݙc]$G_e@fRհdT dp|dۥ3ɛCEoB >UnT O|ay5Jnw$``UQ~ kQ]79 b}Lqq;L<. u䇢ЁO&Ea m@2z,ili~6ֿi t~7epBV__.;t(?~Y˃,qHl-bo[ySg~d!3$JT^cTUZ,4y,K>#rEOt̵<\bT%A]y ޯ3 'qZD;B}L8xtے0ZSK^MWͅ'y6ϡuX{Q=KUWKVgwŠU.) 
wC㡋zq~WZ'w@Uk^":L>Qk a)}W@K"/JԾ_]Hpj?s Ku iFWW q%mgi?};r`VX{cE-k-)_.dgR M *rEXR/: *!L205K h csZ٪FOn 2k?.&bf/ӯZ8l6|KƎ{UoرVG|LovxÆş)aR3z SiפGlQ mHP$;B(vq*-zcNI{*œi_ey4<(`x!W\0 q~{>.5C0{ + Nd2YfJx+\}8ݓsŠse`^̖Mb9OYa?a܄{'xހ)@>z/i!)$Hҭ-0kL^r&]?0C:" 3iW εCnI998^͞NރSG .i]Sf =ѻ52mP>U5^49C(nG0R.n&v% m$";n !G gNxn^* 2N`VSxփW@dpWآ c\l8I݃iI8{LswI!,v?Cg̒;:Shȭmv_~&v:j_KŒJϷ+yŭsX ,8. |71FZ$ˬۋ/Fɭ3tR^^YfnjeyCp`Ӿ,3_64O *_epRt[7- ̽fk$M>Ye!р>[q^A߹KfB:Ӓ6isXp2>L$πnTv$&X4$@ooK^;nǮ$:NxU_]x|,fDZ^UEO95g+qI+EE/">Zh@>I?Va}r7ifwwcIa()Es1~8~-o8R5:_Laˋ'I} 飽9xkI)}~N/S}hon zYC0In{^ l=:\sfP\j JXTUH["\?.g z:Ǻ#{+BnWżuN~u ;o` !ZM^q|)X+~{h;o5g"3aDD].}+PnˏU#5!jU펋#ء] h0˨Y+ˆ=k>A~1`( 8_b  ź"0@\eViW`rՆq"p >k.O_r!CTaN!p}GBnK,+a J~ﺔ?jN` (3]2zLgoo~@ySIVyTg⡠-RIP~f \1 rs)bp;m! |gyxEO[3Ɔ| sdz? i;B6k%GDFz}ך&;)Rjt"Nm9=p =}x 5gH6 ځmQiVwarC0Bjs ߥe |au߰퓾Z}˘AEkk (y0rsnHMe:aI i0QzuN.45V ~OqdoۊlOH[ GabpUh} Su+X_R3ɦM\"L o#_q&|$~}R5ި<^F`LT !p²~^OdS4ȅYzy%g☥)~>6Rþ#yYWFI%ETإމw YY!u'>^>*a7{`竁@z?0p~ Ư<C]{ gnP  SuF'= 5rzv-0!QW0ܯtFjI 78|l=*=~. s2\jZnLJoa|`e`pv/&Y)ۙy=/230&'9MHX|YZڪlRDyS 4c|S]>?|aqi疑}<F#[lHyU4!IX-ᘟ0*eIR7nŏ&[׿gF_ja|OIm~Oձ%F|g"z.f[xz0ԃ`'Y;L>',.Nep?XXC[ &Z7*blPtn9H2\zlԅdT4V 콰&&gWXxy[rhup"w7$)O96h] # JmA_LJt@3}sgjS=Vs ıw.= B1t;ۛD (_l q"$Sާ!EPh/#h;K3v*{> jIR9dyf8ΧssB5v0{#(tڙ vÄYJLuuҧUpeO7rp2w3g*Pp|#tX.JIkn؄J9:=$! 
M;Kf鲸<볤t+m#žg DR\qti'Q+zHPn,&&7F??s\c;>_ 3[x y`vI7vQgC{yAWgglü6Qӳ)HFhUCp1qe?5EzEl5pj\l@^<oR>ǻ675"Yk>lU4'gwYBM'#{*^_~n<e~00<(gvg/v,?ě <P2$J*I"SR4Q*) !ɳ<3|<}`}]kwlܜZlӤI`=0SudӶ\|% '0ĺIN;jBW;r*]YKr-( zvo0d+qyhO*Kyx v_`?`u5±t[ؓ럿Y`a(x` |j޺pHjky}N| ,MpN$=FOpY`"AyK񹬤 1v{^c$G+R]# ekpim]Qą@;֜ulxpJ?.ǢRՊu$ ~bl􈇗bNUpiĹ1{ڜ=8.~cѴKӨ4h;A3Vg _X%XeVnև~͍^x` 5W\:H}s{'>!֕4n0X!/rL-R0wr|>ǥcsSgIFO9ҙr$ ܝ * q./{۪2NRcZvQps#Am9޿['N)7X7aM\-q}`q -E\p5IG@-sl}BBH+g <%B9%F>)l s81~L c'fcUŒ~1Orhs"$0 "%`ג:cn-4Pp9zs#wT< ,rZ\-R#6TJ$WH[x~S`Sq$ 3tv$b ,ҪDip1LDah:p4TF='xl,Lؒ}|%t!GKV9ɝv&]tӒ;rww\7ÅH[Gȍ+>p[&\>*L{ `gW\$)A<޶[tY]:Z_oTB)8&YU]Y>lliٮ~;\XI9kVb S2CO`;.CcHQC$/Z9 x?ҁy.)c|QQqz4q8|'0Ep8\ӈi6jeQS:n }S /AkТ{οԺ63_F`RoĥqDţF6Q> "o//bF)foTs&zcͿ+HՋs]G`=^o| Dtn.̲g)#p p $$Y!S[lCQqq۫W /WfPQP2$0ݍ탂N$2I*sکN]i ^5XiУy6i޺y n0f&` $ލJ,a~Ԛ^<}vVj bE6|8S5\ꔩcU].nlYiRS g1/M7gJ[k:F8e{E฾`~<U&~a5Wo8! vR]{V-n珏umƒyFۍE/v ko U?OUPafQl|6nf%z矼 )G@ye~yH;b|`Jv?Jt"k[+}xM2^ 5%ȐG$՟|P`;==o^*]b* gCi-$HZ /\[#2a$n!APJ }&j0=oTI2Kh oo*|9"E0/3ϗ<7_)Gpa37%4z/(ia^KyL.70MS3'9,7mZ/^GKy982F3WtE0+bU/fnBρ'NF`+]лG1i˷3;?D2Z} _ Y s =̄`Y]W{5^.9WߦqYEPZ%Xiy-LN+yl:|o+mF =MhsVμM0dK_ 6bk=v'ǡFf`-vo<ʪBZ!iͅ*jz] NNg n n|ԕwV1zhU=>Uj?غ zhy04Us!՟-y 9<~T&6ܽ:H\ZKZlu>V)wL;eZȎʟ$l ԓ> i$˻^^ϾBv8S4tڭh( a`Y"'< }q1ف$Ҥ[8~W]~m6poPlOķ[ցey8@6 krŔ8"k9%VoHŭC"o5k@ȗ#0zX8^[ĩUc!_cc.oSU:4`TmkuM%yRpBvXn{* Ihu>|sԷjroۍ56:6mӆEHX*uJ\޿]nA6HzJ7FV9FC6yo!(4s`xU ]{=b+Hu?N/K>陲Hxyધèo=\ a%ŨC7}=n0i3oFr&cS7usEY J &j\Y`|.)MP16u^!Y7][)8b{%dRs~:s8vg-cfy0@u yR8{WveXocl,ly߈F h2;|mF]Ky˃Qלuk0̟en\"2MU={d.';TAô R)F%NW'l`E)XkHĜ5(0sN+*˱iaU[z my8s_\S8dPCGTNPcs"pVu\V>o)ڜ#X8#M"ɔֽ 0v̋#MgN:3 (YE޻M".PnpsЕaY~ zL1~;KM1$olgFe[`,#5וL$8N}I?pv4' ؆ bmj}|> {Xӭ/!j3?,/"Y\ʎ#,H&knY2RD4Np4M*"cb[p:b%4\>-;6M>.$(2Z^J> 0Q&X,':qiE5;i~lœÃ}xT߾/ |y)F?wK`sOms~ԗ=d*견KS`n3 b.HKU3=/^QX2׭|d^s{ʚ}ox9躌s<;_.Z}'{;ދ-V-8ܰ$I[k:eThtųskfS#r` otD5&ҤNmnr7+++Um;-:s]v )prP\$,;lm+zZԐgNwڨ#V198hyGnq&CK]sK% zmcN9e#m݋X9[`R0gW=oCuSj@uFLުxf(I;`(kd8^B7 HhE='(sՁ^Ef=f7+Ʊ[0mR'dך>!>g.‘g ﭐlk5L2UY<=c߫w-m"07|T ̮5-qa[<5?%q ào-ͱ5>w4Lľb/ho Xt"4t̺ŃX+7]"Xb=jw묜$&?֝b )ʲ<#)8tǏ ķgѡlu)M< 
KLVe)nꗢ`z6`ыG7),l@4S]FyG޹檽#0p`7N~8vcƢp o5mNUW l:k#yV ŷm9Fɚ5fWc*ѐ~a9!Y l~92f:Jr0=U'T0ٮx#zC3T;~Zͱ<{U:L7?T {h)է0ah9zmj 7{[UM c߆TךF8lmfKCainRiyrPwqDqiwEZ4bkq.l u;/`5[!/tYKbl2'k?ao-~Qح9OzE &}bR].h`Mi*,q]aDXc{@rnU_ c-Ha-x=Fp0Ƿ=g2SOvx%Y jПaKnT9HhNDImϣ aȲ~E=}AE$C"(+Tz] ~W-3 .I9:)u3!cAKtdxs!fGnВÖ_f.He) >5Bs*>gN>ƕ1 ҠSHN?6C>T9J3ʝ,Ҙn9-I'vWydlNr2.e6&&u`ғ %HUFoݕ= 32-`X||C#g-iGKC/oKeYUɢ8~$.F`7&8EZǘar%FIys8ho`>3_>ЧM}d|Q ԗS&8;z޺>kCF/w{_Eػ@`I}UTѿɾytmwnXW> oDWyrl)jXGidzEU.JBt荪t G׏_eҁ%~]08eN,Ŋ HrN~Ǐ۔[YAu6iq/{/N*>e,r&8];g!z:tGD_oҡTZ,LLN`ڸyX,yWF`쿐ïWUnK=>E2qj|dEуx$_|1sQߡj3 z電rne5πxЕ.tbG{5Jq3Z<ݿ#'P>6"Ɋ@R W5 M[ -髹<&? c dry}H*׏^zB-w|'L9=mjl?׈='׼U$)Y# $e09hF#t޵nY=ﭏIJGpBjaBxPxMH5L,b +urA>VXd~E$X@f U־nр^7s̗w;bCXjHn!kם蘏 %O 𘆕2ok/o \b 8(1yW s$;~o:˹-6jȊɅ !#LY%/ȅm*V$S^cV}9,׼E}&w=W';M!!0w쓔涓Tk-2A$ iƞ 0{,{mG~cS7dﺧU?B~oY-.TQmע:hj"1{Et+琕 ur٧k878$N]^VUz5П8G4!NVj$["mWyEtFX=IEX}t6Kƃz(cm ݯ} Hֵz0j;:19vNWI8Hrg^Ya99]'}v<3.H%,5 lLP0\}!@3g S:g& ڸ]H>^e3)P)o\<=m}ovɰoEQ#r(F1[-_؎ͼMztJm8}.3f 3/MOQ"&8뀳Wjtnޣ@{v/fǭsŗ5m`M V0u@ķAOSO2G??mV8]6?UWL/E];{"  Yyv+cŪl~&T4,&BH#A V\`A[>ͮwZGHzcB]-$Ӱm[|:ƬmnڋچoW8|m݅fF'2c%aX۪gyMZO*Z`1g\T $n${q,qQͧĐ| ݑ8ꖄ=`}Ǜ%H=09s Z!ELc/UUUKMݨ=Sp Z$^+@Ͱbr*c_E}* ˭3/ժqH;XYm-ׁ;i{ĵ_CJBXU/vަoE_Xb E96~msehб=ke)hP.. ~;/eǦ7%CN*xaO nK|s6o-[($ !S:U\6'rUvka~'%v=H N0Rqzhۜ Ə٩R¶Xw}$V/YkŢ?M8zrsې1浏䏑öb_ʦm$5oƂ=a*0~pKq.3[u6;map<6^sTAHܾ!49eg+EOpGu,z#!F; ]f-ٙ0k4*4#`AH!Xh`|M{ =kK`271pϾ 7Rg❛wt4WOdPGjrt0sI;/[DaEP2P48'9? 
}9U}=}7f-"DrצN, =z%٤SqB+|.ޅQU\@>w-(`˒eC˯6Um^_TE{S.Xٸ >0m [ZD։Qɼ׷ {҄޸!G5+F@2P[q`%,@^We΢/ &bv#/F7qv%?j@ywaF>ƿS8][qeKAo}_K.N>^mdIp)Bĩp$2)4`#F-Dk֩BnZ?!}7[;ٿ5D>z}X8\5dyUڣɛ.8uvt2|:o&Z*0ˆ[aRӞCc8>v3?9ff'V% [-"1zF^'V&(^W*ŖoYpP^w z,9qz8&pu 2[avGQKpB sB73vȷ;EtahShRXJMzVC)EΜ>й0t,z'|YF]oK_tJҙnw;=ѐK)tciŦ?/ϮڤkrRf+6ZVwB0mZ֘uFRq)b)rX-E`SXZE,ӛ`]Q Vۧ.kea)-{\Łk)Ox>ލ aw9ꤟ*v5ƾG w3!g~4%<9KL$*R͇(0N < K?kvۻϵEPl36<=1Z*kB#ߝ X-UA1GS|.-8Ep.=MLf{ʪ=8L.Uj, dpE 7 7x L|IcWƘ6 ir>K'/g6w * \T8g -Qh)ܴaKwF~f6]`HHf`9!c*v_~ m5(O_& KrJ;V~;U?і B/kMՠ 56(ܞ4.(GE_o¼ܟJ`c]̦WMb\a ?)+w#8q:k\%fF)aԦWWwTaʽ/̰}Wp;tp+饘2=ye]Eؠ2<]S|:2~<~ =>> )%C/aʖ|T7d'N?,G8ϖ[h0|uu_Vyi0eI-wH8xZ'5-?> BKo ,_.m#R=` Mọ:LvӽTCPMoͪp^G"E c׏ K8N:XJIjGLҰcOGI}Oel#s*U]Fa\ S/r'_]-Bp>_}fljCU %>tzHj4/N`OZA9NzSaiYv>nkW~w38=]r:z;8W”T#fb] n+.!s~de{2 I&ǯ`do[QHN*Vbl6C gح}Ga%FW|{*IG$5HfA)^0+SbSr([Wyh [:aZQZ &76&բ>%AO[Oԏm@/67ln͜foGLZ&m̖n HE'Of3%!W[L~K?_V~2-'.`㛜ĕ;'ngj M%_岉DŽ*>r0| 3+l,3QX*)w+t$Ekk;Up0ܳN8`6c+$Iw땚}x &u"m Z&xi9`'T^g~82^-O}f 'u,gG`j+N_) im5ZLG7Cݮu<'WN"]4adyYKQ^EY9Zcٯ/&|Pg 㬛9x#܌9{w;IgqzEՑ'XkC{y`xc_Q7WmVS2DZN''.܊S/{?ߠ^+sN!,D/)6?p kBM`ٷ=2Zw x @6ek`a@z+NޓҞ o6x9MNh}}851]FͨU51#/)voyƻ<#!׭HѦڑ8/8 ig(Zu$o>|WxYä"N3;=9;aD2aa$kv9ly=}ZOg<]Uc[C@|}ֻ3:?BR^W5jſ6Gz#ۀF(| 5orS-/M )[ґS|ßsV>rSA=8vS\0$ -\ yCG`}6(zOu[$M`ȳOdn&7`s]U2v{gabXyyoJ2*WO:j$S؜DES}szGC2w4tECNm|KKcmDKwVQ yC{a:f̑s$J0%0V%19wzG= s J$vےL|m@ŕqK\:<>7sЍEd6ib*/_,]9mpED1Vc&%ɃJ&cZ0\p,;vNGW,{CM.ֺXE{<7d7llr25ҴNצ8g$}69fZJE[nQ`|t^zkܷq ,"rQOԙ.nayj:/lKk3.ڇiUMH{SS{oC)#uUڅ(NpMΐ?qrМ}f)u mhhu8|Wy(<7)=R0APUy|GˣyBKF$Ĭq"n)y5O—k s!OQ T[{q"VD];57ɚ #TQOa~w3ř5k^b~,PURi-f`lu9L}v7JQıX 3Ax䁜"v<=nx0Vo+tR ~;xgάݵd>MQ; qooÒֻ a3kcaiqK?5{pL6\>Ops_~͌#)'l*fOW02gȰ'.I '.rh@W,dSp`r%nY$ۓUpȺM*/ )];rP¨g-e@_7՜) ⥷좾޺a;oIfu(.=JĵV|Xbgػ6x[-|Fz9*zG:XT߮_|->cfI<C%3J>ŹCF\\旺]#b"$ԝܻ>~'%L+A0W*MjK l1N4M+Ňplq|=+',_IPd6ͪ]>I֘7zfPKE 39!v{Ǯ=vN7VU>7$Xփb- 7rqʀL|hb ϫ#9C?Oךl}BVe,g  ]{~vQ2/У9_Dw1hVmCRz,͌lŒr1U9!^|]gaswѮsIma0L+QW WgI 1ܽȠ5*$6Pi\0Lrҗ$Fs8j̰]dH3!>qq:a.>ѹ/ύK_=Mz0sySXn\l/ɘKbDD #XwQ 5D+8sjGHk𘨎)#C t}tPis¢:/ٽIGƺc7NHԥGfyAc 0cU=|A!:8ǣ}ӽЧau^*o ev0c?&|b%C5/œ{1栌Tx쾤 
_^IĮH.!j}elX 'ނdiEdf<ɒiOx֜%'K+,{'L{ 9Cv¼#2t>\̄i2'O fGZT!otz4 j#}lһ/m.ϰyDs}kHW,i")-2-C`oqYP`H2q,$pm@f8V[:o"홏iHv?h9yO 8v!y`ڒ%./4Œ78Ty@f߽,Ջqz)4np .X5)rGuHyIٓƥthXDr[[N `{DU9W#䙷%Ť@]PE3^*r;&) u4&q&Rf>}$ ŷniR~@e~Y8PrbV?OfʋM]XYL߾{лWsЋX(SI!QU\ mUEj?HNo52OEa[Ecd`ݷ#WNv5Bg|!q=p L笰+\X}_}sy\iTq{K |f9ޫHZoHCL>y%q_D{l˫sM[lp~btӿjc(ڝp v9y}]7 m΅nsGҧpx0 +-^gBgu҃kc  :{rvݕse5ñZ?\ji_;n.EH}m&Ns.8:fUS4`rWi{8[yx NjcxKq+H-b*t s60uAYS9ۉFNJ~!FAv}32 a8ߦ7=8ۼpO?k:T.`i )'7v݂svv͓;~GP{\/,zW Z?v)gLv 4<=e g(s#cov=1hQRc$K'cgKl쉎g7N>'X$U1`8eal N;&+^d#fωzG0{M_ =%|q*4_r kȶޝOD,!L{R9%mt;ˊ?,BO껻8`T q^ex^V d75}\b zgC`gӏ[O3=nS=7#នޮ8:p*TN6Y@s0kcWq)닡AG8OŐCгDkUc1L] 7]LʘS&oŪys,V)MI$)9~˜LCڹK"m\)xaz* v?nFس':M$GjeLlŪ#-Fw뱦2eñdX= ڟ閆A"''n^j;{F.==g: MMaZ\>x ӿbIK#( nOiA/5b8qnSM!@kǎ{k`E~\É  eCdKlvE']U/ԂM81#>f?4:&aM?Cf;epNHvtt RЀ*en]R7QhE r#zaMZ8ɤX~cK ,U5Ցn VlrbJ'ۭDZ)b2ro7C-0#>#3y;p|r|h0>:5so1fchQiYm8erF?uijz*YL侠=fk.zy#a.0 m#ha¢uJR86u,Ko2?ȱӄbZNB{䅞WtL:3tk2aD쏺KI,FL??u5 zn' ,piT3o\9&TLyRp't9vheLšZԐg6.*#!&tS}I'r뾠~ w^S];pʣ@sdk- ~0H6ݍo'XlF^n.Ub8y"]S *- ]њ'[ ~ygj5Ԭ$ 1g#Y5X֑'>7kf/L>z/G. f[~EFTcVX|t *qxBOu#YuHTI 4`b]f#A>.@OLdzKbqfTs~29E_> sB)0p5FN3hMrh?UW$5*`ze$Sr~d(d ۖVlngd*atsbe_Sⰸ%Rϸp WGN^RuCwHS,ӧKl>䴭b8*oܳer*~EZEЫ8\Qa3̺_1(!_.Bc|9UG U;`eD, n(@@GEBL ƙ?IorSY8R* ܦ!ݘ.-\ -dE6-q͚YAɤљ S<П,ʶ׹ Qbi3sGYbI9紘3ɴ̝,PxC=%Q-iIwftRB::(gy= {5wSnw^,&sބbݔ^eHfPFڐ yy2[^0cPPw)[l`Ya 5lDv#a0;4۶NZ}in 9fW{0Y ^SWc7.;b~e&]y7*$vm2$KN9ND޿M)ZJÿM! 9VV?'gK0mʼwqd.Oe~_ f!0ivs v??L `]a!&8,J5c~{)1fg'Bֻ0modeFZfvM';I9-+tL:JB=87?9/NPmov;lT^kZ+ iAOJK>[IEqG`hL{>pQ; XPmtbqT_+0w Uo߼g#)JP&cM?BEɰpЮM?pNofPqɛ-DI\ʫg=ۇ7q.R{7_Od-՘ݗZb tɗ=0-{ $n0_ >=yo$0qfEr >Aò;ujpb@Pt@joj^zƟE&5 d'oDbG 5pJ4Ϯ(\܄)gFpF+F-rcG1.=J;fa n<8lO00-^4{ %_:CI/3(SљBV3=U^#UXtB0 oK;Ppȏ(EVuw=EbE!{s7zjGw11akW>/ _ijFj9mI'ü5WDzj]|L[ͅ6+o,lWhoۃ8YsM>t=;=Jѭ3%cLi%6U߽ ;ʑi$t $Ӂ#اĴ)A%YyE?sCzZq _م})֫~=5&?r8 f U%XM }%Ĺ|w^u݋gqV5=ne]Mt(qn%nɢ:caT#' Ì\aIʭ3g&u!8(ZNP}^؉KMqe19循aMB-Tj;nYҞ8G#xuu/=G򰂌ܪ\Zddn},L p>O9&` Wr | P S`|䪭t]g9۶`KZJ` :xÚs4uW]BЉ)lXkٜ8dT_v? 
s!o#O^k8v(֖3I_FqNøuUƔ[$;WqIQXx3v-ױ7G>fM柨?VU+?6a>u@Dȇ6Dw/;U)ӿd%^<+FIR-0r3=,,]?:Fד "Y]?dJX͵5v,0TT 4o5bΌa{u^H?]˼2zs1GySޓ6й3I̪w>H֮ul}'[(Pl'}7P&qWp -PuV@GczU :dz /g4}&ZtfL[ErX3A_Q=iYqJ8+me,}? ռ/5N@۶Kq''gO)d-ŏU܂"r;e~0[e#vvK?gB_]r+^gv99B?VQdͻRь8&0{,S7l;/t{fkeW&wǪ\fD> t @Ө-:切]}W=a8uy]J0|'ᮻ>m`\eA][-\`VpIRG~wÆ_[/jZ F+fr@U#*XwUe^ZP>k|ӢplLW# 3{{0'+{ϺNckSѽݼ- |o*eTyaBjE:o:!/S0@yg?g#JҼ PKS#,='i&%r ^t)/&b#k 35/_,\= *]煦Ş1CVa؊5ޞ6}iI G8b`}LYXo=0PڅlgCuQSd <]W|=^v[jbcnRY?ub+F*cgo慂LFʼn_Zp׼"P58A׉#/U.ʻd;h=2C_|/&0pYKN$H#(VTmB-|q'tyCpn!2-G9T[7cUBבNsG-)u?61zK+X.{/9d+ S :G{̮EP8(V<0wM h|r~ς Agޞ<,.i &A3K>vi'.IҜ}{ BZTwfG`NKn/u.bGCJR}'`mE-eJث厚)>K,ḱ݅Gw8ذ(^ar1 ?~wN$dKq`y2> D6[?N$cw,~%QN`KRdLU넌-3GW$p&\bFy~8Jٰڽn}}6V@m`쓠@UI@澃$so§\R_=kOCkOm:%rNlny)1l nuĥz=T3s[ޕ|]=#AZ8ks ywkL<9Mn_Z}Z %+/~_¥c_Jxnltc!,K PD2 ʦa%EQXZd]wY'-S!d"t#v s{,䉋bm2KyԳSOAhPWp0$˖Hd4JsA_$,h :)W^2A~3H_1.M)Ro5hŒrcl$򢁾 >`G Ӥ7`ꙃϋ/ri DO8b{h7ILPZu ;af k4rO8(09Ց;H?۫Gr3\O˟;p?'fudžGS%E v|~%h{4pwWumtju/w]; m/zGp>')/n2ߡeKNq)لYI#0̶ᘟLT1KUVvG;a3Ӄ)?2\RJ/m(~iX#cύ,'0PO`J1U{UIOߌzBY|왁A+=s`no{Ի !i$9mMKq{baӇPr٪S-i0?ma˘Hݏ@`p;8Hs5%7Q,n쿼ḭq'+=be o2bX{U` m V77AQ"7/>)@Kg3> 'i+mXMiã.7ˏc FhUtGL QK@.Hk(}R{g3~~<Ԣ$:y΍2,mOPl^ήzE]-7S~X]ql)s-5U2U7pn'aٗzRlluB#)Jf!qgFRm gW~.Kaye*+_(rzU\U0gvKs t}lb3.^ܧM]wǪ&S;a&48(d6tCSphK +ϧ& *o )_.=aAVݥ=jBs:0͕0x/Elkd2|'ؘ>sn@V͛bo{E$l'M;t|mN_l ͋b'7IPhzT} 5~%xk!!fFh|(j/}[*"UM @<S\p/=BuoHޛ Ω"RC`?INhmEc.&.xAx:LKdfka5xrz 6t]ұcdݔBٶwGf/)<~M0/fEƞws*=:0&Q+ 0:'TLyfw=8a[nFj1,3\eg]Τ} VZ8fA: 43/"5WNl|)o7^懙_ymxqVqPεCg;g5ppX1m银x.A֑\~K;8A|tP**wOh6J`Q'qCFgl`v#7tKZ1>C]NkZVhs+dnj/ L6G 9&; EOi$t#YwZH=J_T:nJRG =;9H&2F^/FljvfsusAnof61 f]8AڢG: weN+x"IH4>d/M;yDr~աHr>5:2s iX2y]zA^t֘gЎak#]Ӛ&uaGfh9pL1<;ytx1đNMmw=%f{y'|2gO&'ieAF&HN匬ѓ0|?AR:MN/Nq팹`Bz&*X{,_v'[3dXͧ}.n)ful1X,Жk*xR59x0<9Ov:Ϳ)$S1͹:Vj1Gy@[M Ǿ6ːN7w,쵫U`Oc7rXCR_!0n0j(v$Lôv`7)q 0 KWUU`q帬yǗ[0&{*ݱfIļ;j^+YcMrF y(hyЛ伸OY}vX\ vY6'ݡI_UE[me"[r`a K٨8_]Z[8|9 sCa:aQr`Lܐx;Ce ?ޢ+hR9#v5pX,0ߵoT[^f›mf } 6EIuR&^[ı{w ZШO[hׁ6~Ʌ,^//~\z²5!%Q0*/p?NUIu$L{q.Y*w}9fe^.Y_*Ub68_u^9Oz .eB؆/xgxJ` f˰H':8L e05xϋqdH*m}aG;a[fUk, 
ތ4ܫƃML\Dh{L}}soO։-w adlgĻx@aS\ieyF 24'VrZ`ffD[g;'zNerV`@%k)c_p6/Ew/}b;[ @rB\\q{o> XgaZtTo&ik``u{pJbk6f;I`!}mIkSԉa8۬ժϐLi;aLvPUH#ٸ*>J_!t`Lmgirfi@[f&-i l#IXXxeWu.8+I2bHV2$>=V So 57}ό>6b$';Sx^-źWgY(t";+Jl9 $k 7u`v#g#`g;8tod]m;# h&v?$4_j+Yw΃\>#Rlm}ފ:_=174,_HVʐΞU8`h[vV$> Zd@{ BGyΏCHۘp&izf a1-`eU]n8Nτ{&ƙ]ip=yRK4iE3feRXU_kx=(SZm3Eav1 L~dJ8qO]8}ηxaf?gCI_+] ˏ{v( ,s{E-ϵZ^"O1~$HP;|7Oj}O =P|B39aX JǕ%չn?Qqy@n;I O`GZ!"o[>ÎVe3xM1R/h UBbQQ0V}OgXUE8FOM Ӽ1BIoSgǏ4?c>A J g 2A&(us`ijn쵩2t|Z% 2O%`jjtI.<y:t <q+{pb׶ND(.7b%z&aGPkPѫƸȺvip2}'37ghlNs0Z\o{ dz;I[;mw$i?EPpWoHyP)P]?vH&&*L_fBuFO|swjwkLڎ-Ő$>c%j!v>\HrfTST՗ӳ0Qr<蟑ze<נ2y{leO-?\͆.BdCiqYbgrY cY$Ӆח4oB@_?H>H-X4"Y6oP>}Xwݟ|o,vl%AnѸ rfbc;©ŌKgCKjRBuz)n| gl .II_qCpGȈ Lp;X?Z(rS@Ru6'%%V.߻'.2 B6TS; ,~_7Č/pfʰI'i Ε)i'2ϧ䫅+$Ω3p=ہ&i?ê٭{-F09^O%ڝ{h8tаatKx[/Gۑ8?<ʿcwR$R~$DQ(QB%I-!IJIv}˾ﻱ93|g1?pOGrZ+@_Uv ;% b.죦Sdah v=֘[W6ʉ=k{x)+-g ? RJqO%XI_pu T~H2k=Nl59sI]G%IJJ%$% a) ?몆F 2{ZއUX9Vu06.ACY$1/ oĮZ]ʙ=8vx)yZ5r&-t=nO%2?bo]׾@ז[aZG$VVwXX+Mj۰JJo,rzئKΦb{DNxBPM1 KR ]Zafʓl=y awխ" xF$4QL~$1R/ F[/m~/ 2mnHno}ul5L׫8mlzpXd ܳ\ֿ,*7@61z8-\vT(A9T;=? ye7{7֗$f-VηHVT#X/0*xiDITX 4JR ʙKWߪ%ےIL.u&%MFDvkqI: N2AK t`âV5zyfI`x+kAbLmu٠mz\`/!Xb,+? -MqD)N' |Ǐ*— 6jK=_|ш={g`qdUHVq{ |mjޯw\7Fl Ʒ"=-Re%69k`tH/W;@aSO+wW,{z N,vq;8[H J8]%(Ϝ+<|Y|%aہgq O{زgS—`x? L4X(#Yw(ҋcl=4Ҿ%|ƖO{/bˉejUf7?ZР''3̨贆9w"Z;~S#pdBi-Vm5X-ug_:`(kܺZ>8Y꓌iY!H ,{ ޹thĢ8xLmqne\[,yocMib}*IZݣ-æ mv ê+/YH8rr:wpvc`/mg>` Vэr4Xv#&@Fm7*ғVd|_twkXGܝڦS[Iרǂ_ *D<,ZtO7oLB]ƽ'+A0ut6<hJAױ^bH8qD :wY:}X vW8&"杽[qF9^彆`8n+j#.|^|}}{5\mfLm:"?" $Smf[}F Jqu:6E'¤{'dI'3_FwjΆ3Wꆏi\jρz8zkT8, 㫅_MC/:q72G36:{_XaO㼟Nu<|PV`ܽd+دsgF?= !pgP癵k/,~ߋ$^ E 790Ǫ`f&7@@'N}f[PŜChN0>7;%,L"ﬠD<,,~br'X\WoZ?a>F`ׯKi#S`c]Q<BdRtb& s9A=Wbݩ&rH2+a t qz/;LG]c|?kS?(!oӵg9 1Ta= S<7]>x?I&h?AK~$wd HW,‰05elw9|ggZÿWԍlO j(<羹&f:T̗\`xL#<L1ٷj NoJ \Ɇ=n8XSf4#zspluLw(`؍931L{!+`uƒƋ6.f ctכAsqϋZ7D. 
yV#J`=AT xq.<`%k%5ng.V[\O^ 4zݾl~ے$g~MW`=Bc;[U{g#MR5ob~^-j`&(K˚?JcGgs|ׄdt:kB*Ł{{8gw&`߽$c䅗kQ||}yb/X Jt\x-l{Z'ޫ)<zE9Y/, [n]̆&-""aד[:qSn%W.("̞w&Y!,kB/EL} ;{͡-_>>Wݶ$TM"Z\ּlfS+SbO2{9pqpwӒۡF 3մ DǹU [UT^xfս p>ӻH{!ΜPvTnf*xu#7GS<.uLSen0]a8wL_a?|1Lul?]Dpxz%X|ߍ:T=iLS3͐286:cw UC#nZv vE#b@l,!b>X`mXRزK7|  M9ǥb7?3MHqAH{~@Uϥ5vҏޭ68y1Zz YIO 3$&qٳPT秆UZ ңX-db#c1Wシ ݉7cm`nch6uTCWq~Y`{0r3յJW_C}(`i0H`DCKy0'3zPJPdoqqCRk%Vʆ [8oHNYWj;/`%mX*8f :G'<ŁȾW8&}>R:8HyH~i Ą ݂yNpo:{Ƶ^mi@/3`9Ϊ/Upk𚗟uzmO.F[P_jHXxNyI V7q\QV -|䬯.-}'޳RN:+:iloSP$?ąȟ}/k5h =6!NG mzȊ[䤿sTƂ H=_`&7fB8!;{&3`ߡ.68>L xiM8񖫂HҽVJ!uZ8㙳n&aOΧ U6箝1$+vݗ:6y~m&dKak< U  gvJ_wI96rF4Z XMW#b78_.;vtgd{L?%$\>b,.Xb;&V>q{chc#0cn@K;߮{ʜDs xwZ|&#Bأ󋰤̡!Vy`qg1s!k?gF}Z/V0N 5;qT+fc=g4̅PWAZth;=T.<,1aD3Uc.\W,dI3:`[ܙ 'Q {+nglc;2q\iʰm!b6Kg^^1?- lkncgFF>yȕt߫z ̏{!!SAx|OK  7C#C_ލK+:_@_PᴐePal!?? "k4_%g2'X='f 7)@NbP%eYIhTxaS,x'5:ö "/wX_qO WbecȪ15h-ݯq! 0hv9Kv渔8X:#N6v[qdk1k &^g/JcWC5 mj~O- )ڊ$6]0tW;;WylF:UPLRrG(cf,0g-ðAUqB&:Äd_zk?gޓCK {믒:F`&t.!SGf:auX6+!*s+GK{΢Vg1ι~L.t_ӕ-@?wIo _o!8m"!+W.OcϏ袄Ls刺8b*8c3 ܋=Mp=/r`o/%^2p,!d9>o3I9-5(JՋww H5 C/!ረ)V 9W.I4 s;Ok='Y9jn$,;' )Fu nnkUB٣:a0.&z~xx{N^+p'Y=88#4UX| SnKց}iK0>XoH,t"?I^b?ݏ] R2_SF2-I05E u'<Q/Bꡪ^K1O.^F+mz[y.LnpkWŅwEgmg%U%]7-5z.98TX* j~LȄJUoN>0Ja~M]3̿ F]?{ƢOfɑ,'qK'æ<^vX& _aߋGOB2w,' |dW\d"(ŚO*B!>G:x'lч=3NN8chJ_t[&xAџIVTH_2+#_ ѡ_xdwwZUmN0,%+oU x 9d1-v_z;0sGJXz3n8d2pFCdN-/h`(y ?/]M02۷@UZN?%.Y$>=|C޽繯 Kx#kcyۉ5_g"#{ݽ5:[xLLjelhL)s ߟ_pέL~$(̶jyPS5Qi ߟI M^\9tnK0Ź{flP#:b?ʼnmF/Ő ˚/]5̙$ڵut/-mm.#)9qfi?bJop@#aʱMsV̭~ w?u2Ñ̆+G75 #QR=;^zc++'?F9wb:V="шeﴐv̈1~ kr57`ˍe0T64'=lg^fˬkzCFWEO=we4ubW=Zw^ QЦ$WcJ4v)thw/r#M݌m%+Ư4opҽpDiOXh4v\*Z> ʕNQ_},)+U{GO1@>I2cſ":arcd8xv{W]> S3aS}.TN=!oQS6kLes;*(ށ5}R8odWmsK/4a_s`Lt 9WIJ̓h0?8Na}0n9gCݹ[^=&X_i4H?hcBujs  ~#6/'ɱ?f$Ӫ᳧"qdCoktKbQR!׺PW{.1MO1`r^aݝxHڕ& /UwWSl~ɞ8T0B0ޛdQhVkާ&/A2.AﷄڷaH Kq&X1=*V}`4pXA&'H_|69[%0щ}[9Նp=K=KFH\^fr0\]x 3OLfכol f~mfJoXpIڥA-A}^C2 "'cwy5{ ;j&t{Ћnͫy {7Y k6 )Ⱦ+ V.Yn'b 0˭P{-X`o'C#]7pSoFZ1$Px EDޑtʽAMAaRKIh3s3F:~.<'7hUECI+PX1}E6&->u+Ɣƽ5둖v0.sv`!;qᒺ&ʨS^'Q*`~IP$p&TW[Ϗc ]lg=$Np{_/`JR[ɴ)rܛF1Sn7v+4&٢t6m 
[[?ay۞q@z(H9umz ,Cԅ 9!ǾƢi/ 4ô#{AS\ޱ۬6f_w_4NI|(& 5X qT&]]ݧ`8-CcйLh g6^пfSg >$RYN?gx` -V`ddt>d_'-`UCK732_o,FL?K2{ ![]ās?sӹ"hNɆg`/d#,Hs&)nB sX$QKbESw;5Wf`ķf-\\ St_6p{KzK u=+coܛfR>o'>FnP*ZWT| w@t٥HuC?5K_Is/*bݷǽHp3O@ָQoѥz]Z1\M_@I͆J%IE#ɜ~ntkᭊXC O5oE0߼tXYYd (o 5L6;;a: %"vq[ ?w}[ sg<7^Q"AxSy&LoVVDa|\ b&qOIk{0S$s:l:s -+7QjSQ5$=œyCr05(X12{|9Nžo͹$ n Fڰs(B))NI`k*%dt8B=0c=v(I`Ճm|3MѳkE'{e:][̈?\k}+lR>Rձ{`ѝV0sS hCً>5cÖ|Gz?T[Jۍ4hz6U@TChjW'3y}wOEb{U%f19ʝ ݼoIv앸aW'@2W]0t.}-u;?0#+V!lgLkahq%@yaL'O};6gy pWسG{NjN^Ɗ8zWtzNa-&OeŲ߱,kGV_MPK8OŏS,x6@W[j8pSD #v<SW\ 2{MbKtCpp$ܹ(hO-LA6xjRj^q%vwםP OP輫1?&K /lOaY* SO`᳚>pM&w-VoӠ.ѹsW:9Zţu3 al:H}U~;"͊\S,]pb Ϋ>Ka} a@wU&Oc#X0V 80,Q,Ϙ%|e@7.s/3fMph)9`Sn7 iَM^+x oMOӆ%Jq%쿥PzDe,Go/S q (8'X٩ ӻ7Y 2Bߘ?b9g0xSINo gw 8z 魾Xs.P.`hEi5R#\✄ZfVom|5`MǮ*@ s8tLo^˽}2o0oLyZUO>M-k Js{8WeCR?0OHq_[f0{i"$k[&vL{^yx9cO9hs8e3S]mNKm 2 I2H4RiiAcjݢm9lɤ*+j돜鐸Ax?Y+L s׻9@j~N8'ޚJp?:= ocW`ޟmKHcGfȲɻXl'6?k 7L.n[wMBpzz-3wv~khY/.ruV?NQsy=g`H ~0&ߔS*VpE;|7Rb=6Wv4zBΏEś02tƪtu>C8jKS?d[zh6ޟ} qOF҄B"sd{;o9g]5뮗]Xwk!W31>ЅS` ޻ykh`^0ru~+aӊnA'Tcύ1f"'EX.j;.0T ,7jpɵ0) `=+ɳq#XUKk'}xfdI?摚 W,x3LY1{]<*NqG4g]#[" ~*A͙Ma υOYZH N̓6O|y~LLĢiOƹ1Wz\+5_ެ3Ż INv@P?"pq*f$ 42a\a2 kU:|*"xY>b^@ʹ=gl 7sm8'8XEpnӅK^d!i4ҍךb%0ڻ(4y, SzrqÉЯ8›}}nIu.W0x.ҽzl.υ0cC8WG<.W A)XaP0ϗ|LɃo۱Ds%mƟ4}+!7$8;IM%=֬^0;,W3pu0ʿ;׿sEcX#i)I=WF'aHy@ ij^1Y˽E`"8é*|UYTaҔ-Y߷,gTpV'oM>RvjüI%LMy;Liϟw24pL\M9;UxA;]f[ aCɗ8Mt-r׼F[)~&A콼}8XZ ӗB@[k[c&,<-:NٵںŖB-T`wçl6~gu 6Okp"csI!8K)-+vG., r^2~ډjZr.gVG@K5ǽa.pxQsFΝQ7 lNpAT\ؿ.dߟ=u^Aѽ&{ .-O^]9?J1[Q7zuAM`]a+?C'.GAA Io{:w٧HζB qyO7^Xv:\*d{x'ں۹E{^|NV.x@@'M0IIm2.Dž+T@:mvoB,`0s]rq' >Vjg!IlI8Un_ Uصdťf]1u-U<(dt9fWΈМ^hLqҡap4luO eUH!*|0B0 ߺQk{<9mS..YZ4bቾ9 {rY!p|D9Uf(R߄l@UD_U^m;Klµ?]CX|%$zW-p8o1(طiU YR>VzX/k$kQ$خ ϩHv[^/dž(U۞]QcA atUשS?ae\8z͘sIK8zPAo6D g`;Ljfӻm.R s#t2XKp''/bIl9a}jجtcΝK0`u1oɇM:ٸtݢC~R:B2>Z]lZ'D9}8xr-w6]̙B2NH-"$URv!uD jLNv~s8^X z5J2۳l_%8./[bCr+<:$BJ TOqNuqe9׽!"4̬s֛'_%%FvڷaiMqvFqĦ`zC Mib&R>8XvnƘ(nmEmoolΩ o WwEIm%*to}4AoFг|p$%j]ft>H ̓vT,ۘ=+C=B7a=̫-JwU&fXsHڜ}OH5ύroRОeq{zm UQ jօ%6q~`A2J%y|MnRVsϫ-Ƴh;5l8`F& 
@϶%!o13nPĨI]}>}|d˸_PE뱀Pːy(}_U iS2PxM Xz3k{u|8Y!EsIwM-UQ.()J_Iio5jHRn<,1Q\p:.+'E1R֋EQKhqB?i%MRosWY[B[Z4B'O%_6ה~rBo`b:b`~*򾲊 -հ'sI%OɄMϯ!B_2τ Ha D.t5zkutL/^?Mӄ^Y&ܞ^>E؋3o2&p^ Ce0SֵYE`xgGqT FV4g"շv_ΰʾS51f t:Ǫ29K׽gy}]*MN8ݢ'ۣ(l.(fD4> !&4u;[l=%K:$W5p0b^Hh켲1=`}O0lm,ie@nƲL0ܗ Dol3OWBvY,Od*tR^IQ.-9B+ B_Cn4gJ~7 uȨu{t' c dߌgc ͳ-`DM&sj_@9cq 1J\b,7u驚 wXH;qc ynVW 8`kk=Xq\XW. ^?8W/P`:z36AwwVNYӚԑO;laڵbzrAȣÚ0zĂD>P`H_NO+ ʌ\|33*$E%Bg!W{Gt0l[b $ ȟf{b,U[^O,ф|w<{!<50h8/cjC{kryyR?X.k Ym@S?zwLph{(8Xy:2ZRg^!i8UMG?~Tbs{ #TϸS0Leƶ 3bw&/ҵ Ke8,$}` \Xy ?Saf L{pF|"<uч?BgI:$+\^'Uh.|7^ѿВR{s k˪{mzNPQ 8CG69%c> @z&{;@_B`A^`9C?9bf2 Rc`j>1/s:xy mIRLS7BE`76;ъM< ΣC)3Oݰ+\q]Pw@c**zh.^r=̧p추кZknV_{vBo>R_jŁk,aps p5B2Ci,+E-+Tfʲʫ$k*"eA2ΞQx닙tmk3ܲQOͣY]vV^JV$Z>b%E`H Mw,@ z*:3llm3zڲP {zDLO>U83ews_U@a*0#:Ex a/wJޑt!k|-sU[vl ס'+BqmN)eэyIXhV s$wEwVH=v03#4 3ruGu:Ѕ4f*q!%RQ3*9 8ל$¬`50a$Vqܼ` 8_d SKncOA6#Ѳ<NL @D=A0۪bUI*}~;ƅ&}s< p毞c{i</䫎iц5~gC%gNksO33)^8^Ng]L[i 2!S?qucnt7~p_?DytՓ'翚Wb_bhwz?[`?oCsM|_]aңen.s3:*vl[I EF^g4S}`>ä :pR'd< s;-E#PHṓ3y3Sv"1&&3`Oχ@2mp_E#aeQMyjNW].0΂zo[؝%<սɖ)o( 3zvL¾fo ̽n`Hhh&%cam7п_6g%MS!U3йa2\g{S} &S=N*,]W٦H{aw)8ay+1~y$@n/ َ 4l1}U[泿ݯQ$W&}͒t!SC i'[leDAVqXP!|?V qSïK0('"0V^ |.:c~ҥm$[׼/H\8Nz*`IR-(b'=F>AWN<#,/kɕ\ӏԅnM5P:ߺ5dw4cjk,K3y+muC'aƔ8UeU<-rQW`Zؼ Ve0dHK/$8ٳ5/؁`! Ƨ[(ӡnf~NmwS7`ņPlR6˛d;ěb{̏Wk'HSɒmNyfvQd.IK^.97 3Ց(7bT3zł`ۨymz%zYjކ!VpBtT_ov w$^_ޏ ʍ nb`c=L9/n]*$^1=&ۆT+(ʆ]C1LRxKZcރK̇0ȽHݣi>q>|b + XـNly>jN013ܰ2MLN8LW.ā줁~S{z{ !}@2?.Q [TfHj _t39]yQ]3bmBi ]M;EsI.dD+>CZfM-ӌSWXtw( {޻ǡ,hS[wa2_a9uk.,7nbDZp)oxKjC[cp(i+8g ڷt}1WH ~PL}IWlvF0m9=dq5L5玭 EqfN_pdQB\9Ѽ$hY;lۂZk+yan3 94N{~t{Kq=ά:di"Xw|AzIfٻ<&N$qle%5~!0lqS<,9>i5Z }s0֑ Q5 ';t#djk3ӋEB՞Sp,;0zm> +VI&9=0TKXUg ʎR{ҩ ^BX97zBPBLl'F2/0[q#hyW<]#;H*n٭ǡg3[aTCq{r<Kk?KgrRL\zw TUq<&ZA9LMud:LdτYX~~$mP1o; veXaKgX*U$yd(.3^ yzh#W3B=m̈{܈V'NPF0wLj+dL<$W[^ ?unv-K JԍK-8)) +O|swJi{KX4:]`pɳc1! 
qraOԗz8ud 6HUK؛mNr8uW= fr4vN&H#gИwX/^`|PT_*%7yL5h-qvA!GV!^kA˓dҖ].QTy?]/ukι̯p9أ6 \88^S^̇/$C~~N8e^݂`XK}G>ApE 5N=ÞZ[߳>v3*;w gJ>h>L_r-4`lJ46wlyqXԾCg%nXuK>7X#R-[~]0ໍ6^?/FblK*Dod؉'dzqAqj>?w~ 3ϾLݜ0rwwXY1̀PD<֍sgC7ol Kg fDŽ^2~[&c!7VKhCv8 dy߁SlnX#+G`Oʝrޚ2 _{'őEOJV<^ktu-a#qzM i'YT"w>}t/a~OtXyS?9cw`F%8ڽgsL\rb6>e8l]Y`Vt_݆f[.}Vҥ5kf+|:̆^e|BITJJK'&`DPd[_ 4ɿޢs"2 ZXQ2"ӛo;5Kj?ī;|Rcнnhfx`4h.H]ⲣ_}땏1[x^zړ"h/<;Ӱkx)='ulL'4jpQ8M%g$+s|n]0iGZnkLXZJ}ցjTIfc`bc78}hDX1l^ǩsOac/^V,,Jb\3HF񉍑xe.I#~?B]5'+LU WZqS!띋_{mhQCl|uWP? e~>̳lk.M6'R\$gyt#;sk }RN{M2Ȉש2Ap=0%E$x͟j.uWM^!nwQ(IU:(##k``{f,!c# =qZiEjDZqRhA',evV }u3bͧqҸ^ufK~͍W[fzy?F.$TG~SLx -3a]i(ǎgO5p6kP{:)(]`@Z2>uNU_/G2U"}mױ'i hٖwDsL"Cay XJ݋c_*R(fo"+M $09^&4z&h=6[_J+6#7|p8_zTAϐeh F7y9Fd64ƭph%tb?8^pNWo^=7%x;Wa6[$VhRxp>5oȋW^35'$G8\\lct&G^vgsFGNZgqW|>T]4[vZHvl;T(Ւ ;v~R*)ÂKcNT+0?j;/HV " w1wJ^6p5i|Yzm86bWtZG\B I||)`Զ(: H 0g;̴ Bd[. sΊ_lol-&48FcIʧX}d\uEni{4!):\ nz3 KG?>Nzo^'\/櫨ɾ`N(/C2^*VGP(/Vۓف^e!__a V6X6QOqkk1OW׉sVcNX=G{E-Fsܐ-n<$MB_wi3C9=pPh˃I=/he|SN}6o=_։*{P)K6MV_ٹn*M]uYuQ0㳸nެ&HU-O}LzM8_w⊅Uc:'+rc4Db¯g &q?#pac bhݩWzS}fO |>)KPv 7GlIK)uY-#xӕaL䫛*.1~MwJ7̾TJj=mGq$q kYHo77$OM8WzAjxO ipboh;?L)@L68}4Ug4Ѣ%A|y]G.JzㄙIlLJ`rwԱRI7mx{~-Lu0b\Q_[mqj[C}jN49ìaF@ t|7 'f CG؇7k}㇧-:k׆v uKm6̋80#Bgl /o2i5rpWN7E;~c?Ypw oafitWֻը nݧD|ZX\7>Gڨ1!}eBKʼn[a=g6|I{ xƷܾC'z&X%}pC؉/aV.>:_cr2SƔ0(KTW!-9?qXIH<*SWW5%J;9Q# U#0ȯHvCj"/]@q 9 :\^Nqrޫ$A^̳<l[ ?XUQ; 挩HXv~x!"NX W~yKrЇoi VCݸ7_B:ji!\#Xy'3`%'|ŭL L&iTS)@u[`= \`g_~$0=PEd|Jy3a~sU8%P9{a68:i_X"xI ^(]ƱyHwަ\tۧ M""pb0;Β6=n8T"gS+.6u߿l~d7i_5yq 9w8MZ˰͟W"8OƆLZ)~{&8z5Um$J/L ^mnOw[\p1s+mk6٤ :4 _NVn(.rr$r%$hCgwvQ6&Fme1KXoK? kNi= Ҡ^j].J۽G+, #cJoq#Y,+cq!Z.P9  L^eLwx 48$,9d s9[һG;! 뽿w]' ~Wt)|-3'7zLU:$Sc04qUu>+avHyz*ue1j&l n=yspҍ:)dޙ=W3+CBKa=WUS;ǿnŁZ0}{[Hׇeb %UM?cjd;07dixCh07msL98.%^jC8>I͉r}V>sdϦa5ĺ'Iew'φ ^# Lkd<K廏uHbXI 럶4¬ XhOuNHU̹HnkO^/ ueĎNQ<\YxʎhyU9V#:?Ii zY*m Aa)F&?UGEWeiQK)0Ҽw -X0fzA0*NJ-e1OzR{7B|Q0OoڨzgNPkd^=O*\;N$;&8{Xނ!i NȳkǪW.N+0!t$v:ٳԄ`~HM?L歧ٽdn@́WGƛ_0=Uan>K?;^HN~Q [ԝ{@ܢŲS.%w=wYjqj{8{ԙcLb.dͰپI:O>4q. 
`~Vų],ڿz D⍼Ұ6}a 25>==Fsís?DeZzл#eIx֗~}ofT^盟.#RظA8uuQL9y+ &[0e'T̍}''@Iz-Zhc_+I)d7+4?yJ0 )B`i 4MwTv?9XgsLrX(Q+G \Fzވ#STtQO2~3Ysvt2)AZ9)9!E0l6 {'v5V40)vvt6yRm5.YUz(!Wu:DZ4&مi5o9+O<3K+l༅}~U6k{1wUt_>"G@ ,cWچ<%ufG%W2?OXҢ@_+_ۃ\ M9{ݯCrFuNaި鷊,u,LUkYksta6PxYG<p藰 )򖴋Z? \qc0ˊJà~^rֶް郧_ďMèeը~7Pfajh@cRs&Pm0桁G`u%`[Y,(G9IC/9|LTpWCk%}~`5ӾEqf0? hީ;Bڕ`.JAzqtz@>0 Vۡ0\TnL\=>suu?Dh9j ô@P_? ./f?Pk;5c0#֩5(IP8߈v(ǀiK\0Xы_ddՎ]ʆW tl; ?.v@ͽ`K__0>u+o?(.dZlߦȏC 3nqZuGtcԛ.HBn1ָqq ~UI9߬3#4v[*v>@`iS&l32C̍EA{DZaqԕ۝XR]m9u{K8Qw+J}QagPz8wmdG#y[]5qNp | h}e4įYDZխPEU S{Ook@AmA\1E|oi6ZՇ22QEL|p3;H6h`KR>M8bISAz̩=9rGl%S 0qu83ӛ'n\޿/6~(<nj/z`c,e8hv+zS#x^WYOK`'4 #M0tv2϶Ƌ087R':z`)}X"-G!tYaί:YS`"/0ق)~@FkR|䝨P m!|E h,äҖSmZXS/{]QSFq{nQRkKo8¼r [64zy&<* "=cߚ'q4 jv.<LQ"`9^|e 8/`C7LURul4;k>z`zGQyݾfF#&͚/BtIp PeYܛ];v;{b+`0[QD֏O+[0lhe(j- $ESE=W,-8Ӽ4p^h75*k/E1+NQjŹ}B1%R1a[W1uH܋,B'}4 M@JUE29ػ+:54Jt'pt =3J;f}]TCb3VK҃O]skQQRp`ӫD7bGs:KY8Ltr|M\YOV5 2W8A'%΃X .,-Wf5O8WUcG|U5;Nx-7z?}t;H2a|iG2Z<+fIq.t6X˘\U!|fQ$^w36 ha,Wef4.-d~7V' 5Co<1⦯Ϯycfo82eQ']'Ϣb8}ʏ]c"aM$҉Wo!}{1\UrZ -fGό'\DP<%%8wiL*/g+۵]SxDܺ8[ $mÛ 7);sa0"to8**fwGMW`9"zGFwQ>cYNjPY5hsӏKI0ZXD n ?-u=tG$J5(m[|.cs ޸Fu&)-Y*Y ڙc`Ǩ 4^P6yvynr>%O?^݇5h$SB5LUX~]U\VX'akugv6? E$e!eYy;-&CJ>n}MI5zJdWZ/3ݼ'DPC(AʸZ g{D6]!XVY/~ʂ^pY)`Q&&28 M+8! q$m֚Ϫ8=_;f'މު]_aqo뮵v:s}P!'\(~i_8 _^˟K_hlF(tcW/(Bmn >R8O$zZϳhjEN}.,Ɖ^ C[tkŖԀ~0g}3+4S5q;[`a*5&׏9t:=[ pCpg lsdX75$R;\~tl[#" S״SSS%9ڽoA@QR`-u877,^g=.z=j 7u{x u[f'LtӴ&'C0Pndp5 d?:Q[l/b*MacBpN^'`>]-l#fП~ R9x|N}j8ZL嶜h_Ur!Rt8 tޑ~[N6`]Kc_WK}|1dzb^R+ K< ebڗ ΒУsލ] _`HENc$EȪT~*$J*-!EQIBRDJ(*@{^q?\z]s uoMW6{g{;Xi/(9N蟖s}ARb_r=$Xd dC]!Fa$<ܭć`܀= B$iTPؕV;"v`׌`ئ7|gf(V>kp?'c^?CcL&YHVB+u1>^ > SCN?p~B\^ # `u-?g/pL*nA2y|d zjNPL٫W٢Xwf.ǽ=~-֛qck%$?V|9vr\釞^ޚ9*E7{gALd_d$ٿ3aMr<#1+XWaKeLp"6 ݱ+xLH O,"c]RyKĚ}.F2syC$Scsn*!^xUsSqKC!6( b3^8xBѻv*VROA9e6I*ȓ,٫ؗ-T=Mkwð{F9zyM̻|ŏ`SS:C&Zz*0.P9c7vOGTԍ%q=ޝ 5Bї:gpnt,6ge\"m^Six]tȂgslgORd lK,_)H$绾Ng6Oi,0 1lj XUc_4Z[8~ohB-8ZFϠ|MOЀIgo4:(MҋX) IXW%M7\nbg/H߸3G~OgTom_ί\_5}I/O@gm*>[Gdgy - n\xɀ摤-qzJ3 KԯD'J!X7 !jlmS. 
`F*-~:6/,5$\VيC|{ϛ̳4sSVN e%cZŵg !GPȂ_"MC۶Y^Ӧ47!L&v06N6[͹GS[H^Tpʞ6g="'>Dbiɲ6:V\=&u C`*Y|.`1OjP^Cv3;0u %# X.Sn|;Fj R.N~'y͑,  )^ wl0wg&BH} }n \6'QK7CNװbtm!F'K?wN#0tY)WBGqܞkYX&缏CDO EbyH[`əy/l9O=݋tnV]0KHߑ\unl^5b7X[T:-]9N@c i| &ωՠ&XT.q> 2_^/ATMHhs8F+$:C^ ߚ{7k{HQ=m}zN AV닍#ν>O=yWKݪ^QO)@}-ԯLΚQ=h I_G5 _Ȗ;  }"s7goAo>]u@H8t%)x5QP5&L;c޺&O^lm?'stnBrgcp -X9lc5gg涭'*xk^uAAV9~6IE| Lx\ kG8LRccba2O~j7}"Q'Oxsţrtj zդg4qUx!KRd|Lp?}W .Vn)8m$$I]cp*5t)RfH"g܈uu}{HkfD'n7k1_9sJdw` ovAF͙aZOe6 4c,Sߌ8{?5owy)|NIT:KԕL?)' ;h&k+Nf)3v[d[a%Mjr|i;^m.usn+to{ vjwk,|]q<AIB0tj))R?v_tλLIqkt%E3TkCc9!XmRΟdotvApZ22oS1?ݽp4#J Qw>6 9zkjƕNt76]r~mX5.̼W`>j ̅)=/Ns6|O׾x #Eǔw൴|H'G_Á Aan-G,}y_L.$88l ?{ JMM@ɄI:Ds՘l)Zb;<6y5Oܷ!V[WʒG,G6Ut9m= Pa00WȔ6)ӎ.cD"tyFYQ|xT5l!j@N9Aŕ eFXЫ֫Q}>eYV  L"%uU6Zl%12qГu@6cZty)SrCbHGߒ'/б3xTLo>5mabY_4g$Eꄾ}bY N0# pcz`[z1HEd'C$7*<?r IRe8eir,wUkӵv<.|POr|s h? &9sXq8zVS~Q(I9T8Xuv% q\:~ݖ(} îMo^eҐ<;2MM:yњmXQ˜i9"kntAU?ղa@+w;|IBh8qHuΈk%zqCX [PU+}=xTaقcr.ɢ#Av~p<| %l>?6o G²OBylm ~M\v\HRc.S;cA~Ryt W5^')fwÌgvOM9.dIca1>S đ7XYq==6T nɫ\Le <9l>  [9RT j= K m.~P~`N{# -@ ![`@bSI|$?vW9(e7BzJ m ^AO}X75H]vRA $>ݰWgUdLB;|Ky^+[mgIR ڏc`@Xx 4n [$%s+tO]6xsg^RYÙu"rq0>V& ~6SX%(@ymg?ĤۗղboiP5L}қ3ŚDvC`Orzv鳪k?RK;ʛ->ajL7U?!G2S ޭ)bs%k90 羂~ |T k3m~̋uY7}ݎϽ|<=2x̮qwFzG#M$sq\5P& 韣uWi&]L;}'8 <7I&j-kðeW[q>CՁ:k~xOR 9z7n$ \l*TS3v+2J܈ɭ7ϟD[InpB'ʗ_8lj6Y6?v(qs3v^-v${rl]:hI}ki,fcp Ǩ+XZOXoU ]K%8FI-h~۫ ({tVd$väm)/^6O(ƪ~.ָý`Dzڒ>H!6,DNm.]L#aG|0A(}ct)2n_,?4fBkRy$,}+xe+~aɳ)mjfw.D0 5OFa<ۡ! GPRq˻Cp-.{oIk}ӓkn? u5`gp&U<5.tPQL2 KN0;&Lʋ5(0<\u.-(E6$^9|qbk̮bC=Y]\лW(& yr8r^8RR#+!6{C*`¬|=0g{|ES"^.]NGV)gq4ܒ*'//|,¥&gҨN}(SܷCR)N~>_.z)l'.so[G閿EӿKCy5 ,{BIrSe. 
ݸۛ9[}Eso`nY79 Aq.0t?B0_W˟Q?ke_5]0cQt7,w-I]tjКPs {V2<0kiOzB$I2D$k\& kISS&`{qC8vFK1m_w:B_?3^SQ`\x_u>]$sɰ8( 4҃k_P9%Xh@rzN|rTGk6ݣ\vw&6G[͏X`O0knudշul{zWf.oYd x%pr ЙR^bEκ|핵>q \UˍEqzY ziK\z'Cstoɯ$s!v:BuwF\q:N [,TGG)H7) ͓,ũKqoY`v|gȉuA3H߼/Al =n;b([^%4{dDJɺl'"az{}$s7Mp5Y|@@.뽓?հ +#e> 4=*fC9tx]3 U.aW ޛuiwNڿNd+NJ]?d Yp#%s?z kՏMG`]aDt7JO+GI ֯y9A9Wqδg|1s.~&񍂋\)+r{y` tmfQ'.ݦE-5:)\w>$;qJEl[=|O)"yE-.l^؃cI]sVȘl}C1ǕּW~{o;̂ 6J2=g5|hP,z JDKۆ[LjЧbI>u(QsIA X9&B|?v=+wb8ܷJԅ5F F0뺱"KW=R0e~o0p<`< co8΂.賨?/ξ??_?Bk¢ȶvBlǥ,Ҙغ%)h~hySbG)ۡ'ƌ, % 9;>i,g zK\8.VUJ~OCpo!0=8S7B3E`VԨ}ELmv)eϱL`$!㗞m3&เ#F(;JkL7Ac(`!M?$ U%[yaԿ<}BKDq0%Bcn8nSIojC`"\f.@RtRGø m NO\7n5 SØt *+ߤ|\퉍aI8^濛H8\{Y~/aƯ@b+{ӈJkG#0n>Q{hDOɮa&3G08&\^d?'nQ O>bUAZ )YFkM6tqt^0% v]5c'o0̴93w>G ocA0áp2J5'9CT #p$B%Y[JWFqī~@dPWj>rN%ne>C|(j w0l>,wpǶ28pFVT Gz. HnZ+w$$'O} ;"''$b%lH6aiŚi!dq^ウoO4>քcV#O1+I/cqqŎy ,UkK%-[`^}:νetYfb?g*fGR^v¾z@&W$^ XlA[ʡ=B۠ZژzfX ޿/d]2` }5I"$TɼTNj;3V4m j~үݱ/%A:7 ʟwmrvJ..pUY#>*/ Hy7v˒ܠ3^IQjP*#HkNKg4Y}ʡwN,J!s7Rp1F/ ؁Y ]كS6,W"݇m[Į -aOvO-cF̦5u>( ^tn[Bz+@lzLeMlq1 jr0gu&Ca%Q KXIv,ն$JAX4y0 Р{n=yq'G@A$(мHoP,c/k`Xܚ_RBؙv} y駦~SBZՈNWKLY*wp\IFzHlÌ3. 㢘nKZFDܥCW9g|Ɩ0^LY6PoQ8:_53_ve|! ,gRq"бkueY<l 6AEY埏p9X2[O؛#JGpt~$*iW8E{V6!Y0`pi/d ZHAO)8ƥXq>ތd!ryUqNA"ZتՎ/IJZ#BYͷ¬W{"8$# |J2ҳXezgm9kKl=`#7Dz&{usWK)n skk&Xǘno Al?A_a z?ɜ ;#-@;-?"D`C%ݎ# T}o%o@u:TbKúw߫[f ^\x'Z~ZqЕqX ZZMΫܽe_fli2Ti못[-ˋ_ГG H35杒 uۇJqP t3,A2Y[s&?_#,+o*XKsʶh6 C2'U.~=\F6 Ozk(.Jgޏ"bC,C̺2'{09YϘuYHas[f>sB{s $8[/F7,i'vq[Pw|Ş[rɫs42 'VAACY҉b ? };]o}IU'>>u -jvADq~/=~C;`9MX]Fb\>__ e[I!a,]G!}$G<333м-p1}-b#|7C'$ɴ$p*l7Ѣ3VjGez|Cr z{?'qϝϰ}?Dܩū{fNO% /H nJ>]T`ͥ o0c 4…+R0uC/<4K~UD{"0spL}#L!JF3.%hVc ^?2'on{/- B?#%1.* Uŏ4`a<hxx8Vdӥm n*L~8jk,`q$>$ DVMl;gn>}Q_xnsSg,N{Sz\WZJfʺzڂ-諾vާh򃼄|-j%j/YUJV!% inWy vTLa|˥8vBG΋%< vC^E*GI9Kg[X4}&NKnݮSE0moi,-Gxo^lS7;#xa6Bfd4۪^H>;2ok>bǡ=,ou;(Z׸eDL'>Nɐ,gIV-ϷL ~W KX||-Sl}̠{(.qv;&HeߨSЅ-8'i041JfI.Քp6ͻqΥLq/=Jm!)t`"3~f977?S|gPD}4dL Gzt5HB>EG첊ق3.zjp ;9$.zZYP<t]Xg{ c)wBlȻޥy`H! 
/+/B+ܯ:x'2/8HK|Nu{f[ Zp&9 彚񪯵p7aRŔ!hy\lp57 ]Y>UE}^ac; e3Śwfafk S VwP抋'L3mbhW6+Co7;a.{`rl0mW|\4?Cfϸ"+MLepZkJw}`In#NP$sC<{Aǘ~"g2bwV|Z(ӝ4ecM?콷5hb: K5f2w"V_ A4gxΏ7Q$촹̝-'`XUu' lm| #N\Xp*X%16hokuӳ/s#=Psfn=bKiaVF:[>ҙ!LCG0!v멒Cs}Pn%ml{{Iߨ:G 4ш0Dw!b೬yu 3'zƐ꿴a֢;W U7^šehٙ&*Dz?iVS1+QѪWX㒳ʮ~w 8Q\eR_&t0"C*L_*'D͉'p (plxUnM e  >{D<]X힁U8ٽ[GOތ_젫=/~27'XDaҎYpiD9$Dz@'Ln0c喳gk=ڤs}fau?uMHJtxv|3ozdR0\dɆ2y b7+ŃH4٨SVfڷN՛\ S*9^oeo}lu>ß8#M:\F8Pzio8No=P >8.qwz( f:?W<%R:'[f`*'I$XwA;mˇphrz, lr^U+gcoII0rzĵq}-R̫xK8tgYh`xXl9'Dh|C|'OpDLN¬0~YɶdU5f?T+>aµ2Z/F?~*̊]SލD Xr:W| ~#M&Vġ~GUN^W*N-`e} ʙx닶/Tuz/w P[ksN`܉%)BRsעL/>K"9hхV@=}5HdHw[,,F.ackBr$^ao͉cO{_C HOLsK=Hni|Z z:%TӸ՜8#z4QvSU~G,NM~=Wbɷm զM&}/(rkSQজ\ d:9<3 oBNappNjtU)vƊɔ:>k'zqA5Mf̰ro릐 QE hUvTyŋDqfon #L3xl}g ,)$Bpc~1>mDŹ~7VZZ hZSH}%$o-Ж`ٙ`-P^H^iSʹjX6+Rw{x9NFMSNMO>}%aXIz\ʩgm.wBtߕOI0ɢscQmvOA0̇޻/;S3J7͓8IXŵ&X`N')МJʇg}:;Źb^qQJ -Ut@g(X*u+ۦo89/y mZ+!>RIvyMcvER?.nm&Xk<54ة),U?wGnpK'?%b\-}27zL< O"&dST܅gF߿H^l ^ED  W{\U`~o a#t0c ~ e y-󂂯w^m@m~Mmګ1ceܟ`b y *lEoC  X&):GΔ/}fI 5LPn{=$,=6.'{SγM(@ fi-WO~ęxٯ̍LU[=^3KnL/ݪz>O!z\6= W~n>|`p#:8Gޓ0!*1M7^Qı‘rg?^MO\9eLtn$*jsaCVw +PN}jҺۈWJm'rXOR7_$S O0 [~cZ4MV^hD-dcB*) eMKNi33glr|ٛ|lfNM1c*Sl$E gV"ӗCE~Z "WH.xnUO|P&.+ +"+鍞Ԡ|FHy=۫̊]xǚ5S5d|o6wM, ?5M뇏BqÄrL~v&:Iqf {6$'abo K̈́t6k֛JL0qnLP.?i'_s׊dQIVӿIVcvI޾?S[_l.aʼn[,V닗ϑ rzJGxIOv[W}K6sɢ%UͿ:|]Y aH3g]-XW`0"ib}:,6b{d\ӵ'n8c0Co(;}RE6Z`@hV>3"?];s q={No3hTxmT.]NFT`oGt`4@ν0q;T7V&Ʈ1ߌ axoNTsЗY&/w9F?׷c-̟ *L"QW/{%hwe$lpFAk? |* [l*(,|#~@P?K@ Rɞގ\Ujf %/mՈOТl Z:[}1?%,1Lq. 
~Hֈlq~}0r#?]ymb-#éPWau!.֥%qש78v;aC.o17n=PƆm:w}MvbV.7n#Y+M1K^95$k ݝigq^부lWÓ;9ܔYr©t@mu$^IuqwABM&@'m8$t3h5YG |# » y- ݋UBŞ`eNE?rEHP xup5i;9ڼ88+="(A wL^<ꙙB׮΄m0v P+3gDE#r\~f?p9A5Le9?5$` H.I;,}3c5%}z{qtA|tS)Ijw #֒Ҷ!94uju!%ݐzD{_`%}&C_,IO^C[lV~I#O/p/Bmty{,׳= VoWÕX$t¶{q_#=$]ѕP)Y#V"b?8TUQ:^}UQ 9п~bΆZ fbepԗ'GvP\X1ˆZdok0>I2x kЉYmaƘw<&Ƥ:wgq.ŌF9Q(~V8ľقt1fOa.rDeaπJ3l0gmyj4qh \Yki}wsBo'DDa۽X?XkԸm>juR`zgУ&!%mԫbE=]_ҕ5i1Y<>'V_y((.ݑ\'KRVi S-bܲ+>kXЕzp.e"=r##iWA!^-ΘXI_qy0W鍾U.zAQ%XN^gG}(;Ecwv[f}hWam[$sb\жT{U"5Y'{*؅C[ÌN?P7\L&Uכ-l2p\X$(ۮd6,*a?Y%gmGG_|7+B*:R`qL(m,InaAraذCZ`5~S^aq/\ JyGU>6 .X`YEo+~ _,z)l5J㠊DžO[CFO)Oea7eLj|Z/{q**ogqց;/.\ mgVo:0\UWO;".M aݛQ7|㱇:NiuHd6 2ȿHݶ8Wi'Liv)fE@R%8x)KyOicUJR޽ox}/QaFqB_lZ7͑i l2z {á1{W;\їt F8~+L_`k&"1'7yA?oweWw?WڗGRmՇx*`~MXR`nb[*I4Xhŝ0S>^G\A mov,SqBbG̼¡z3D~~j$&2]hdK2: _&-kN٧Mr@jj}aL;|ܪ:6yDB6GX~|h>h3s7n)}upwqz[{.OK=ހՓmaus\h|Ii=MUfpg9n(\k^"=m:4o}/l:NK؝ET4dߖu9i|Gzyڕ3II۸BF_kvz1 w;7s8V\n{lu\c`za\`x]Oę/.pV81$=n^CU_+ Y}.8WK͛+1*`OBÆ+~ M*w,bZobb c~y.:~oK #}ݷH6MlX@[>>ncCԞ5]`ڋ5=e&:xAy?:RJ0?iy(xK-@|FJXNCWS5BC+M0?#5bK1Uk!w* 0.0JE]?>9kvRX3~DE[հ뻷z,xp'f>\ṡNjTaK9LC}&"%z'˝OߟVǁΓZ[s9g4Hz: ^Oa΄֐#WPCXI܍A/0ܤ&/v>ĦoU= Η<Ϳk~Pae0-;{HP8/yIf Īv)Bb:w+xe~>AJՉ|% XQSX+ܱcj<@ ) =-X}֮Ы:f%Г`';Gc,f=jv*Ͼéneb;U4dӪ(@%:)sliHF=33+kd+HΫk[5^uKʦW>{ s+q;_l3><=9t-'}b6 U;gaVEa ] y3!TJ 5U0hK擯^@iʗşde2[Bk#x8,K"%816n'VbΈUÀc^i2$fVtu]/v}.AϩmܫX^w6P|^I_gٴrqع$CfѮ(/"JI^s|jsȨ ޽km?{7S'7y @ОG,0/_#SpXgl5~&qT& n^lsB2nк`<;TSXۭj e@%w/o@.z[R01ek5$*$aG{gss- y+ǝF0{ ǁjגp9c\X =#T_֫}X>Y"7LUBGa*5t$<ŕ kOlУܷYXfkS{4Egt;gpFj8CzɔЛ$} _1,tuXһ5' ~_?VP&$[Nia_ C˳IعuiAKح](Ni)} !WnH8Mi_ĚuB=~yڵ hW:oὑNMn m8}Ou׽gw7T\乯qa-4]]f#k+J}"Rey֜{\@:[Yr$Ŧ;svQ^o{{Co~M? 
j}5 #1:>ߕ &-ۧY lJ@+=iuLxUq\8o Xؚ^,Arh~٦-I.S@gS+V}AYbS8eQ# g`)nq严m~p ֙rŤ opպjbw74?jӐ?Õ˲JWb1*frlnGVNefft5_,f K; mp5m{#_jr(IhQYQRtd%Z,zP,zBXkUÆ@?.ݨXYǺe81MԺ}.dvWrb_arh"ά #lXG_r/~)dbSس8DtBG60/%q +M+пbNC 23԰F}x߉4Tl!LkJf% 0$ܪfoa3C)\s&c[9<0-K?/ } 똜pdL)㪯\((ްʥO> C>yςdDȕ{"ޖTqN-<G[EmqDXe3 DԐv#U/MBwu'0< \%Cq$D:Єf\N >.YqCշÛG¢w-V0I$ f]<_T$\`hnj:qy8t2Γ|z؞ΚPp8m-V^BJL8_g/E,-]v̉Ki.XIXT܅um}TCsOxA0o\X=6 ɹb{tfnH%tÁ$K>sd}$߿p&>?az\+:q6)"6ԻF+ʁvC뗁!};a;뇳*=ڙSH"KU.'Q`f1>mi V_^3Y FޱLY缺eۤu"mul4&nt/yw#?XoRFlpc&.NC柸B,jؖ7Qx5MD8214a_ܚ8s0Ϯr:dc8b+0fEiZ0~NSJB}?j`PAc[2V\x]!{[d\dzqe /R,&G8Xq>3K-6j\OgrcSJN}/ݎK_+5 "E5;oAdэ}7#~B&(O&KCC}A6vjgݗ׻bBZ8SsAB  8ƌ+JtcFOGab/X`uY])Pﮓk[ü|$)0 O~SYͩLvнt{lzg8H'~Tr8!1w@Gr?o9/ߡ1!9,dPB$>K'YZd ,;6ԁ)aUn^{b_.j>jl *oy䥸/Ad85zV'eʦRG8d<<%$+qqコ@WZةah;w# ĿjyV)я uƯ~o ϼoWtHC'L>PS0`ě!?+ vj`'iCyu11N9IBcq)M|}EbS8Ԙ9R+SIEH_/E=b?\6h4dߦF.5098W4{{ҍC2aęYם)i+Ly]gut5KjVWhܼsXcaܰ#w׉);u9|;f ^BWs^gW% ܽy 7B@g" ,\ZˊKywn_a:k $+rk;y'ׅ>ys`ጭ2!b #6k!Dʥ!;4uO'su|W8)0W~ #S=7cӺt-( C_M\{qtQ7tpxx$)QZw\aj:aF f 6 hcٲ \Y?ykJ?ĚIT0108L.j˗>*9WnRJt`PC'%+]R*LY}3#O9bSh9@&#ص:4tSѳV? 
cO_A $` Ry^ n)G@kOփ鴭ߵL̪Eó2Nc+Ygt/ߔg^8.p&o9yO\ōOS$å-wW$v?|t$`&Z(yO,2· KumLk ؓr\KzK,c%.NUd_wPեdogceH&oKkg^~mk9Si=p Q20,X4fOkPq.j+~I[{,dzD/̼J}a=_M^8Ly,;q&lF유/vc`ur6[U$^->6?cNMg"rCq[Y, S@0IY-XYI3Ցb{v.G>U] ue "҃H4vt퀖CLlf.wk7/!.[g-pHd[-lMmxbէ_%6Ʃ;RoṜ6h>^3+:8mv_@V|(L/KCŞ݂7—Ե|Cpa}7i'0Ħ;M6Ŷ+}\QhȓdmHts8YHmWFMĬ.&՗[܏W4_|'?G)+hy?ŕO' 0|7?v0{V81~>{u a\ ]!Xt͢U4?~AxSP}BH \MA)͕,y$ٶJ3e!(ιfRRp^uZ-dN ^w7<Rz3K5}WOCVVћ^s?"8.;9a3XjuTt\821ݻ{XZaf9lQmZce]MEYb-IeMCgH7c{ܽ F*rs~1.B(0]vNL]NlXngw%6xQƆK#-c YNly/V{d*roa,K w>JNpF_jf,^m!h;.W%Vha:I0t% HEgGgp7ӊ^_Xrgvme#FqkL:ǫYP!!eS^Y##" 'h4ʯDž P'+7 =~Pr?LI~Z1Ϸw8 ϫbiǙp ôHM[GF ]S`Vejn\`ai}3-mWB ;%a\4TR  vʯ]$3:=NdzzVBaMZKIB:wclIp g&=w nK_z@ &ˊH~G;QϿ~ Z>Ѯᤡ&;H:/>}{r'~p} EZ3a7 ,@-L|W_Oǡڌ1}'.2\zUI6^tBxff d;dE뺽M;C?]d~Ce9ty_єg8c,R'unE^1="N4&XNV=S^E ?l)ǁWKbg ׮7q ?*>J+R1aU"qf*~YxV]eCRHr_c޻i?:aڪe\y 6Dֺ9U'o\?;G9!&sRd՛B pBvT/{z`,.l8&M0tZd\F%cRn7۵ iZ}-pμҳ k-5dͿjmw$̑M cvV#X7*GhЛ,~ =͚_㰕@[Wo?L~'1eR+YgQBk(&x_#O[aMwvttWq o*Ig'f:6iYT͵lGbǢL:V'R͇`3v='4ydl#vl%bq:2mFc>ښ# >;_lĘ*I~6yWo3̞qBks`߬~2\pS087<}יdqo;NHo͛P}I_6?t4ų$f [_?4nJqÔJk#~ZE*,|j5Na(w57>"wR%.K܊^chNpx_JPO Kg:Aɉ }BC"?G__=я10:F-6+5_QbAs8&=0vΧbh_ox~٪?7ܸU\ "KéIOŪ(X:/3L;cT򈀧B0r;G01z;r̳ZW~2.Wf;vZgW5݉cʽo<%ިlG_B5xd(pr5jߡ4pvn؉I7&X/ )RG \v^t1JحZ4]d&y˘L'ȇp)ʥM˕3CGghc/a̖5b-PR7,h} G[+OYm)}gʎr9@|}pB=IA1_R 5X5Geߑ@ %dz2g15ЗUŤqG\0u$ch':' #0{h/KVϿ w{H>Ǜhjdx.0uI(6Ozh.2s=_,}Vc P1+RO)͠VS uys0YWu O4/pW" -aRWf?_.X[XzNd |>[6H+Q, f6k'9Tp`P-uvgF /`˫kvH+z^t VօW 㭃 :Zl"ɼVCcgA.]V?EH4<;3~WH-bɥ-@6s׀9rLp*qL<:vؙc}W+VX-Xͷ iϲڵtqן1 OױJZ:O=w߾<\ )KuW#=&dH2yPG5z~j%&ߐwr ckpʸ6-} &ʛ G&Ad2d 8Uz6awd$8r/]|-'.³bˡM;ǩj>q'X~.¥Mr‚TA&V:_{p6fQe'OzJ6Lyq}X(qY F0>q{B4or0*u&p~(`wVx'H1>$L4t1.~gy?%9cq2^=!he{(%I\ _|5MsqUlk*g`r]Jt<<{#.@9<=@wi7iOY A c`]5d_>F_%1lߗ$'^l3ҟ']:s%'Eaq _ݢU k53p鑠9 O:=uqYX`?܈t"1M7O/!tBd*EOcQ$J(H%I]w'+ -IҒ$J$!Jdם۱86_?~zùp,vnjp|lǥO7x%&: fc09WAU􃱺b;F`8jX_^SzIJgmX4C{-b|_a~fm L=awN;2>( ťw9Ƌ ~^b)c|FƬ ZD|t_YNmZ^{ ͑/D\zlXW;qWoW{9'gĉDž_}s>,&'`곽@s dT#f[;LQE>#2w(bƩfXuuӶ)3',47HE9kr/nǞ ka&ГJyސ#(|v $W]P6 3+Plt7a Ή.e {,G3@%GCK1%X}]OAvE+wZ7dɵ{ $\ԁ,e]ACɰUvvc*Z54AYq53s|C~ 
Xtsrc#%4RwhszKid] k [LǯJ(81g/m 2'^X=qVGܱ!M~9oL is*8WK'7lwXcA[ۜ Ώ7kp5oH2:[`XCEigC#lGqԉSX5Zj{Gk *Tב.||8t o<|E *CMm`\<5* 5 D^d4l3iLolmf<\7[aN_:?Nũ@eRנq!ǚNƾ28#eý#Xj>~(B}dL>ye `jD6Abnnݴ 6 &8mK,T'n(1`DƅwvhIZ73%/>"vJggpBčj5"0[a+7]n<S5NBU;%|G4-,*޸w)8{!7,V {I:M;]VOXq.ş[Y `L>|d}|Ԇ?] ܿ16mĉ[Vz~4VӸbNId/gh ?zN8/!9@%Q=SJ핤ǑWk7R[`=) ֻ_َu}tazR(̨+}#Ewl?o#tI?zXO$Yhell'\O{R{قs|7fFvT)- Էӄw[>ٓR*0[\.+~~r؆sUqz_rhy7ZȥV7-]c&G8x@ u *-qx;ҼKwGkL-uj>?ͅ`|&5*R.~1K;KkL%- ? /̱;ܾ߱+*TsUݘ3D6; l1AuZbw7+!Г:o{bpw߷Mx,:u:F6/Xy!_<:'y'y"]9'_,=U-HZcOŃ0/}!f t0q̵UuzuӁO'rшƬNWk[N}sn~R2[ʮ椰IE܊Jȍ6cmبm 'y^>6N 奷aFK>۝;pҼ9"'m~+g8(2uĔ)g=8@g~eb߈9W8Y(L\ϲ9~g]96o'd?-AϺW2Ph ;NDZ]! e>]+V{ vnOtih9ĈZ"X2zё"5VpL\u\M*xAoXsDXv3kw8&}u`~!)AWmUJ.4p,~6G%>K a q~P„˗{! oEz"# P _zJo- {?^J"uxʩ[@oVt3}evKo$Xg-2-3nSw16x];?`5%D|'p6U `G?Cm;U w!WذHI9̩.m= u\H ۴95섏s!X.7h$( 3(uNJYűQgmGU=m2L U,hA!5n Ǹ! .3) &%]VspH)=tШ 1@HGzЯQ>7e#d󂻣7&lnc,7}KӃ wm,-6}X5}8C'] y%ԍ2ٻ)3M2]+#agN-6yVzfy.6/Aݍwj8S &U+0$!&_yj՝ϽBA޴jYBB G|zu|QGX6%*Bo@lbܜ%VR'?#yOιbv`Ȑb0maYކ~űw8!3 ' {)rCpY@"Bɛse98FoS! uِc2L%ʶ0P$Œ.Y|EwN:QAW8 J\:N)jC;,ߗ{0?Ou!Xo%^[7^ ֈGGcO0H޲)cѺ`.QwSijz(#t_TB[w(.]"Q zN$2]+}_4}h6c}weR󅰘7EGw-*^5+aKev`9[_C';_y8JFjB]O㌼5w^eJ-bʶ.$V o9ڞvM=Ka$ܡA{?b frMv/ۃ9ua[FSx&"6^Zfe<} I6,̫K.n{?voϭ@z80'ɮ'sM;8yƇ^vݦD _oD[o*$Y=W6yn[0G)m܍&*I38t FY/ ydCf߸?+|'nv|2N$ 0jغ5H(˖S 9RUiuWF; EHT^#jdzlO_&oN_Tq4eOA̟=B[.K_.ҁ~iC !eG2_ tM#5BM8xnZ hR3e=]NaV8(%p+`Qr'*qT{>pk|&l8E<ā!Wcq ߥ> v~uכwvy̋BԁQn &H@էAٵ1$Yk]*dRCs *>B?Of/KNwV튓h%,_~q^k{,!(JQ0ŰLڋzI`ˍN6zSƘk l8,ouJEg8gl3t#M{ʉ=MXήiau =MY s$@hJ]ѿ0woR(}s&'TkL!v+`Y>.Z77q>VNz.9i? u6nb}f핽ڣ !?F8S|Kb:N|@ 7[Ntb1HO~o/;,)oVuWF hu@)O Vd% $qA.9o79;wKp&keQ1"vcl,u`Զ_ƑGʪC glw`W1 2;j%N]lM`E^lO,6 Ӗf oTT9ҟ{\߸aHs{0 %ohԙ9sl,_s:bckS< Oњ|=[}ᑼmuwb͇دd#.U.jw.dSi]a?n5 h]={'گZ㗉cNqSHI/dnަ }l%nhDIu9lu 30c90ׯuZnd|aapaVŤXnx\`/:2A CwUÈc'Xy٪cL!#fzJqm{3A]XH9K~JHN3-솅/Boo ީ]"bwYf]G6/oK޼n`ZtQ܅A7, [Γ}?h, ?-u]Nk<9m=ɳo;H6N%mNꗼORu;~!WSH􋓎wac"kD̜>B. 
:3X+~=8Gklnl%9vnIx?xWڝ v<1*9v>G`kvWro.C '۹SͥAFWנ+ә:ɱʍ*X1zjץ>[֐'Fv&@PG}.9,w mΧ?&Xϗacu3WH%őYY:zĠ!_P yʼn$_D+Ѐ?C}L{:L.w*?hGK 5<N iC8nO.M4ݯ4b퐴jSy=3סh'MDI8* fz:'(O2aێŠV~;n\jgAn݄qR NI/Ws%IӡXMb(o__7%8$}Qc/,R^tp$ML82^;o>W~èȏo~[jjՂX~֦ ļM;=ccIY$ΪzcvZ]T)A3B籦@_n% ;g%Ɯq=6Z ɟkqH6"O/Jsdž5is*dS}

G bɺr2֎:99,xÑl, |a o83^HIf|sjΘG~zy۶!H.7?o "Y+̄]Xǀ: 2ܾՒ<pD[DZU'ʼn߲>Knk)1 e<7uf%xPR!XA j^ y.q@k;c`?B]wgԠ'jkvrÊCgGf< O8ѫ2zpҧgƧa.2W63m`{Q]g7,Vq[*nwTkŖϛ@=+ϭbSÛ= sc@4}XӜc??'#aᆛXlpgY'njށb3%CX7e"(hDrPx9(p·m!XȖr/1z* &GO>}*]OX%)^ZcYE6|+#C&ztT)UV/'eH6[ ~Vtgo?so:Z)0~8".hzhM)qЬ#>w,fCSV~o@ >6utBW;9萼oXmzn=1>=œ\M-`ϡȈ=ݿ:Ʒ=..Rp+ 4.j(=t_gzJ`<a?5yeج4g2a{52Έ|y믮Mg"3Xw8-X2y;u<{Iܞ;ZWGq-Nu51շ8wNGՍ:ۓ@ =O 2)ArU86].g>w\: %ၑ4٠uRvq$):c?rF'0>"٫;n\ O\ 9c_OEdyx&Z}L`ˑnѱ;|灑FGH^_!ko̥-U1a_g"IJ ެ-`q[OW_rlVDž_X^D9nNTbG+juH.gK+j fA79$hS ^c#+ERTo$u:=W,mvC*<%@v Ohh]̪30`F?C5f')>My^^ j;FD4y1h'_fYq8$rd,t^;-DjDBM\Oo+Bѳ#rU`=j8h x36Lگ7w@ځc8}o2Eg'mCGyIsy폒`*bgY$GpɩT bxk%sfteX) e FIE|N-t!% կ@OEc?Gu2}~"s?t Ҥ7h X D^|EzQ`(sq!s~KeKKKg"A7k,k־݄mc Bӧq7i/TH[$w6‘=quq`0|ȊR"HN|a0ڨM8-}I:G\~v[(l8u3vɞP"QeHOPz4sON:ĭ. l5|7YN5LN*\kYy:=H7c{b)9u,qt!حdDPV-Bh kQ;8H u/I^yM81k}D]SƈfB`(s<;(KQ6Jf1Q{9byzu4<"֋m,+[ OꗕNco3w%l`xkxza:d)*?QŠSiPTUPO",'Ȋ g|&qD$a@5{=:8t1hm3_a'(WLK݃=scU{еNv.pY.xm4T.Y1ɣ r@Մ3}(L:nGaYfr6HTN$UR=[^>mm%_CS0S[_Ԑb7V&>t.̼i|]5r@ P!Yw3$ӟ@>htn$G1WC߉&KBp܎Ly -ˑ!,0)aKSJ)ośG 5>mA}P}jB'*e vM/`MoRj4ѮʲrpX îW&a9zf̾JB}МfcڨNTxU)oqzծht"~ogo4Bzs)*ܠ/i )Å21wNj zeQͫNp%& V.SMn0Bhjxv+ o̳o5*<>LpKՀWfT+/)% 7oM`mmX8TiX7/7]u9YiL2TwgcG0P3$]vtUN=D5;mў#9&~A,V㕊w8{kw7f.ӊ\x&oܿL V ;5#8/T¾>M_R|%Y ΦG);C5]6Ny#=F]} τՠNtN:e9aeDe#r*b6BM˕|g:Cp~:mc00BV/ B"a'?F=L4Ԉf%ٴ6]~̋5o;ȅ[.[Dq,7O9s^^j.M™lj i!(g$h`[P48Qar`bhؼ:qkN[:C''K[`Q3>B^ _`IpĪRHrf2YQvuegڬ2NU+;Nْ ϼpԃF:eHeW[Q+EC$Ϲ.`/ēHvIh*=#rKLxCgD!2} +ܯ] ~ qFLS:y_=n v2/mrpV}TJ;P/էkqqyŊ{f gpOMjoJ7״ G{0y;^ ?j sG0r؆rɪ|{6fﺮL^-B 1r@Q}Kw N 'tlL=l&H s@,8Re1mڹVZv)|`PYc'_Iqq#,_CzL7aJiwfݠߪiPLg JZo×pŐ:_1]h[o8!`^h7Kq8\Gxܹ )uae!4?~HOe$4#38%8o>a ;ZlŹIM{b>cӂh]X^TWs"du"EpiOĀ/lx']~~}v+Xd{f5d\ Mk$R/DBEQн⁽|5}iѵy.o=ތU3˩㤱EJ߬4v慝2X23d }zzL s,QOW੓SkG@ZWWqfy\ FmN*xI$$9Z$iJ>3 F?s4gӫ=$PJpJ?N qB5e2{"k 06K4\4N8)['B?,$aȮɾ$ ͔J$)~7qܐf MC!#8f b>fV,@.D08"jiO2$qL- 64ć_[wn@oKE.J$?ϓf6I=[][VhU'aUJI\nؿ`w{e1צ9 d'eQ~& SꃎA- sgŌ6K1Mt\uz Ӆǵ Y>a|\r-2C~[~L]s%b wMo\;cd:.%K%%nHʇ;=3Ǩ0ZQ3$o)P4HO"f!CE3#…Xn)0H? 
I/#{0aAG/ xckg37?[bz9,`9!<{(mUK\,$dW.D~ &MG/$X\󭿙,#Ͳ6V9de8tzڦHg> Nߖm1GrI}Y&THQr9< ipaVP1`\uwf/K{)En,o&wͺq0i'!e1X]IW >&Xo{ O7XU¹7e#hI/]Zw'D!wܶ#)=aٱw0A 4QvNZBӄ_վba.lAe2u:ʡm„[UאvXt.`=qyaO'XObmVdv;f kgĂߎaI;+&lb"[`0:Er:ZTNwy;ΟSKYIaY|syɣ˵{eQm]}t-,Н+೫I-0gTs :;o޾rJiMJq81pã'&3sĩ]~PaB;'_'O ~*ݤ'0< eI϶VuՆ^87Sk[%]rppu6 sդR#uuyu4;w"j= '<6 ˧Zdg1Yn keS$qAPeɈdkGnj|#" 'MRIv#%}tY5bVz}I7^r'4ʢ>˫Wxgǿ1VM)3hR_7zሌꉸe@ 1 Z$ҞTl|KyZK &ģVXQǭ0g @ gߕZÜH^&,:@Rܗ<ßoƍ8yL=:$ǒ]ߋ~#K_Z>תΐ!cIJ*5t7jGA'+^'*Ǎw7]'nz{w1~3-\6_B1v0gqvI}1ȣ[1ʋ\^k ¿,HfTW<|lUUl|7#OJJH62}$#+A󒫕Fp˩Qϙ0wW3l{&M&uIZ c#X*5?vVu C͛ |]THyы$[]@j,a>PV-/eҔ/҅% {Vll!NBJ%Pm|*+Ku>"$[*]8YLf?o8FU#_D,w^ K\DP<R/vAF/p#'S˱+[h=U7qTeݝDsX=_h.9kMؗ~YΆ}MJIvBO|䧛zؑ־ǼKpՊ$QW:j zŖw}xֱ X?O53m#:':%tÕscQiN/D_o~tU|NXegbKGd)犹'.Vyz!:y#@?(Ac8Vt3>)UwjK2¢rZw/lz!OMeAxpy$¶M$e&6].҂7]\7ս:/JXm~( /̷n"dVC}dUx]2D!^NP7S*o͕lJcCFg2 *|M>aOZ<U w}*ʿ?xDar⁃tӿZqq^ƛ #Cfhqv7Lȼ?g話})cYM~d8wǜQ 2z[Âb[-FܯP2{ٱxalZ/ *`Ƒ?6z05ig·w?*|0%PJ#+Lʹݭ{ !]5֙ř^ 4)X}s6`}F){%X:5 W5i־:4lPJ;G-9lveLhDO@׺V -y ?NΧ4˿;epCɑ!!hk&z^ gJwAyGl>=>]:j-1-O[lܶymԯb_6F&cH-z: zysm:|閇L7?w hr-pjGہawv5URsD _'LS&̘zh iPS}Iv 4o/"Dqp1+viDDO~ˆڸ!d-r,O4|CE-KpaMk8K⏰Ed*ĉj''_bz`Yh X⳦XwZpqOGINЖDo*&Yg< ю1h0$Kzb& JDxBGNU-t$o4nKlJ%X]϶z׌&'0SbK0Um9Ej$_g|~I#z!9?\΀cg|kLɺ-V8_kN5ő_A"@?W<Ӊ26st 7SeHd=MR҉ vﵸ=wlѤls/ u^}/jR9ρNN&ƽWlONR 髌eh| %dž@WlИM?c/Y#0(tԩC]HJFn tZ>c8nu}I}k{At]|8u/^/| ?1J6\zbE6k=;f7ތj >B5`|r۶><i77,%lOrh8٥U7bfj{`X𝕴PMrɺބ:QYFqt%Wnl' XGog~֤Yl8:vax:0XS&TcrL7b'Y6}jLXS\/A`qwOi蜼Z+hyARfS7(u|ƞAS4 ֗dtyם$u6[šOJD; bD~uA[Vֵn;qRmk PszTfE˛C>Wq]djjUWm4Xum', l=1;R[vAcV%/Iy3+v癚0gkoAg+a7ĮEʾ$n6ELxt%+<|G.r'%!`"g6 hN}!8z6n`v?+aR.xa8UkndF5ڏm[jGH R8F,Yľ' &ir%h8ĕk=!$/?vMl߼Of~ʲ N}oKUr6se؁I|Z]g-`э{jq[b?LMj߰eyO54oTs ~ ET]X,1'YSoa!lcwpwf.:%+'؞es>+'pZW!9,Yq֞ᅐu@ Qu;7^cX ȕq!S∁h=)UUqJd_,D!ؚFaւuD&ioO%YkHpH5O~ܖ q0㖭weO]^c^ֈ&/">_0;\/VDԬY{$yeCLGNoۉo5"غ WL&)wraKѫ4u͏x(1xfqiK lc?$N(h-EAm* EÄķĻ%B[ ) /ם1o[װo6\唁$T* w,=w9$%3V=pbӕ* ifK;o%؃fG.iRPSlS(R M+$futb$ QyXkJqg[f1q3Yz#GQQzT;vbޛsm5 zw~oP*? 
JEoݨ3j\/zCwzJ)kYqab/]}A/˕J}"~{Qjz"W0+mjYQ_ݴ;$H]ofIz66_0–ܚA9=pS6G%Ջ98lsT{۱A|?DzievU/=u A}W'Lqs3%AټgSQ/!C?u?ݎ Fplݵ\ЏWn63rk*"s91;D?ʟG <3)F y8&}^ӿ{?.Vw匁C\c_ah2tk w}ă// p6^\Nfd(,"*v"{&|x cq5|X]s[na,;6._y7I L~db\=᎖ y]sD]ʘYuϷ'[()˭61+=rJ[ =?'Mu ~I>sXt{::, <׾oݝa>GVyIL'$MM`8li᰸3OqbJ\d-"[¤2LD`[F^@^zl6sW<~URƑ貗6p-*}0 ͜NNt"¸g8=A'J!(Ą)k3.0m$MjxbLX%a챺\LфVpЛݷ{֭UpKpvn=dqCv; ڜ[v|(#5^t E!u@ܹ=kX$ 2ž|OἬ 믝kIݮ?i{mڴg'h#$LEE Lܦտ\ِ^ɝзqYd{s^)]uc-~ oþ1ؿ0Ju\Q6~l$ɖ{lu]Voac8aWي>|-= VѮ2Z\ַjk(OM M;q1dmXK`Nż;p'E.oԧB0;zdjQd{{j L{Nhk =hsV9qHGf}[[qpPgys.H,x[M7¸rԻluݘ0;͇Üg *C}*;nhU]B|, .r.Y\xk٦I?VpQThl 3;-{:7_땙 㽡ŪZ0nUR뮊{ ]1J ֧ڔ_W'P |#YƦ}N]ۃx^ao:av]w$%i'ba/$1i ZXgm8ehsj=$vvxʝR)^L7? *~у+P 5Pu( H쿘"@Z<([\9H/cm '):M˦5vNU:2Ja4pe#9CgFgפ8I+F!23u]Ty^Y%]ݟ?7#4-;dQkE֮9~bz⼗}9칗-^zzXs4vRɾy+fq*KB͉YR.=[3TG8hx&wSj£|5wl{,>w[&[`3T O"Uн^]ەv7ȗ?$IϑC/odv)i.S> EqcȺ8﬎& gp`PH(wv9c Sj5[V)$Xΰ>'m+-K`s9'an 9C1db= 6ުCz_~x= n0]' j'>ǻgW89[)ڷKㆌ^푑@cT"C9`:KH9}}4`KWe&9iyx,NT!8_{SN*llI֦yo|Np|JВkv$qD:8]Q<G篫*tci;ӻo} D\[.5aN8h9ZÏ(-q~ ,?Qq'~~9-wC\Bʈ!8"/tKzbWZط|,9׍2} 9K\:nk;. <%o6ZˑoK~cc iO}V3X~4}HLX\  _ʏt[fX{})/<PU4JV\y](OAv0'v뤯0} {IA8@7_؃[^U:8=ǡLѷz-G]7LD ?{#a|ЙjLvl8Ċ߷qEiS햚Ό:Q{g܉s$2'(we?.dUaɝ6'N@eyg}Юyc5,V %SŞ A'aԽ.Lݞeo GS_+ʩ\{]_GlU.)& 7kTHn銻A/3oÜxo1/.M0gW;&ĉŽ)#9..Ex ollKYűO6S{a@40] N&kUIŀX8gu}[ X)pfZFޟ-JP><sK=aJW4 ]c7`D|s1 eiХlnli#g>)tD-۸cY'81G1d< CЕ}'ΓeCpUC+yMWgLyC_1?ۭ܊!,A:+<ǛEsi·yXQuL4u[z Oow>axsq\Q_\ 9wl=sz˭EE4=yn^`fa\y4EWU ~'C?] n15Z兤$ޜH; ?y+sfzI>%,*<$_&Jr}10!ng`;IP4d|xfװ'ܻ #'p"S ֳ]{eEAɢOqSɌ$9?K~ ΏPw^X LEäI$ϭJSԏ~=0neLaWoA  ]*db˶ǔHΫefp\ʪ]qGq`w׌ 7Crl6SZ[$a%f/AC;{_gXo04ԞQGzuQ%FqƤBN||M%/I&xc{  &(.vTa|:9lT rwqq)a?"ukΏZ`-? 
}Dz >nEү~?#}rT86t.|܍Ukc[Oġ_(''D &\*Ca# KgJV;'Pf0 d}t^[mϺ/IB 9/$woOiاJe u嫡,鍊у'C˥k?LLu⷗ys (M w܆\*ɒоn>JٿٱK.WXeR9WjQkzj*T̚ Jan󂿋aݮ[L1g&g!zcFc|*vk ?*v,gkCrڋxQ5a GWs3H_YR ;m?`AWxG2pc v8wNaylz;\n^$Asc繇apUx"L,sO[6cʒ+\dr:Gcyb@_W&Yn:bw!`N桽oo:02 UW~^öҭyH{ʹ"#-`(Gץ4,.=԰0/?V~h]'Ɩqw]ydNh?~djoN]$k+ ٻƜWiL%ElXORWFbu|wMvIaVX}/~>U*8z #{w< U6Wca瓝%?T1/N[ ؔKO4E072N '\g`?y v 9a'1 fk2j&l]`?N 3 !XՔ'[CjуW0'm%nL͓j[֓/6A +6 -~!_+IHtm&\>񊈵55aDث:?fys`~e;<0/7$TfPdL9e7dUvb Qi)G, ,=)ChO\{JhȫTxqOHNdX@3^vO3OI;sK  򭺙K'7^ z]éCk~EEQܐ SBe?v%YXzuo $Gka\( h" 5o$}Ӈ4't]i{/}&vpЭ aݼsľ xκ"H@\p ض*_e&~ApB^_Liˈ$2Lh]ُO-8c"zN ze?Rũ'g7dM^;aŕ8$laFL[^ZL6My #Mػ<-*_z[K< =AV6JG~&-é;_s&Bv ř̍U,/NRg#&:3^`]ׁ1Ư$^S ZkrK ͆rE/wq䨫ňdi_~zmc0HxmYWzޣ,9JP5/@:OQ)ލ<ۨ+ 6/7*7f73unHUU$f,LCEY7Bg??p/>vsV?s 6Nfz;(<\z4v /nߛqyUmu>0=UYKu [gU倣o׾IGT$`4]a}#1{zR$ԼpiKQlT8lz{Ong@$[p:0%9iM_d,\ X6A{m=d_SqͷvWUߋ9N]&(%gˑ>_c,Yy?;|%*&QaYӵ]q![/7єW:ɹ$Gݴ:V|>5WG^!ZrG=Inn}Qьv]q2;Rea84^J䷼Cw1!ewr/ mNL4KK?E#-j(dMK„󷅉fz6Wܿ>4 w vW@9[ǰ`fV&ߗobZBS70!{w+''s?>.@k֒ \VL>& ' 9 >vPNt U#`„z-,4v m/,ԗC^pžcbl0:H_4X:8M7]Qʷu#|'x(}bH[H](*W%Pa6~"5NRTH(~*'W<~lwV'0(f? JdĎ[0EC2Nӂ]&&cݴ)(-xF'*|| ;Zc0լΦt /l6;ܜys qQnϊo%|Xm?{B]sR{\}}L>x+od N|ڗDfp,`4vf((q؍*Ğ-6OSʅ!G:S_Z=_ki(-GjNc7@ڑg77 Nu ? c[ybF)9ި(!ui*"k*3~U5W+1ɿO-א,"Ozٍ]w̫ yle1ty8V&4Z0W82!eд8/7 =EnИY{[N9!\Rpz$>H0KK$fB8v 4_)ȨaGqdhmWާ̝YIׯmVgcZLmX?,?kJcS< *+z5f27U?s:Șa- cwU$FC͛$a@u9E`zl/M{7 }BQHkjm׎ fls!m|?8I>d7݇Cq9\[qgaYk}-߼ c[WŹ1uOS L%7d;B0 >[u[$r;6*ɆzAUw8?xl2. 
;xHЛ;/`ܓZ.Z.,g0A᳃yH\zv%Zosv;WcFeI@]oc#2|mů\G^uCBl`'m?Y}Kʶ-!wsp;iv4ɺɓM0=Ng g@4 &eԏ8vtB(6^+Yâp JmE+qTхG$ꊊ}<ñx,fZ&xE׿/'m3ddckZԥ0myʃ݄ՙEȴ#l*Yfڭt̺Z@ Y>Z̓<Ά8ۚ qzoP>]k\J,/R{8GAgni?ؔ?eכ7oTRj`4K8GeQ:"Fp8~]Y*Q]iZr ځr@ekvt|mȯ@U+ʲ J^ۯIxsh%a'vӏs@F) \|]R츱yZ'K̜;̕9^Y8x4tNB 4 !9ڥboņzWHJa;"Ka>9`ܛ%+,C,9XQkI~[礎ՒTrVgJE320ܲB>LG,+b-m~C`GwCOoX݁sty఍DXR0R-"4u줓홹SI?ΗV$W+|WqW&i,\OLJHʁ_}`n×ZdkU۠fS'<Z隝E[BڥrqZvgb}3nЯֳ >q3Qxd?v܎)[a3T3ɚTv*.efصii8ȲY-/[|pڊ*&B[_1غO<߆0>|KjuD=Lզ\Tϯ)֧`X?8p̀h#v Z6 +jxUJ0#n͟|AOOĉME0Mw/of uSvOIQ7 I+g?hd4]}{esa193`5=|XPrЅKRh0'<6M߁!G(q[`_I1*8Lc Xs%; &l<:qUd1B6]p==ҧ'/tþIu84 |jWFg'G\\Y~,vbc_5vݰ[8S::^4[fBg>Shy*-YlctkJq.)jp2ۃdۮT=Y)حvWml^=oNVfh1J5CIyo vamdFhU nWQ3j/+}PxTF=&qcQ8k3v~蠑&.\O7ۇ=uwYv4ƀf"%pk`]?ŞNhh5ܛ 9 O&va}3˴jg]SD D'~\MT/D엖WiW6E֙p%B֞*a|M[تt"/̆э;^:Uȸ,o7S5ڗD0O X|,[^j䫇Bs:ܸsŗ;ҡv4BYǍuvd {3;OK`A^4L\=9 na%96C.߾!zLci 0- }3̅2SH;iu] ~fQg׌KQ@;9ObAJi\~;9MNdatH(4kșyv?œIuG(IR^0 n(.]4w\ E=WغamsM;⡪;͡0c<#'Ta{qk )Kӏ8jGN Z'^S M; >aZ"sʉ;0-o9JN/777ly-?ΠMH?g'B¡: TKYÜKX@w2&^̑Tb5[@@L ׋__ȠN N#ϑ&7e (7;oY^PcY'c9U~ ;Mp5 ck6YUl~)* 7-DdO!L&0B0Up]n^?/{,Hb =u*_:j8嬄Mz]ǎWEshH7Ѧiv/ŎQZXB~,6;s}*<=׍i1VL7,O%cŝrغsLշdvhZ+dYk3='c_+ 1c9'*JJ|p7Rbx95}2u: 1csʑCٍ8(‘(ԭ󣳍ZOY'^|>Rcg]hh|MN6Ɖ3R#Y=(#ʑ7qFAg=JawvP'x{:k܁{o1E@缕Vd 8e`l7̝fw㨻؛tS乿w۱fhGhMh4;SK:6C/sٳf8ۆ3 ;wevC!w1S: '?i Ӫ=İ79ЗDwFNޏ>f'faˋ=َ2#3)`6@m/jp`O(t Tp\=J E4t:p}[ח0@b|UrR0W'Lݻ"pHhxM]zlN731erTk'Y dƥ:JTf&)V`I{W׾핞P/Ra)Em_mp&&ݸFK2ڴar[? 1V7JV_6}#Pީ2"II'NK#QX~fF2^LTAJ/력VŪTa쟽ָUG:I4I@pwf9f:$ M'\jqb,.gW*Urp&Se1(0FZ9{q|~؏S!? <}Uaٷ\+NXxґJ1;R!z%lQL08ĵ泊g;5Q<*8_w$+^1vhc ~['Α,]ʀ'_O2+F_FaN!jմW\T. 폨H0Jf9`hh̜(~n~m1Fsl9$ Y/ߓ5UO/RH+u8,s &)G66eCbqN*;H a4W?;.hyS_*6Cᒉ_kwwXMB#n nuDI ߺ`q^"D4ޱ.+;p3; [:]݌c_>,k5sf&ARBCmE1QWa(8P zkM. 9R|ͳ♰3T,,**xurC]í#O9IΕHhݛ`ҤCJ(%a@d y&+*"5,H?Oh7짿\-w\b Mڻbt~G|g>ka >_(jjp侯oU/m֜666bϔ[ZX㣮Qy_ ,局*6 iV:&Ƅ5J0Zs󝂥_ @780ӎ?[nbZI/MJ9k -<}6mR$ҍ1 V'G Vԙ5Ǫfr81v@fg8Ö+',S.G샷oզ B`їW @;&{Yrm$}|/'g7vÕ^m'7k! 
:rPa{9w&^}415$)!x&5vA\NLtp>CBO4aTRx_u$sÏr~v轘&kvޜ 3 7]+8dC?O`jSeK[6}iA2zJP?1V%cþ 8S6Tay?/qr+NG#)ZpO tjPp1 V,I5+">ƶ3ơ|`R-/XV¥ ო@_`V!'k;JF\cQ!H;YC}c_N5DbOH=ud [VMs75#ˮc"]Lᄏ|?Eu84ի`?'Вf7hg4*P}s$,m739(DU 5VT+\K2*4y)awUA|#e,bű+;=o/@ǭ$KQ MX jvr=ad΃9ڏZaՁ!M]_P>`Ͼ#q)@ReCu↸=w51;&4vI@ۑ LfHK\F|hb'\MUMLN[㓓JO&!c)znj>eBwW6M&_dy͇\j7Iq%p5}-N]6ۍċA_ c>|MV_%*aE+7͟&ypa9.)z}@_ WhCL0ES0udJ/"+-Rc [ly=Gڣx~SHth&aӬUG"허_ӃqnoW6uqr:\)/M0Z\2 ٻ_ւ6l/T $*cT\Y8vtg5χɳ!lFu gcUΖt 1a~ E+cXJ)\oOL{<{/-`Ֆl*_J l_ Ux~3|KDko1ikӥwYPf|Z.'r^5! fEajYƭʶj ?hCĤa|0Ȑcxf'v76\:3R7 cb9=C G/hJnihu4~T  [k;n. Cj0L|iw7K -.`iּ.a۾!Ha]^#Fr~S'!]ѡO8绀>-Xd #N%T3vƳXtĞϛ'jx!R60o 2!rO/̟{9SLj9.oco]G?5/z .E:|Êl@^?wu Zf&3* -ڂ] Fg~BUw`g߬g3(ڢ}>>i#EvkT>Z$V[}w6o=M:߳Q=Ē0Q$J$Xz=vŎʘ=h;#tya!SgeRvOcWMSkz8{!ΠFϊIPo߽M5Dr@\=v9 { \/*S+VE,:LMCqa;Ua{Dx;sxzt]?'"qw[-r=p8+~헣 oq!FpfmE?`]e$Tޯx(⠝B2~>z?.&څ:.?"=:tU'oˆ`bȥPzqu?\KMnN?yv uJQ+6+>r~䋄WWLRF`u]yyB䞲uWLTKw9Rdm,0oxF~iWBc%8[9kl>d qkAEL-QO}I'Mr\pwZ2ߦ٩C@W ~AYPN-B_GKDJqyJ䄸W$X_ӦђtָP3y8Gݭ칎W/>rFGY8}P6 .ڴ3lѷW"^T\8攠'{|J&TLkjc$737^v_8IRVn;,,ao׻Ko`z3*om,tF0函$Y'pߑ@j5 ^ CUc/wc*ЯOqsOaiY>f؞oB`DPvYUZ#,V w+$/p^ջʇ'abj.Bx P ox:[mA K;`< {gº}~OB5cʁ0iho]Mw.)H<).5K{2E0\<"Yb /c7%Ʈώ8u4ô%Jto11Pdbo@z_{|2v ~E0Dn↡7˓Lܹ8Pn8ɤ>_Ʃ??%)/4 cpuJD+fOv8c;Q_4n+t/G:_Mzhy]4M0\}mf3NzDKaP{w%Ͱ8 &].myA/`q0`@ "a^0ad}Xx}+zoND#)x>Z솹Wx _aDSu /HU`|f/q[F7Q 0&u'&6yI zc=`u9Iǻj[;8* 9 am{ZVd#5~^8I҃^NŘuꈖ?֛ap_Ҍ8Iῴa;DZI=y|d:IL_%'&K rè+y;M/^^CSN/H{}!ӎd}_8q  NHmxIc˵)bs88)gvyN`vk#(=;!K"4pט䷲0׹=JުҶ(]G4q_\ . cR\49L/gV߅Vōz}ۢqLJq'U\:NUj ƻb~o;t+$LoxSpG-&) M5wRX%lZ0^_3 kIi"}Z.>E8dd_eLoaML/L 6^UU c>X[>3j]Ms'LK$-D)efWQٽ߮n/S} 9[L?S+ƠtVlv'թyDJˇ`saKXI.g`ժ*~;<>ӓdvv ,ZP=O,Ҽm;₲lyXq7g(b抨6fMy$- u0j濴D#PI>c^{`tݐ瘛//cpf)0ßdfT^V/+GɽMioQv,v yJ)9{pðt7zq"Ҿa$6jogԑz辞-J8%s#(YH1a{`n5tP.|9;? 
pp+J,y6R'ԂAc規la<Ť,Jۭ;q<2|?7vx8te̤YABp6R<[]4 -o,Us_1C|z婙8Qq+v~:@)K 4-a?ksF\{iS勍@[`2p?H!d oZM0g_Fi H?i}=TeInH71ɃZ^,DY384!/I2nxTT[s%*%{&UY$SAGq?So\U\zЮ) 30+%]WY#L魠9k.pyio]kՙzh݅wq1afhe90Ϥ}ՆHt"|]쉃8LZdƟmRf%܄#U 'r} *?X&rwf}XƬ+4â<R4u.-v"(01kǂsڱyƦQؿߟ Xn~eڛі`ptKϦ7VpEuWC'ZѾ?r}1f댟e+E;±w2ZY3fp6OZ` 6s64m"}3oL_/ƙ\ ZqykS{;Vix#̵ɔq/&%_r ڷNq>$UȬv:`mnm֙$ӟvntu0-lꏺ&N\rk6lgټ8dl<6iH2*GyxG0}~H;?N1҇unn8Y1괡Dµ_TvOe@EfePC8&{clVS@]iV97}^Cf U9%oO2׍ӂNa@wr?-α{ķc@x5˩C){%"'ǹn.F(%|XKP~<ڄKT\=fw \%8y1-}7},VDmdۏS"CEH=s}/:WdZW piV۩hgtg]^s?i_t1 'ёN d?EX Ɇw[Md*%ԽŅ$Qʖ|x|ƴR K;>493ƁbBXKz-}7GQ6cVx.Adcoz_5rQ8NOa~_/J[jГ_9O(u[`π矌I#/k{]#͞Mk[ߺS$_Ń-`f;=NFETz'uēYqEA "{.)0v4ǓX\nM2ӻ45[h#'|Ya#|6Z$5ˎT{GjIr|*q;+(Qn2mˣ-4\.aoLu~ǎgA8&rg:W@jޗ2]pMW4(m|%W+ CCQA0YGf> $?DsƓ>COg hL-Xu*YYrLIn܁lVz{=-);/,AXiO0$y`h:iI[H|.rAJmE`,~cV]sGLX7} LQ_d͇HeiK iG6c~> oR/R L먄s[B nCíI =lUCuo>ړ>hq#~^I 0bd6UƩ)ߊa*8݃z ٌDFՓ |@=wEWOU<3*oIVv=tqnY!?O>- B^lSY})Ly:S:,j0zW8T ڵq h<X'XCX*;K\+6h=` K金{ ]F-V~Nek݌3;eΰ#m|Acly:)V$G'sгZfba#XL4# au5}>}mq^&3^.oiЇw5ҟTB˥.ԧ!U>sT$Ŕ9 /$4|LMŏky"Mۭwm(yv,@5oo_ {~[BONqVwNl U[>{+TxN@$ԡGP^wXٷ-8{Ы`Y۬{; #]`1|:eGص _{]] B&-[Pꪢv,©[4wbD[ VJJl f^j.?)_miKPhPY.mōMe9B{ $N] !]GqΦ/C8%:Rzޟ0/Xډ̇w3k/* $WrbiY;H1)nza# 3 ql)CP堫'03h ? 
!O:L[dƔF 왉(q LOoq>u'4 Q騼X,N5NǏc%*V։=~&8dC#w!g.da_Ǡ?qKcդ巒;@7ټiz&MSH}]p\2HڷQju ȸh`ɦ_%Koy./P9pfRȍS&XX6_I2g~vYc{)4dܯC9ݚi LnʌmG8U:}pZ"Lٵn^ wۗwxch_g\z݄2}m^\`* M/q#ƿ$K:;S5܋gU*7n2^r0vjoXaUo7XlϹ_f=d̺;7)qtp݇@{[oJʲ]u<"͵-S2$M^|ǚץ c\Kڦ4N[KBkbT N~5y-C]Tv&C{0T;&7"mT= ݾ~SeO( Uu`}_%5}7vyHQy;(E0q( AOIm9TB =$?3}UԵz$N x:c%438D=834;z nj}*>TcJUxDU 8"wFg gM߄X?swn4g7%M'1$"Qݴ@:\oׄmQnR(aKV$?.dEv3:~ꐲ.K07t^"C"v̹%/epȡ$d~%:F\d +7͠~e0/; \:8vE k4l݊4s ]8/\k4Ү-Nu!;Hw^v.w[^Ȼ*0_jN2v#ct`V(<3COZ"__ĕW\~:IL@o@Jh1|l]W?`R} y ߁A5 D3 Iׂ.vfG0y8>N.t0C`ܶ|4N]2톌\J]|paJ=oX]ggpYFrgJ2h(,w|nl յO ?i]A1L\!z`\{^qetN\petv-a*OzXLW6=SnZ|2d|bjcqB9%)xQ{X::2oo#L'+K?pƠ+NH6 a$y?0H{'y+Od 3SQ1γIa?#qiW17zZ{NZC<Ю@NjIs~S >D3AtKnŸHO p_CpRu ;oY$oS0xd`xJ.Wp{K8鯯n 7.9'o_1\FN?=J23384F2VԢxT ns!^VbD:cYn뛑2 ѓ$(Cʒ~=LゥN/n6@{oѣ fG$ `࿑)4AQ=u-$EDF.5;L0j*^7@CHr:t heNP6YIy!m(vݑ ]'w~B{Q2$tn;MRI.L)on^կOJg 7: zzhĢ cxє^D]%Hg6Ĭ?NPۢ6=#=[!;yEdxy6Z07|I2),$[=Mr}RL!KJnkn}f0t46cp1gέJ?pDμ{HƉOgbQKl E^N6"a:[u ˹^~; KAh/X+ 'miʍ/oH76Qih\P.Fg/+G=1B8Vb; y//݉')NjL0[dNMV!Yv<JcÎaUs? &~d4!%Ϡvp~#P_[ºǭ2X)GV HXjs߀w wr2WQea ݽ8óq_MN>!\+U|Wb(GQ1k_(_J )H;޴aU C;t&+L+u]Œմbh4M?0*ZaC&)ݱ^| {SUAͼ"X8u/ڤ-)WaYT\ކMׯOB o}TSBztmm!~D ɐd򢸩 %1Pc%r$rˊ? 
?Fz.{{@%L1 .هCyfP[9҇/z5c3[GgļFI/rkߌ<~nf`mˁX׺8t>N% MWd⻶/;+4*߶FAn^*ttd5l;qI*U﷖ۜ%pI[!q^X=6=+ĉIf =QMaˎ!qˬDŽQ8Us;K~Wtm8꺱5zTci'bD|)n}nWd)C1L6([1\k0ZjoJ A0kW--ZG][bQ\3hZ*(cݛ :N*ms9S+ c+A+5E>;*tnNdHMkXՋ=`uǩO 0X&`f:9iZږfB;K#lM  fj0`մ$t:>I(Z%&?@rHCxTymPZoojǏbW$g[f$L~&ˍ6aR>eȔ:ؕܧ3WַZL@=%O4%[<1{Xv9418 S/GUtݥD N@i* cD ,k!]ĂdPE:8ȈXhB4HI/KA8?cf,Ne9y FYR8ӖkҜ0eZ5Oh gtIj@%x`/&Io84c6{ԽN]c萻w$Νs5mNe$lhG{]wk.`sѻs'7*sooAmWE&[ ®3H3pl!mΡ m75sI~Y/`*pxۚu}FliwFg[Usw%R'XciYxW,]}n`ʟ{!Jii܎8c<ǻ'(L;znn- 36GRpDhM谪lMoM]R%;g"I,~̇km]XnDyWH0EyT #cm`Rc|0,&&^$x,m($X9xt O˪nJ>zԕk!1Quߩ6 Yt3#{H2)Nw>bkyH /ˡo/>B+ͦqE@˧f[> L=c?} q컧qTG{2 gaɷ#YZR:ԫ:׻.l廥 KÒ8ߕ~̚}[fyx2Q`z8T\y)-O[Ŝ.׉ej/H[%} t8l:7i\OP%N  r"\H=2\{g=}wR}>&M&8ZuHFJLc ~{IX&}_ku\4_(6@p; th xjy|mo0[co>u(k R]zs0W{L$.VMa)$Niyc/&zڕ̥y ˻5|wz77 cKtlj5O +`쿔巇9q} kg*;\?e qcS;H}0oF$t~ n fgnh3<0S6ēQᰨW.Eh+daYaiP^9{nUqu)jQUЖ;?fBP"W.Kﶏ.9ƨO6IC Pyc4[0qjDp^ӎW76fitٳ{Mtq] V?I rk҉qH3/x(χux% >.$=+U,[EGJuX [!(_5^RӝƢ<$Q^'q̮yV- (W6LnG#B}Ge[{wgv̋ Gv]&gҡSUi yC ce$Hn3tcRJ2<٣+d.2;a^_Й7NU͐־.\{F)\)Ԡ Ů.=ͻ#9}}lp5͆LZH.< -GaͺH%"z)\hljԆO7[`ߊW 0?/ CZ W6ރV8C9z@B%>`@nڧUkcT49b vzUɏ47A1 h8lۙ;+JP!^酓/ZfL蹉8B[2{P)mG] O;yqehp,i% +@4Xm\cQKo%cg:cGlޛׂo`gU *QlV0=a;0k_ w쒀D~:&NkAuqҪul{ ۶vx*"Y1PS^c]O·6ā,ngwV.U jv3@ufV* 7Ns 3 tm AI7/Wxic,:_}nۊ'X[K ~'ړBbɿ5[%碢5vLH֨¯˵}v5P ޽=u 5/ܠRmЇbZzpiAMٷ_3 hwZP1+J/ģIw_Ogt~׾0YﲝoB4$aj8M:nIo__p[^oRիV:$8r<,Ny[?1{ƻj}@ٖP'X^c`]uPYO:,~bh?MÏ=#ٿwu`8{0UŻw8aec9Ϟh+3nha)ڹG`Y٪7V=aTioKul?`oĪg#vStv38.[W. 
"HB] ҋS6#X-t|&c'M$;pmK{R̊U”fQi1H?cqs&a1c\W4pVZƣJڰMn 5=o kK4 ÎgZNc&Qm@}m=Ǒ(e@U}I.]]*в e5jLNɳJk4Ώ7acۺ<amPo"+lM"#?ncwφΆV8KD:DW հ rb ?=u]".k?Ʃ\w]w`v?k)8*[&>zW_#EP~BMbJBjoܝZIMXTn;ч[:F e& c*V>|w2V>\R*@K1ѥGDjӯ#V8c|'@<'=e ş [o0%ELf{Mru)n-WO; ǛE]R5Vyr3B#yPҚ{W3J&,ː>xRJx-W 8z7Ry8{D[}mG[Ic2ʻOZf0\E0*.툛 wN$xg$@ケCn#vMT +~6q",j1%Γ_KϮɀ0I?~_vpCsFqAS_ط H}/&v^yIJ3۪s{ma95gZ9u4tyNi@G3h]1֕=TԷE7ط1^,9ֆ³?5b ^)U;45ָpU?G&aBI%6a&ck Xв з,Np"8.j;-~9 5V4>1/m(:X]8EU/}|kN{ XĶm PڜQX<)rdKU=q>Ad񨎅14d[o=yk/Sc"QP1:-_"*^ [+RHuEJ^(NNONlq˓eC>o Tz%|Z]1ᛱ>=G(*;\PI4P|Hs'(ɟ /e+t2+%<#Z1~g]oW5@RNu;z`҆O熊|iY|\*L ?=]J`5 7^8G`jBͣZ9( l8<2:jH:@r+IJjWb6Tr&8`dZZ{ĭ{w-?+vvzlvq:X/ϼI.{U[-οR?r$H3WIKkF^77޴&z9Y-T-P.`O-kvhFV6F5jM8]&a:oY^$tًb,$p̮{=Ұb[N_-mp׵[1S{86?FcsQ,p`"pgVXUN~ ?k%>6VQqS V_Z-Àa`ΥV~QK?wz9 LDz6wKеJr :߇8VjhKH\;eøY}8z"O YNX긓Ijkf>.^#{O j^J2HL 2< u%8;j uqhA0&MIB4LS03kZR4p]U˝y{sסIY*dzg~q'( ]k^} < E~}0;lS0i఻mնx ©I3W2<):eu8k{A֖zfc?*? Yd,C5e,5v.['Wf |[~ee]h*t}w>wf[H >"R/>SCY").O{_vfXCp&Pi 3:zT06{,1oTyiIf.ۛ25 @{c(H;anuy/$BnT\ B4d_3=ٳ-/ᐅ"ej/M7GqǞWݮ{&z'fqxO cC\S} v7wKtAq۝AʐFPz:ү |l iB>vwUp)&-⋕py/uPw S69b3 lk/p LgаjkV0E^[8V8ۑLF,;\8t9-ƅY`:<V(yl5+(!3TΪ"-eTuh;tW*=ۍ#e#2%F }0Mn\kA?qh}W'a4ٹӻ);jn p v0nR=P0C),`Td #3qڼxNE3U^AkC]k|pI_8AOXYoФc]` i{{[`J-Lp$aU]0q[NO+И8.هMb?thM un$ogF0qV|_O2a7 Z _7W$RN\F XxЄc{\Z.HȾOByTZ.ho_g Ӈw)O2u|xO&; Jn¹rmov >_v^8`}qg9&jf#0N΍׸*lm>iW$>8mLȿuA i|Grs=-CfϑЛ usm%{h U%bsU5.\-9[YԱ4\A^hƟE[Kq+xdbUjܗNNvgVbUyY&؝ѷv] Xب}lBWw*`dGaQd59l9RfynNl=jV}cӗo1\MH= ?7|y&NlñE"vBZ2CjXӡ:f,|/R{&&#j7G)$#k9wqcNsj9RlՌrXWsc.$S~ E٪r`3ο;`j&K]мr[ m{ =tj*&=psvq? 
+8}nGW!$OmH?ڷ|`꘠&\Nںi[*u7n+p"X^ @gY̵3!Xe8J`à4#<"GL%.G|,^ 3^3F8j2;0lv.ӫbwaS Ҿ=窴If^7qPzguIzۖfEzSNqeh%_(O>c͋JOb람:cxWqleR'/1{ 0oݧ﫨XOe/|D۾KGzg&э"o\P |qiny.Ѣ8T5R7Ilxl*5F:yI1a=J~(J jAUj.l}>Xrjg߅2X|j&A˿af1%^V{N}V(ROr}^ÿ0MrA6Ԁnҝ+ݍi۲4#By -cY+ 0A{vvi|6Ll)Պd18ّ#S/{A0jYu$_dvF0Aey*i-')\iz|WE֝aY?8jct-#RoA߻^i(f'YXA`&7m\ޚD2xW GSiLFӀ;(B>һo\ٜR\<_8~I'ʻO*B+_~^LXֈsWgFQ2;[:~A rG]W)e_ʤnSvs'qRK=UOXiؑA)%@m?(_IZ/L,`%,~*t~>9 fSd٭v&-sFc⯓qQbQ/J`ٹk8}gQzxJu02х«jSHu"lع1kWG(BXVjE%Mgɋ[ڠ cpy;B.܉F{vW=LΎ_ YA< ¥Y{7"MBW*-t r-/wiSЉT2JZ8L/ॴTU-fꎭL&SE'ʪvJZ /٨VްotH8tmoņ$ ̺$_8ݙ?Ae$w[ B'"N08c󕻲[/  N&)GhUu݃@G(` V;Ocz5$(Wfvse)fP`8RKz$5Rp}UUw7EPl0z!D0=u,"YcO%`;[sX2v`26@7nbo;XRxFՇ{ػ<VS-)$cXk{ţ ?]{6| VKﭝc * )foE1-LJd8`dxL$`u d "}vl6qg|n§sBk~-r)5w ^QJǺ`ʅ\fRu]y_$ wN56z`g˦v8c:ftm#zK66КΔ4P3+'4EsOI,$1- L|׭%懌(a5%.Е5F[<էl M'qf?wE? rہu<Pv᫴J&F*=4~3rTj/qxUmuDQfxk6 a/esNJs꾉#aMXּ;jPh2{ˆU|:T*3ɒvlU:֎==(2WQ u%SĐ9SnhΦ#7DG3pp:*|}GlWzpzb2&T)-wY#D~:TV6ZJ*Y'1)C)COoG}7tE4d4D\w`$U8T;ıxB[$>  Y?RIyZ94]fSt5Ygo<0'\ 60(PT^/ٕNR Ml*3aAU|lfҗ?Ԧ?tT`.t[M,~.q˧q~u3eS|’2XO,\lnl1xnlzsy$ŵ̽~rI^sC1Ne9˼{- .0Q{gyx3ڳ`ڲ7XH '_CHM ?a X|p,vI4OdI=yzͮS`˛qQ[r_) M,E^>-XiF&U: b<} +.~|r%).`U}[iMp_99(T˽: /UWUK.2/fp,XxкP*g~&A@vX-erYznn썝)`|T2q!>+v'=}݌E-zsa~MXss Tj6.&؄fML`NAjo'ĽdR_& .x>I}j7즙_n*Y[n/h\Doҧbm]$#[rv ͺn]N$K"8>2s:pZ! eLʭgڜ0o8mnzψ7rGB8;O$+eB8w7QS,6Kֲ`b=2 'k'D<0ޣvo9*sggvi70cWQKg0h_ DRA7 ˺)TB:Dr &><cYiℽ4'x3w`ҧub^zN` ,lJCɽW$6RPR5v^ФÕAWi`ʼ.=Z[Ed]]…߻Kފ{&GzHҰS8E|'X]q/tNC\ڻF 㷾h7ڌ>$C;\`)jjEx{WٟojyfՖ . !3C/4_gkS^o:iV×yv`~M.ך9ٳ{t38w58 ,D h,%9aB:G3Y 6PkcZ3Īep~A'Hq~fHƲ_27~as*~pv]9ڽt5\VzT[r[ ̻[o!DpGYLmB4~qF"d]x|&<>ǾX0"l/H9pZYɘ˭<N{a-8.Z-aٻ_v̵{}xd}"V%pp'XVs}zS9령k"ΥJr{6Tifs&(9z-%yvc̈Cdl? w[~Z0q?W WcūYhPDDZ-Bǡ?Exf34<ޤqSFqkz9TmG!;'3U\A% WgQ˝uN|I#x,enW[Ci!q[vMReCWю=?Qb5[q?,^( Gm,VBKH&w͡UP=v7[ KOM WЮ4}*/ZXT9~Mr= . {hWְHK뭷oNDB%wSӧ! ք4gnR~EPĬW_߮!s"r. )ʃTt\R=rɭl+ޠ"9 ubb7W…bBE-Ċ>,ޝ"333kANꑲbT.֭C XZI]nLRzھOcqq{..qB\'n4f=%;qLK2`y(A9('S /}`0V~&:Utcq{RY.nw[_0eՌI=3gAjݝ=R*{n LW93EB. 
lpsm ʉsݢدA/ٿ:C8&t¯DГwqn,3CP t47Qnk׿g3+~]1qz:ҟQM4!~K mhg[4(w)p[b')υDK6[\*D^²%؈o=uE#o]ɤ͙ laLaWL сwsv滿`Ku[)DgT)#H ;j=v5)JZlqscU[sB+bwQWPTPq]W<&_|?.6ދ4IMIrMwLӬ:e{]f; 8R"q(N0Z@ꆫϓWlK^Q rլMm! oC=8.Խ0~#{8`<}`(m|We {*_FaCz&4N抳&XEmV=b-g}#QN;v!"ǤozTu9Okh޲a`xw5iȸ9_Il[/BNJBl &sk6- ЩFiቹu WXI<)EbėB#}ˮ3.H3Pܴ1z79H:{~]Z93G.%deF[K*>$r1~x%,茴twq B":aMwcX0L%pR7 `ͧuŚmʶ8TێM47~{,dBGq TvRH.90+UB9gYE~o] $Ή!k"a{сBK8K U@ߍ7cK_H˨6$ K UJr,'7$V]jl | }nF,,tԥӃp&3I.qgKެg RQ7,SoqfNz}f{A|=ҞgĚ]Tj.ӎ$|8Uw4='Ps)Q3Ef}xA]~.Df$7@'ҭk"ͦ((0e 6Ε~ yr(g2:WzS?5n7?ҷXUC3cX'Aqi{$'' kpA|w^ vȪT/< ɭ@~-b:xB,*T$vWtD9VIp0UrZ\eU {E 8k}2VKt_Lnqrᅼ1C&Ecs¯Y&8cYiy-\zbgS _NX8ÿa[ϫ٢?ñIzl܏T}J*$靷 Nʇ ah_sWlQl"j7b/&1o[J_]wpT_ nٻkAOۇ5NEQl+͊O!<&Yc‡;q6e[cJ;7S7g>#u!Όp߾E4 #"Fmd]Nߴ-_5uzv <0y>WYJ2NS⥛l\9>=o Ε,`uUkΘV\1-Q=5=-w ޟ󥛔zllMK"Mv{@5XL}Z. ;|p(KO-~3b0jF~u^SWM%kÒR {[J(8x< 1\& Y#_BЁY$&#ؘdk.lu[\I'vd^wAdf_d LO+g{& 4/2oh÷r׸i-kN͟(1;C_&IߡFu+dU٦}op-K͍JuƁ<*')*&e-5V:=u C>y8"v}o5r@@#_fipۣeCsTMX]Hn]}ܞNbf+Y8|ݔ5rgn>7V:خ_q\:]OWH0rֹ\AF+=;q5X~W籍*`.QCjh8_9#3%gd V+vڵs~-zC3 k-up8ٮ9bA}Ս9sO[}͖*;/k ' v3^c妯aw8ά[FUCXqs%)pA% nQ:L>h}UoEWI>%mQ{ΘSuAsը jV?-N_|,NP/L XlcN@yuۀyS?JYkŗW|5eWH}"ɵnv%(,9-ηr ^` |]2ٸnX]"wڀ?sdi?&co(eaObx(J}W@3r5ϰMN!k3e&CU6o4߀f ru*o(^`7~JwA+'hY;(x\Ԭ 62q`𮀴楪$?gI_@]z#Ik<ڣ* @%-hЉ>({2?NuTS&)Ά`@X腇eSjr2 ͛pOm=h>"vo,cI` y DBkbCdl*}(;/>ĞMǁT/F_h6_ӽK<Uc) ̲2.7 e L~X_+[8QA3ڙdt,27+ʪ$QJk8&o,S?< ssEpM8!K,l`BK!mu+Ll>db7%(F*܆1oG!Qtp{7? ^翇n@ϹݜvTӮ2@Cm2{i[\fm"Fqmr]O#0c= BNw˯OCyv"kqi: HI*I$K7)?|1\8å_e+u=H'e9>s@]G5؟\19NR]"yV+SK0ۭ;r6_׻h,qj?)Cyma|쵑fAP[sO/%8JR׿{4o3^Zҫ 4ůۘ(+rm('zL|ݗ"~v4#ijϓ춗yB=m}cN]󞸰_\g/̯s a~xa>S`\`%Xmez_0u8ĺ`|=::\4tL?G2ɝ{ N5u/XG*q>CAhn,+(6rI`ͺE;0?:*׃vB dghG~3'Łޱ#(,0?Qq["Rj!Efi N7OBƸqar}!;J|ϴ$%p)9>)ə G Y,m 2}߮'CYnPD48A!<}8_ I-*أvʨʭ A\8WVH 1JJh5]`R֍u?^|sL}`ՑT*g>.&*SgpGG$q}d)z?~}S3Xho+N{`b$e󹫫l 6rQؠ-?w!˔9A;U1Hj/s{ a跈KRKvE5_%C5Cm.@2SRd6.L:&=A1C6OٜJtlUDžCI͞F%ͯ} o298M9/mΪEZo"bۥUFBcgQߊh,ԝ?՜ ؕLb]-j[u{ölY:xLAq'#qrc2U༐3HVVf@_:O1$Mǎ_Ϟk˖izCY6T7ܺ_0v+@eѳ[&6FڸǟZR+F? 
JS4T6%쌀.'w.=ue0.rIcgHGHwoa xi>sbD;䠕oP#c?Ys^|cXK6@<8n+LX᳍8WA[WLϐhk]7 zN U1*+j_o@GhԞZ_uE k^n>* Z$XoE#cL77w08BhEhb(f N,;7RzM@˸ Ӵ2"(7ۗAg"`Qߏk$瑧Ni`Ql庽N{[zuV2bOlOc_ ]|99 ' ~.J74٪]y>~J#W"rʓ\tM'KByIA!Cu 0,ៅ [ 48+qTك~97WCg_˛k ~:s7>`coY00xl,fḳFOQGY8!?C.ɫ3qNuo?R/9bfwSE nK# H>)]tXŭei[Kr&C3-ڍx10tֶmC@X^ގiLi8 O?y \f^7x3 ?'UgZ#q9(S3;86EWRw$XEẅl7_y⼶NKG<̀K,E,7!#\݆ \_t6]pJá}eޫUl+kL=(7kwi8RլgSqCU{Yz_="332z`x$5Ԝú.l-S͸t8&;WWqSx0δAY tƷ=:gzXMu7d\]M͹j{oòEyzF=):TNR&#a{뾷m#yr5V-%)qyL zntǺa* E/xjHf-ÀomԾ8oN6:}y}j|c@RZ]pqS gneZ#5=ӻKgO Nkna>Ŵyۨ@u=V6|Q66n&OV/nh<C;.>BH벯u83is {{Z2V5t*;,e\f(%:§Z؞AGHCBxtd 7x ykcӭjmH+սPC8*xlRtjKD?gl}W@{P8`dzȻ`l*H9ݙnm9ǩ<'=\d0nc1~ `fy!ӕ~i]XK۴Bg!c _ΌsdJFC0GۗqRjТok 'uWn=eO ߠxd`yϙw辉V&XH11Lm);<{S]ZL%-F`P%A(?^[J8{uAg H]?TEzaqy݃0kIVn-aUi#t[.uW/7gƶwq82~pS⩟tV`!bc.J$J|YRd_n8E^ȩ  74?;L%~vGOGܿlCd  Xi7,;I 5}Ø?5IsikDVqtVC+Xm)9]peM̽JSwEr}͟ې=S֋Qֲ /Ev_lҟ>kj¾qA/:tƺr ߃[^~N߁9{4X)oCw$>? V./EDĝF3~ t Lh=c Yknb˒MEmH!-De[.ހ!~ɓ{iG #Pu֯%Sf[)K+'#2KglV'v Y.z^l9 68 v6I؅Բܓ10&hb@å;onZmclW2οI7Rf 8OS\nsXQ g 8y;2Ul#—X`CTBh*zOm䊿K\8T6V^`*7NȞ$6.+yj+F%v@d@[+\ځ0^~ߣ9ܞJo-ݲ¥w(#Ew). ?R~Sb݃y17Hǡ7Qğ]QlOR+k?yOTz 33qг#OnDp*+w5c-%~ !".\Py`]9=;%)!5J!D(vl؍%lJ[{0zn+;'G˯pTu#K#SԘܔ 5"6t9w+9}>mZ,'[[~gV؅~AV0{(O;J8R놪F%%ER8.4a T[_ mkW_ma ?w<4TpD(vEir4VED'Ǝ {´(d<' Ywu<,oٸ֗_ee`Ⱳ\ʅVW&vHvh{ފF>3 C;,E~ys bM[tqǟƜ\<8fBRT <%Bbt܌8AF;RXfz5,;-^aӽui0F6rt!vDnS]DHeSša;w[ϝA"`,|41y2[Xbq/VuN|ŗ6&)X|bfð5AcixX*]8UЅx+}_' ,&u^b{[>Gs$r̼OsV[/@Sv,\?ug=m:(V82>dڱ;h"YgAC/L6\Y+DR_]7&ҡ/EaӢNqm#ʇT]_?ɯ݇IV7,2=bV}"AʹR4:,{o: OU(Ï8>N^ǂH UpnmazH{_92(v6OHäG^:P-[q7 IťF*P{T)i?{ (wCIEx{\Ҙ:z1Ipk]3$듉 n+Ofi;a},.^ZIn{GLl{ǃ.n#2_σ^Һ]$sPQ Cͮ_uEebuC Z`}Iֳ~7 }'?|'kvLCdʻv#2 (:A6'>PV hanJrjlayRI#0NID]Ud2133OH; w{;k(@{wsR'' E, "Svj߄4NwK2ؼPѰ&,^K_R!9'TܨTU gՀlڻ`az}aa)lJ&؏RA>(qR Fq@Y(r`m$VP?:d=f?dKxU*tj韵Ga18q( i{{{].*{:c6O=9`ex^l/;m-1[~uH+m^Zrf^:G)e8Pզ|ϡ%RVV2MJTȅ?88K_5-[HJm&oD6xPcB(SK\1=ˇ/:mǜ~? ZayV}`8q?gQ%o,Wvy6v1š;gqnlgDXӀ6d=/*hhe^$'vnW5\dkq?1Pg:Uڢ. 6ZrU|p61. 
B´p_y>9hf=BpJg\mFcn`DiB9;آ>7UbYp`|qX@pg-V=_VXiU+[zJhVg3m|5-_(Xl ܸߖXSV#d]Ï`vZ4 :kH^oߕ7fhpw+_?W?3MG0b4~*,89l;X3Z%عf@rK,ɝ}?܆wN]%Y~^ aW|ɚ\!m,;a|&RGWȩΜz]sv0Kӊ5!1 Z߮pdh).s°޳Kq"&}[6ؔtR Vk#=ů"$;qriV-2q.@ 00^&~e&{;36~ ι/)#X'>T!8\ݾ&%yY޸VPv'wwű!`?7BG|/Vh튿?? *N=*YtO^x ==`6k4 g!F+}f*L|o9biC}7aOqZQP8tw5y0.0c %"9_2埝դHjޯ[XV?Zk`,$Jٛot/a⒤u?z46&LFPlO;:}y0ƭsu .Ǵ{lA9@RźΆ-WqD ɮ=є3%qu}3j8H@DjfmV<76Y`G*;9z L~U im\06#;ЉsmǑ6# ` ׿CpcԴ@Mflrt^-:0b\Rh (-j̊Blp6 IJgϗ#3Woݵ_~[{ e\/ZzxTcuO0OG#Ste\ {3XwOo-~&qsmp:-s˼)YrfЧvۭ`yjT}+ݱє-{CAnlY']EWY`pXt߷\+_QYIG1Or=Lsvة uBsCζ\XTBz6;|cB:6s+t邍8]˰dieT9uhhv^a"a_oJ@{+8Z>irkD R`X]F?s^,׏ؖa׏ġ'U3'+- TrU0 _c-\Q9 x55! 2D'6 B%w!vb֧=Pb&!} 4*I%<{r,M6YuIQo0Ϯ0u3v(Kyy/9XLd-gh~gCG3J4ľ R`D[:nb 0V6_Hc;&0JqM.9z&5/;]p+wzkԫVBcᏛ8iwX| `,*T6J^A+0. |܁=Y /T>vw@,4̷\cJD0Qq <0#(CgRDJGgo/x$LɫÖ͛Þ['#B7A)4&Z-}zZFd5ʄ5/@}NWYDu76巓sf槃F-CU<( Nvჴ'c:МG|^!{\-C^0k:ۥ"0b9;HP2n 9'(RT}2|p{/@bW54FlEӿJ4i\*kWXEFA/ ~bg` cWGSaZ%'\|~QVotЇa^׭[;~aK$LveW vbNa{ϧ%Sg]HƙI#~a,^]8l V'0nq2OhRZ9ģAc4GXI ^ $+Nr(]Q ] ؍PR?Vh;,.N|rbDw+,lQf mC T1i3&- 6 j~ߋ}X$JI6V/BvaYܜ8)e v~៥a3#Ll(>EypL7U$܌͉=:ƷcWHe=Kz*Ӭ5{de땃q?sN瘝c~pCV5鴣h!4[ lI̔n,:k ۊs7a9+Ǫl]'{m2zZ(TFl2priSsuξN3J&LcZE0|CS.Ð-&PRj61m,5+Ra:BBRP|r_[,n?z0 <_jb3fr|Sئv̻casekv¬Rbwt$/*ӁZouF?JWBQ7@őhM:M_fl Tqb%hs7PZBYk"?p}q<L>oUH+ s48V= /s.:'PƒT4&?B!Y\e8n9)LcX-p`KI})$~%rGT!Y,^>F_Aq]84x3.n-Wa9)epRvFOq1a ;p(Ezږ'Q;v8#UzJw9j?H]6dxtŦvwuqB>[/vl:zaAAɕⲇjse0@yMS3 ֨8(w[~0LgEדY8ׂi9c_rUйxdJ0_?}76D y'KׇM;֩Mdɩ9d{1ѧ>cۂ]S428jSXdq~A1Po9 r7~9O>&Ao[,H]?О9'AAGN 6?A$G8~<[\ꝱ1pJ >sGz{nzwkZ\~b'7k1 bm/6qvK}! 
vZgSyihnzϺ(ueCoKB#8腅}z9VnʼnV[MI X+.`#{{⌫9fy{X zK65Wm*q2.iS,:~ v m2)lnՠ h`/~4:HF`Kl8~y@GF[5^=ܕJ6QȵB\{9OCN Hڵ RsŶc>25ߋ{A]"Ig7BiBZMw{~ ̻G@sbZ 8+bЇÜWEYKmRߚęnJ'|+Ԙ|7v@B.ʨ k*c#X- ޵wG%63s+z83@[yToi)WT]Hs'f~ LѐƾCG%&ۯ/RGCOxZ?d~jRPƳWaܣۂBcQٹ|31l]3SɏhbEMQH9t9Hgc> =s*})"b*վ"8%ӌ~GZl}M֥)%W ~}Bq{زCRcQw̨0?b9DRZ~v,!'lgKRǥ[_uE?u+L` Xذ-<#btkW/72.6AqJ[~9@av\>eP>N>7̓򱧊8$w/z"oBZ¹Q¸9iރiJ9鹥w:afr8!(KAч2<:Ki;>q: b }+R>x]NLN$WT;[{e.qB慴\&NgƦw'B`ɜ%.Կ,9 IƓִ97w q v5JɊe+;}BǴid ]zဓV{cl`{ GT4BG7_(N4WDґv;9N~%C*3;$B .F ۿIM 4hLU:O3&ėX@1%0o?db9T)Mz/2{azPꔡAK#N1f-CGU0:,rЍkP_pJo1Qb{qlpNHl6-ސ|n![3*O ;>3/oAsЩ@*rPZ넸ؖc>\D5G-d^QzZQ0lėC7l6.{C'9ziQ1G̢%>~W) &hWM]x_4Yh0J띗{N4k7 ql^zmdԜɸApwŵ4uz r8krQ6'jzQpM@'LK"6 =` bךNC7ݲm>TO>V A0<,U$EP(2Ae'Nt8jyH_'6 t˞붆Yb75~~do ]%v٧&9~=ZmL(?e".A ݛ΅qKlgn2Q1&.~h8r:w6f=U79a`"k cu`كCbkT˕dpJ3ڊdC{Vo\k9 B9O[~WLa[v$*~4bM$>r٧[#] }*>K@. _``n5}#=dK|p]od'I w(Hҭǝ+{Qcodk]8c#M}(NR_żϗlZ;.g JIV|OF˞nf^QojQصpf`|~6>biw%Yny{1%XGmƷ`=M=+~#(Hι,|@1V6A 5I;u+NצiY!0"Xc` t%YNʼn$@ğ4c5iű[ga_#`_i[BOaӿ)64~EgN:5Kٟ= r!~P> @ל<>&*^8(Bv<%ؓLE7UcܠǾWX[8us &-.ޑ#rrv'-Ҭ1w{Yq#JC*XyVmOr|;g!zy[VİhzqVH|W4ڗkL"MM~9Iq-5m` Cem 2ytaW K.X~5?t^ػL^^2X8L#/Y|euZ=ncգnY'8.}KqpiZړTn.%]̹_ UR|$'qtVк OrJ~]`4XV#;_e9Vu+~=@ۿo",pWYDdz,8fnd3qyHr`Y7 c!H+09"K#_~S|Vrp&ց7f82!= _- ;.[#٤ V8XmSEtl# Z/zPhy \?9It]ˇ97,K&85:b"!1l$j~$ӥ/bu@31,/j^+9Te ʇrz+$~ĥ2~y0i7߂ J7霩!m ,p3H$,sBpDsmS*JU| ^kYB̝BpwxX kgwL? 
=[a2ǩOl8ٗ0diIߏ~~M&X6%iyHV=]ŎSVrr,zE`/ ᣣ^-nO<z nn.I['CG*tC~ Gn Y{VCѡuEqnUȜ=vX<3?cਫ਼KOwK`ϝEw `(}]wpn!.wh#Yq-ꅩi*/aq~W0}lt6wFZևRP,w7[J t^{v2f(NZnO-.asw1,944j>+2i~DpYdz͌'9,2O*u&20s:#ebxlBÜi2^rb&hsuIv6L`T)3#(Ec{oܧ~c~}@_0QvF\8UrS }݅}k|10Wy",' ovGȆ4cե"O "alTdV6o *HĞ#h[ʍdSo$ ru$Yb&oK|JǩXGq: eC`@i.sǁ+C9 zS&*A: w?W1a2o XB\mCm<ͺC_a Mo;qhLr!eD\Ẫ@k) MſŊ +r,N^Tg_2/^Ǟ"F JuyA81Wȥu>{(V-߲g}On/%(|B}Zɱ~'#n[j`Ϟd3Qu 2l6/ιlQObp'WŮ`P~L#yhUgvl%`yG^WQ/UsZs}SxXΤJ@á+ S:b_bi;E8EV[&XM~~׻噤)A^N"xˢHtie@6#;0nFƃ;)nvdeQ~, 6yU0;zcD6@vP۴)^?l~Hi V  G'30SOl7@kҰ ~3y, -սģ_`*Z|[ 6[5-QO\SN|3 #@j#}R'ǿ@m!bYn4;vuK@L~vRF0ņ"֛ýqG5T}TuϯcψGɩVcs%gnZ/J-T}A(hY%$e_ VϮfm̶q g~y^nLocH)Վ RB_^dv`\`U]TGot+Lde~ -UY$ip~<O%-oºY>ޞ78=3aŕayM ˠ,;o Ra gK%\" 4»!-T lڂ yUYggLϞ־k0-wՏcM#gu693lSڼo)6?1ݱ Ko(Xjwn1V]L"^4=XjU |Ld,%w mΏWQ;!ھ/u:zҿph$Ϯ8U?o~ RCXNb3Th.H&(sn<nKڋUtW!P_[|m VJ3oNndV}]n!wZ)6J&-'NQ}c=,$n ׆9EKU(oÉkw `NZ Гd}- Y>`dMw xD &6>_ EN W}I?6&iP gVp8})fibjφGElGH|KT ~? ]}3&p"bUWϖ`62[הJQyYzRjb[W`_2f sg͵lJV w'00wHݷ="Or|v^u90[{t[I*aeiIBX0{~OZɫxyP'DcdJO]Чs\C7Ȑ ؃jV~0Ή^]w.fwj>fqum/k0C=B8Q,|U&eS麩MD8;? 4'V&`]6 mF!0.<:L/}7Zq^϶%YI̍)\)wH1c]stm}5Нbq{ 8`{5PTq8;_|]Z-"IxslZHƗO!XWŞl;Y y- LJb%8H-%t2r>_l;Tٝs0@:nz!>0UGB3pD@wթpR !Oy_N UcW. 
H$0ژS/ K+w:4%j+}x\8uc ApT\<4_k{Rf$잴ЛYD 1$Ͼ`ǩj)˾q1{ Y9E્f/)8ɹ[è٣R`'oS?a)g{I$&;y8p`by $#KHxu{$ /,vj ?,+5SV&zA_H[LǑ@ޘw1}D};V^淧 I\u$>M`IYPzTM-D#piu3נÀ{+T}7+"ԏcI !Nr0FKٛ|e009>cxΐuwI H?dhag>d{RPK:&~ ]Esb>8-7 PwoӅhJEt!q]\FhM`̢"k'EV IPX!*4r['C`Fc5]GtyEC[#lZe2PܸEN9T*mOn~g2?'N<*/nJIi=B\Y`x!jVvo;cq~OߘBy=1 XL_dNPk$K5; v;oq!{JKAI ׅT>!?ZF=dBNYuc1dOфE#0Q{j?̾w^qs\ʭ$U97EH.+AW)tl0ԩVGe?Gfbׅ 5ﱮdVlui\Fr~8 SGS]ϳRZOSuj,4\ruC2x*MMa߿DX@/u[Lp~[D-L ƚሓL;U?oXf>Vy:hx .N ]TƯLFvgd~g#+ݯ;QXD"F.pS۶࿿5﬐(Hl JRlÎvͼNa9c|l+j1NC6v@ -O*=Y\HfywL&;J5Ʃ2zѨ:i(IfsD_<Ŷ<|a?(T>Uo᪝ؔ2)ֲh7Uo ]!NN^]1}{5񁉯С?(cR*oFgm&Nv\jEn n=g_'*ĉv?R0vXZ8۾o_ÚCO2;CoX_>Ԛ-K H$-*~O}l:S.uH>⭆`=˂9ص'udg>CD6TUq5F7% APw~f'Q{ve tϾh\O\$ ޒO$g5Pn"plhd0!m5ү"+5Ϸhb0(+}b}*y|b7{zlv%J4.:Յ*|UW35f%azT;{C::㭝Fz2CVNgzVC$y$ u`HE)@oO˹83*T;j0Ay*] xp<2; <^XZt6,5U&9r2_-aLR-v~+n|rCp4m*KT|EpM>Os<1ӈ =<ņ3[KR;Ɖ ;`\9rj]r9la[*ba;΀kyIK%l+a\]fLJd:!Qw l0Nxٕ^p,^$E%ZC5ިEl_K/J3g#eGD6@ [ q ^+ ݵ6`ډ#Püݒnuepv|4 r$nP [ 4LeAB$ }7}~yb2z֭5wN7Imz |svZq$l^pJ/{ yT[@AL5w3- oD?)j KDЄtal uiQ`mk_`@b$cǃoHto^5}z $zbgP= U{ECk +Dp5){`;G X[ajcsV (+#92K5/=w=f͸_rkxe~4+Ty%M+7Ҡ?FH dJ2:\kWeĭM60or8̑'h2/.B18'hzvp*?/?i2ť75^: ͱUfHӂ:owXurNsf\J!LRv%t^ĥl>O 烚zeC [m(rqP*,*8|,1n*bdsIӟ'Y}!t3-"=9΢( W;8a썇3\tj3#/OE:=#x }go$뺛',D(Пi;njE%E)$am?sbX#ZN 6cIbsmOpZM gkb{g7|⛉EeqgWA O#9jv°pԹ61"c5b5)dy٧tE NͅQ$]?!Aߵ 6*n{ L82 [ B%_Tr wSpNC\mOz=JKafDz.O y3DMaZDjk9䝴[S c3oܾ{afafdWy?q\_^' &.}TF5rze(^˷Oy^__Gu W.JTTӇS;ޝ}1f0G2GrD0DFDrh, _coCKE(,ep~]g!urF2 ϑ˟9[A);I8’x}9il~v+XCy5y_`#}ۙck0["#^w/ MYf hh2|Iܓ^D2pz1 7vjaKזFq1k.Oo%4O˚%! -..e*:}x։ ,Bs@Op5#~E2dm [W^:? ~Dg&|nEI78a/3}nOMXQ ڵ;nn]m;3 gI(cAط>h}컟3]X~4ֆ9\&aErqiy/A?aEƵ2}r'TDnݰalwaOčᐚ/HzK3Sc%!,d olJؖb`ۏ)Z) {dVz Sj&4 ;Ht s+FX`=*b8<"s0 X8p jJ2v7t!1?VNYĺVޭccLKȸWKZ+o9۹~.gnFq^dvI1Ie6kpHE zb^ԅQgaOّ^]}j ֋}pZL|P:+cp?±bKoY)Kؑ?.A=-jq S{ "|w #?0ecs/uKy|&K`Dgb쩠] ǧHNJ%a@v}tai" |(R9"`Wy}0 +W”%u{.SI!?=5yS>\YY;CˠdRTSk|}sp_SѫhgNT+{ckW٭t ; z8_41߫nNfEmXf}s~c3m? fa\˸EPZK}׶$VHRr΀ތ3mP]p$a^ey7ҚοȐ'woE? D)Uˀܑurnq hʌRW[͂ f? 
al5[sXcOIeH&6w=;N0=lɪKu;=`םGXpYą/ %we[/,4h9I8tam.,&C%u;Ws^)\徛Z?νW=QGKEWo~3$MeX ymbJ%a.7tˍ899DT gVAv>},T?O[| +ײw|SOCG-? I 0<'uPe{ܿXE7ZS+n#pD[A!Cl3=oƊi4jdz|`ڎ'lxfR|A'4\ָʆ]S;kpݙpأ}tpqpx[Vyv7B`ӹ/{q6C1~YZ˟\;h/&;qLR^ hWؓ0ΞRn~Ptܷm#aAm %'qlWNme\ cK%Ɛjȍ?waI^aPrɦÌ[59c' h-k(t&1]|%k^dN`_- e:lDz~q8%6[\Pl]^7nIc.f ]waR5.W>MEg?"pM~`xq`Qx~] :n242'k6QհK..Lǒ~2."(lg6 ̒UގOdڑ;ϲ&Z.tTldr O VN2۳s= =15CzeebrftSIƞ+ażT1gR!;8p9fdv {TwCM;9Bo$`;@ws:$S!% kڟ>Zb'L{~sD|ZoyXh}hU-nՃ]vnxZ e׾'yW=yPʿCh9)_4:w;p8kbݓF%qI8w]gF͞OK_@ݥ讹 ڇ 'i>`}Q0v8&صA0tp:,>hn\\V f>5wM?uRG|9A5[! ZUsm A=MH7b%)'YH/mUzG3Xx?m8)B#e;Ζ!$`iϑgpAV=`x_(apM}m0t$Yn%?=,`OD͞,}^CmF PԆeݱCdR]3|%A>74;[*Nx #Nٹ|ʯN^aظ7 ߟwփd?q#`nd4,DxEL.ǙN}1 Q8S0K0ȷqӺ:d J{dgknhtb!fysK51\.Ǖ_(9tuNU< aK{pbnSk KvF\l?#5Ir+<=LnʰđMMܟKN,7?㎳ gm'8ͅP*%m*XܵW!G6y1R0zCRVьRo9$SX"Y_=wzO FZ^y5K+W=wwQ9Vu%f rCZ=avajDN~kx GKwDGhWj n"*öP^#PBr8fLB~|[ tUkaPz&)1iyaMx1;Gſ$&ͼ:d\(kK.&.=ܔy+zNt k+GHzw|@U.:?Q"bK WXayC@…t0]j?T5ɌmHos6; 'lG'AK'VX{;dˈ=Vcx Z1h;v؂]<.һ>Jso6 }1շPkVSn0$쫨'jo J?|7qa^'|8E6cV+`yI&L0hoq?RR,~Vo/}Ng+FP{mT'FG;OpUaُI&VsBZ{e8o ;̖kF ffq<=2YJ փ8 m’#"&Ms}$IrD.]LX:q^_9!jiy|ąO=^LmmH4mH u?ɇt0`@.C YYxGkq&ޗ~'K z" I6B Ι?rv9I )ɲ`A{Ww,Dsρ _Sd2rfl-s! YX88+d9An,6dSeu7VoO6L5m->AI<#VÙ7Xa"⌜;+iƎ~Tr607fHE:G<= }:N cC9D)̏h7OBPv>VS#N?髾-!VrjkԓEuKI!*#*U3X,crQ[V n$:M5 J~>{s3TK鷃饿qhs$e%Iasv]GC?Ta2C BC=R(qdu|ہ6.t^o>R Ӳ嗼%z4_TTqb:/iEooaLƖA;F8<1կ3K_ qXvZtj]J *G2{v-gmmwH.1m3"Pu\{#a.'_[67MZ/؈`d^8l׵0ۘg*0c~k-^ܶZ S8<~~X0/L\_{2 _Be.B)e4^@ԗ$zɳ^$y1d_]`m30W0~,;:4(X5 AH'J<*#֨]1,@W}t!;.+{^.# { Cvh}EPR.݈%2̷%>#mM>>4?LX D6BIf}z֍  $޴?#<՛Vy[mAp,f "Fmjx~cXs~V˞]ƟIX_~,8pNQ3]&k3ӟ<'p0B[;wI\<kk^Q6Iu:zZ`lu )_ٷ R8> 򹨏'XjHkE KM Dc #[#'_QwأrdAp 3_ *^.Ni`*~b$n~bP+ceI%6wt>4j:0TZO4<^o)k,6,uR7(gTɓbw! 
hs-cCFflhc%EϬ)CiHϝWli&_P'%(Nó@M XpbOmC66~RHq`L tK)fPjÏ|+{a`ݲ#qoa̷Bw@^v\ugߒ,x*]}|W%u n'~8Vr_iSK?|+RϏv(ɼa~Uqz7ڝ 7,2~[YЩrS.~賉KrNj #0q`x6d$E07B[af0UTg'~]D5bDSY_g4bY7Zϩd>I_Ŝ܋0^㸕/27.}&;G`e=r`os;oUz E '%T+2UŶ #k#[H֖krZ:Pb&|!ͧ.)E/1 h 6H] X*wU"m-Svk-_CGFlSklRE{ 7VP'^y=Ю (׾%6oY܆PpTE lze @%IYK, +sNCq1R-4\Ci@3ZjQ9L^&`T|yotCյ,+G>uR3gy6>G_TũuAtM0f'N `9 M>B8otkS؝E8NWA@϶pRVł^2Eϭbhzf*.g=/ rAg6<{XB0ZckyW?F$m} f9Yoc8gK־K@;n[([|6zu7*uXuLKbN\.CU܉ޫ}&/p :̘2{6WسLjHҜv&ꗘ_.iPU yal5teMN$nж"ԫjeV8iYvNLдPޙV`z]@rUO=fUE p['<.o9k}0o}dvw? E-#y^X!Sw{^&h̋C~;${omI]Uİ^qxyǽt3ߛеoK1mӌR g+oZaYh%B 瞌 {&SLu }&M7A8$='1;r҉GK"EHV%ϓP`0<:"JpYb3k1996x6%VgBta 0w;#>k.Do cn>=x$Hv6k?WnfPy+J\7}uj[IJ{krtz`+c6׿gfc#u l:|Nֲ9 8y?`}̑.QXyIs|xZ{IkىBd`nWEygdY|^?yySs+[g&5e@3Sy} |m< 6vէ:Puy|[Fi,"rgt9=ß=E ;r g*tXv ŕ3纓`2_䊇"7\MR$V6lwЊ+/F5ּZ g?{83fD`+͕v0`S .ehdQLjOXsyKE+t;w]q_ϰS34=DBkSjBa#O]sDÈ+xX/5*K9BS0W?tәFL=Upo'Θ>dYJ.껬7pD\󖤰=0 ~d]=ﯙi}&]XUXZ~y?Ѭl?sh+## TӴc`sv \&cmYUmL4Qz4*{4aCQ(,s'`e7g@Cw_H֫{ZBu$Yw$kp̄([u}ĐV趫<|[N7~NFT'YB>ym k^ +qа(*(7^AJN0'ArG5-&Cq\.Ӏ=..dǨhll"lw֧H\Ή5H_~xYS/ &qc[ c`p Ո#egriɔx7NH6/;J6aˉW,AeJḬ:W2b1N萆#LMlm^azGm-vټ:Utw2H kvwjrC))WGXE?u?](I@ #wDZK}>f9}2QdI mya }Xr;fQ+w$ٛMqܬ31BhrBaL<>+Z`$DW4 x{1wƪgVt7?' hfSsaI~ur`_O|3$S5;~8y?>]{$<گ 7yd'`ώ4TlapL6AC;;Ma/g $˶o6q@gq[V7(|w&}*5sjBčY{5._Fρ~b35*c&:*Xf0sw\{7C˧ܰP%-]JuV} ~!{N8YUU˰taR=,ea;Bp FLmkUv{ӯ#N7rz?0D;8`/dXr6r̛uI$ =3 ͐`\!4#1M0˪YƊE4gP?f{n>kWG*'XO 9CTql_IpJܓ;TF1MH>n6,0X'(r{{)WF3j5aDɜ=dְc`f;vkgOE:sT{M|*"s?t.UO=)PGP5T:Lgw8#%p4"dx`crQd,ZbLJ-aNZv,j 2a#SH[TR˟/]?K\&0iU ɑGH+jUcmAҫ$~Ypg9G.AdU)lh$؇Gƾ z#<@Ek/M8_g.sn\׭f~0%(ݳvgw=ص! u_ RpE({<. 
fW\#"Up'mXL*2oz0{ݾM# HjÆ`(lL+%"[9߭Cg/nfk_.]-〩KL<]+kƽie5c䰭SLj]`NޙP0؅~w)|sDV#6#㠃Q=޾>0=/,f}:ۙ!VX4܃c)"{ݻfPu%M1#)~ߑ#6?0'đawp]X4vz=؍E}Gq1bwGƬNjTzlN;r˔>NolSl1CzPiQS#镍HHN˸=qARN^CrMBђbH_2a)*iwXJ%DLkfWq$/{a&5u?}U"zitID\W ,_W-NKdsoXsQ6ź0")^}Db}V>z>t*٬TĞ>啐."?V$ 24 _*$Y/ZjcHwwp:pZv}zt8dB IxS UڣOLMuI_TrzMw=@r a},c+^.uC ߌ4v8 mi۪hLYԓ3H 6]ɲN!Kq~򇂎5y' ֘j:`.rhcRKٿx Qvل3Fk9zݩNl6L::qsbJGrJ6Fz0E k,_ScZߕ˒`a3<<nTя>~t-Z]ԂT %UX_3]s>T U>!B8u:kIho*5qZ=F޽=Cy֛YzJm#}}Q9J/ݫ۸@rg {9*a)NCڇ>-7u7|(A _[yV6T-J kNEڒPf *hx"bEU7:ЌQOZCzQ_:s' bemx+z:BqOhy$4LpӇ~ځsS-9c]ռUN( Bn_7Xz󈌏Zę wgbGm g^w\Rĭuԋ07f0yF\|'pF:gLr{7ȥi+2<{\;j6b ծmG*N=+j0b?CN;k`^2o$:^{W -z;,,= =dzU!!]D^07f^ge, K65+v)u ]w} ROKάr~~3\X@Jw {8cd5_8uyq0 zN'\_ k77ۄÂ5SoFſ)Tu|TVs-~hM+N8Pg":߮<És٧V ;JyJ}sj͆:87W?ݕYPSa/H+ cǩR.<>tgo0rr9ye1] ҈3>!YkzIv.1p_-yuMn2  gXg8%U+?jʕl*_f{k ?8!Wܒ JM$SKs8n%j:]u_0w6m|$4x5UtkVV{7%P/VW2tzMSvB߁>8؞urq!u;YA)}Cxgk3U%Pdgܟ0'wmn{mSqzf^+][tAH~|0>wEҹUn.>髇0y ƠsRYFZa;}{8X ;N^}mޘ}voqikjԦw`gy#E`t^ X47Wz\ /60.vnqZ!rC9vG.1|e#5vlJVc uK硫m/>m\؜bm:'T]wBDa+Ӑ*@$LGltRdi:%Gr[͎=Mz3 zWy@&SÔ dQC0߽>9z9^0Mܟ Lhq7n`Z7;I݉[uNډK8+7AUc+չIJ904ܺr s+DndVc3=ځoOAԊ`|r!籪k%䖶n 9JP`>vI} ੔%$egEnh]fYX`tlyG.w,25% :<ݡRm$wmsT0*VԐZixvs#!HQ0zl8?sѥjk? gm/Gh!mC FxOe}C3j;د]xQ(TorOeuƹ=G- ,Ĭ݁w82Iņr]~8֘jM-L pH#?.<&xZEC꠿G'dslH%9=_O°/a{dh8mY+M6 NL䮇z_{P!w!Rm01d.ӂG}U2 yUL 1ێlGRzܵ8j5]{k ~o{4cS!to98!& :aK(8޳*!xf/0i7`8g|Ȼ{MsOxzqq}{\zmQY֎cJ~[#m:Wb/m$ᢆ ڊ)1Vyx~X N]hcOmm϶X#p& 6Nռ #&!P3έ-U JApyjLz`?[>&ɏEևU~Xt G<*"j Y&cTXV{/d?kb3+S?U9/brN3.Y4}t0Ԧg'{B;tKAYY+#{~@Mr~cS8V,ܼ'JG6DTm=qɹV}ۦc.}<>d #aDZ4/:l6BBQS.s\Qv,Oz*F{4YCߵnt--'&YyBBMzXl`D9E$OuKOZ&p! z^@2hS;R'3Kac_ڻ?@78? 
Ckeӥ7r67 ̛&_M ʧUOI?^.gQ1ʰl?-;p93bL]1<02GQY͗Y Kٷ''~uexA|X0Rw =BzzԳ9\| ٚ.YyKWqgVI) (.i;}}l/;#>9cUJ8/U[7RQ{͸i3֙*j*ygßB[מ٭kC~,VR/*$~Ԍ!n4J`PؖoU쟘xo* E?V$Gr0UAI'MujCYZݕutWğSIWB]3dKΕ\8=؆Kۆ$@(zGRn'.z-vr5X;%]h˙^W9"z׶#j+~`4mgbcY>qB*բ@c;pr^PL% U~j//Ķt7 <|r`ftZ9(|]r|=B\cѡW(oqI}N,Rψ,Ǝ̐ -۠GK08# XfN1 pT ~ M:q/?tC~h{1* k޿aŹYoK௕~ :[h䤣 @Z=k!۳h[8]"tdaBj'yt0L[zۄ3:?eժ#M{'/M.ɣQ,B6qsw'O~9\m [8'-]Fu>O:G勡+Gw;JLZ×fTa%O$aS3.Z&vjۍ(Gq\30սSqO2 -y]OF&߱|tF'fw4=[z/`\(@d+!_ GV.(:`ymu8]}VW 6/Ƃ*i,)0pEC$RKg)@إݳ z"^ jdn+wKnӹym$Ad]z8aW\%qKU>WsLAzj6 :O]`8~ytm4 H[L3_)kI)e?+4<7=Wo27 hH5c#!6Q[~Bȸ9a "Cd{`` 4,^)}^e79bݷo ^4uӸˆFfLR0aI!10\R[$;wŻpTE7o|rveqvZuL_eUTynnT=cJg֯zBt!@/ߛ`,H m剝λo8(lfj>Щ{Fa]k`4YMAcovƁp88A:UeE<`]UC"I aK'I = ~}PjSc:lшttpIra9_LtT^D xk݋L.G ))\xm=ez^D376CVx~߆Y({vRu\Fz%BtB}*m7J0z\^5P滄=}>vI",lwz 7.)+)sNmP6)NWMpJTo\U%}U sZNo8fp/;$o`+ޘ`-OR0`8^h5O'簻ΊXc) 3FzCЃ-ź`?g^(tyFp絮%0kٙ!?6YC?|P]0LOK\qG؏!Jd S~!qҎz=0έ+3+nBǥSYHϮ01bϘ\6^[7oK+oJ;Jpz?J0(ftTvŁ:,ivQdʛXG;n?jm/X9T]"%C !*3d𻐲Vh+:jOP3] O$VC {ګ7kW&pCxhMl.bdrXG$|:*iw {(*V`d_Ӟf/ak4$b.kT L (:La>+gz@PHM_74 ^KKhj=a YdXA͒FIu(4 |4>pۮ=E@;tԺ?0`yBayzNc4vjwx=\fajݷ2q8]4Ț ݠ*yɗdR]O%({l7z7oet\ qH5>Pq)?=©L:Fmnv>5@#jd_5 7\M0{")AO7_,ABB1'?rz[?(\MMӓL>·=q嵭/bEHbN}B/b[Zͪ'o 7󼡌U>:?wU/&5vz~5Cvv$lhjAF9WE"Ѫj'tv,g\#I}[s'.I4Q[Zj+ 昜0|)e U2ex8b1*+*?^߭:La)?iMqRHEŒׂm#o0eE/[Bwy>Cns7r m ʍls}*G[ҹwKH^AO]nIǃx3k眔ɵӭz<`"ЭAR:]0.ycb~[ӱ8sR-!yixv/M07ѧ&aޔx Ih£+mMi#`_]P&~.^q0؆BǺ߻懱,w܃"kDHV-Ӳtod(zm:;qGQR^g Tk#nc?#?DDn+䄡L]Wq~vEhn]Ì'sד͆T/HkUVbzeC T[V꾧;@OJBql_s~Ac$ż)=5 j>:& ;ٳ<1zbLPsN~\ s&̉MmCXqjVN[0`vI 3,\a˴ǮO(aNh4V*1қaX[E}t}M̗=L$GAOxк$뱱oq`K޸~gZqz;8h2 2<0(\'Wv%<˙>)_]Y8 h}'Z7KT(IkKG't/ZB1Gp_`Ț,Lmq+R2kuoĴ0~RyB;];χ|6Sl oaT^J4PBax" dˡq/,=fܣ93H0o֖Flh9~%"omC&Z h~FՈFpZ_]wvqOP#X[/c C_-nB]D ۭG.0\eO^wblHJ`fxP9V=.Lvu8zaM)7j#~ }|krRdz*M2EyņBq‘ZXOq<7Jc aw(7ay08d)+Jt1(0z'OHVg|̧>-/ϐl?4C5۝ag&7M/rNT hS) Le~eB{n]1s9êE쏏OPrj>i'zPi|r\"I; '2M)޺@};&w`S,xޖf ҩ;_[SIl:Z Wr4hm- 罭oGW[m=GMq!}lhN)痟^ı]Ank0Dq^gy?ZL+58<$ue]> X;pqb'KbUan?|u<Č!ly`KĬc;t8f9=˻xacaqס\aT%3+'B0[kw6^)_7Mnv&+L'YI>m@d9ԼnA mg]5%z>l“`[rv(M*^mEq;խGF*uoT N}Z q9ķs/z…^S덷Tc 
>AcF#"gֽ%/KhM g{6_an0(ru|eU?ʓ$,287ֻ MEaw[1{X0yܱa*[B2 W 7 OEJGHfEÙ!?I'|ga$6Fn >;M 5}bA0v~Y>)seW/_8:mik-6݀,AwFM:lh%|wvoWfgqƏakD&u"fT4 ZHT<X!69~@K{P>GlD9u!/M@unОd>̺[ob*dvAY"AM:+\+)U Èw0~yv9o5amrA~Rsڒsb;vmF@L/+L/G<wnZ-Z 3,Xeg1D ^9Nt$Y+~bdwV5h75P9DRX$o`CkHOq&~7)%!{ΦENܗ,|4FNv"Sgu NiHv,ʦ>ٷz4z .fW)d,J5Ñď2$sSݭla<L|,G|W f8şՖNJ3U}Nh M}pސ0 [ym>/<h5b!gɢ˔#ĺе2cNuL 1:6G-qԹMb;Sw<^B s_t,;"x`ZP{Oq2+wB' 2r 3v{G|vV"[۪fb?fI _ JYşܣ\PCZZ( wlő,:$ G&6)6sC,z# v+al%ۆı:q@%4&+U05l?M{Z? 'ˠ4;|płcQ}zæ#]lh TKjֲybw:WQ7bKa6O 6y 2z1:!N{53gP+%Z%,yj[bhr뚺q_~@ށUZ #vpBn`.f/djݚ<޻Q1IJ[kr@h%Y6}'|.2׵oil@̗/Y5s]w%pшыmo DZ coG k6V•}lPQadqT9*d$j9؉d;qGȟxpjp0ݳFEp(+q:b9IC߻Z Yxc=iC"Y\M}{{z]#lvtnu\E*Fq*X޴00'O9 aϮS 8I 7.˲%r`abccš+/a? 'U?(Jae_`n| >9׭}}y@ZJ藬jĕ`5xySYT.mV΄$X+k[qGn0Y@EFO`ݵ;R0E!N)+ۧv[&a$O)@c߳ض~` =+Қs|WyMkFen8&k[4V;#NUSB|tX:蛣y(A2㋈{Ϫ:L)r×)@t&@;E*f>A_z+= -S4}"j#Ⱦc *P6,𤋮4Pq`<όSQleRq8|cNP{n|VOb*4tU8o?xةU'~C<0ՙ"iUb'{zgIY_a$!rV sE&޿ o}8`]HTI&}|&. QDяΡzLW,$9RvF|FƂ]@{uY"L3n{^wOE_ z2>vh4"={Z)2 5&6#<AQAH:赫<:?-񣌉_ }TZTU )D`kfoXm#?loR(Jr.B/~Fͭ"&AzyܾsPK`H;]`>?ۄLzy8 K@5k'D?eEɠnW3$l3U L_zw=5]]w#7=RHRrB^Cu'!_~I]YHCk A4 sf{,^pC||}S| ׏~$ ?[Q0n;FWtnT;`>+50WTKbz9=_tSq`>3ks/l<`i˭7;6H0G `Ɩn &Vd[x<CBvSh&op>]ꝡЦ0,<^7,OR꽺CrLgLC וq#SmlXg 2Z5o IQd- T=}#%ElwJaWj%_>llJs p"zfJ2_q`}N3 z$,gXOU\J~N foxfƽ~~xb|k@ۛdM B2(0n i&f\qf*.{9hq~.8{iId.jIV-3 [ڼ%n!](&H~ Ak;'7qhd ?KL-U' qfW΀'Ҷ-Dڱ )ȗj?8NABjP>^[5#9,e.!L[8}(~`0U7L$w@|?;AG\`_z2wo9w.$FEA˅fiotGu U?G}'l3nIP|í럏) mʋRc079B_V}!M[4K-,ů%6 bEk|{'^\li7UW9.#7;) %P'I ;<( 30wOvu쮰Pә̓!O`r}PwiE|g$˭@bvߘ=}}y0{ks<Ӗi#g}? bɺwű}6E(H^\\CpNnI=׵%p#l/vm W<=g;lkXcB$nL9Kiv &vqL/ZIPVnz].tG.m@{d?4Kyi_T#^ǗTؔXN]}/yYةкၸ,nql i}t{aOr'qQyquWm2m 2 M:@jBJ7T"W83%a=,S<8 e=W1ROω|#Y x7#ܽ=qKe/]Wfc@m`*GqջrTq7j0iUĆl|IWiu z A^hHtNu}AyXzTLֻ#ЭR,JT<E+|v)wm!(^Uip|ۍGpEp ~M\ӡ5Ӑm&+y߯S $w|áQ;ZJ0A~}(;,zT%q:;fl8Qc~ʇ+{;cڶ8@7h)RCiE %{D5 6@KYf"Jb t,')Oi;lb σԗaNbetvUSFXA &&Vw@U+RZ R:䙲D)}y42.(f]Q9_U1{Pt4n=ڇtO~L'j`1##k(*@g Сb߼zɁ><¦~ᴚ-32X>r{/0䀟'TǕ6)C7Z%\s2L+ |@!)}SG]}sձI!j6蹻Nu GY.2^r"N4?rE2pC 3[|}t/}fA+]}]UY~"8+mi}D!|jx^aY!m{B. 
z$Y1ɜ|(H4G+xH6OvH^-W?K~T.,o~Vw%t4Ɖ;+TRn Ǭ \6^7xBr iǟDd|G.h_ _Yÿ̇<`mS.޵˶S%^t#&29jgL ,i߿ Sˑ#^K])X &s$8vxF'"y^kE;K8E[=tY2*qX3hXO]xC'7<[Y u+WT<_^5\7R۵!MҮ5d&- 7:nP%xLWa`yN輠@?y!+☟t蟄U^:h<g6?Xe:0+fب ]ÓmqZwe f{bl`FL|O:z9YƷe'D+BծE, ʸgu1- o%YNYȇ ٿT;=mr L4> kG'zl&)Ic.gO/z00gߕBG(_y~Z0J^K0[ͅW:# *8kNw`J5z GCX{vIxjF+<x jki13=yΕd?1eF Ob}7<'kĖ'܀aSnn P#U6*cT;`/ֱ7z Wd?ۮ^ MmRI*uHϞ(IJ]VteRa$9U ={k.n:7w8Ǐ=IR|+$Q'i!)BR$E$IH{}=~~.~_/8ެ[||OMKrnڊؙS^naX~4pc8ٌdRS @kpDǁ`+n:b[9#W?iUl24oIf9& }^r#۠^V_uxm43:sֈ{@6\=H޸D=RMqg!h>i2?SSxɆc{wCv!n47LJJ8bil sݶ~y+53Mߌz's% a?а4 *ݞ*8q#3U MBR^YzB8,%G%вervJd7V{>Ihח^cF,==wٳfY7|ȋhj #H"R_ıNMW9_lŞVG`VgegPI%z OяXk YFFz]EeERV{Y!goWُCZTC1#~-n:p>$#L\"†N#8g5"qˁm^A}2_f.W4\Ͻfkb:JKVŘ @;~']OLSK%W: .3; X5ڑuG\ KVXI} hyzzaVqeN-/T,],Үd ړ +rAk>B.Q*.4gA*SS4`ٛ4z1X2Ln$X2b\`i3^l*>xx=,Tf2jeoE1=) lF&ZP%={;ϵ;d؆vv]W 2,/4\^[znj"/xuK\dRڮaQ0{PqR;dLfM|p%;zZ׊U~s%S7Ws(8gxl̹]fX ̫sD}HFAU|^ TAv>_XvIdnKqbZr7vǫvK3y6@cУgA8ay5+:X͎S6ռӰ"c enθ{<*-2Q#X]:x`+ 9NGb5b9gүd[1ϋ+/y2, ?=q UrHL T raڥ!5Γȟnрha.Ma<껝TٲO7Cgs]/Ob0ݧ}eu[wErA83_hBǫ8 3N`geh[ o[|If,b+͔(eJ3t\Ǘ-8 ^؋c<(6Yf .ލ)1,M0tM 6<_(+= _ѫ K%n >G?-UsxR7e:?CUwL.5ќFd^!cǠX-򞍚14{`Ť M2ۍW*,q3lR݋s?^ԇZI9:`Z^a-+'qgUt4x|2-6()F`8UfvT f![?ҺkF," E@ZofCqX { ͚ m[LC?hZy7C\Xǒ)PGY#Xby*R8f${yq۸`''Q;.Y?HE䷧9J`I6m -lLDW̦lu6ܱ0+[XaW7nzp}Yը^(ۓ9+vLϼ1h0=~0|C>hL}0|"s qsQWXg\a%BHb w )ѬRSLs8S*0:[jC▘X$`Zf̳%X "t)l@s[ %4_z=Ǭ_!cD.O}\텽׶'şѹ` fgk5z 5?xUa/:a2q!"ݗu(5I|:OόbK)9XM9)@Ohf`6wz)DڍL9PN 9n'IdKod[tò8y, #㚙G=~#JE]5M5ʏqF-f]>9!酓f[ϕ:51Gaaw@Lض!, k7Z󾥸*,Xl[D5ZL7vTs%{\ӐW<;slB߯4a:U|G [3FN2X7}rj~gw@рSx*6=XzӞK.cqLr;WB_+ BvŧlVpdܐj2Po8hÊ]WEQ,GY\BG2pz×AdWžWB8ȹ-\{5?a|YZ5kl;t1qD7I;FAd"vْ̪gMT xjب/u,^ەFrdL[6v0*p~ ΖD)m\984޸!0Su _Zx$7uZ j^8;K42|9#9 R+2qBA*YrƊ .̍ː aGȅ86[~@MQ"N!cc~mw:oc5Y=~~eҚpXL?2 o7"`fotk\0!AŢl򇬸dSynlf Lݞ8 EzD ~&Lq)(\|pQ=&ADW&!mIjDPLÚ7e_r}t/L^^|ׇμ/ FO,#H/bbҲ8i U'`FVƿQ0aiRw;4¾QCb@mi&${)BS Y濊r_58PXQ%4=B[VN~E"!eM/V+ 㘯U9S^lqKf;q՘%Pd~ezԻ~ :{nsM n5YiAA|Ŷo8P,McX`O{IH1N|v ktRV$Y݅z YdAHD7iӭCFP`FGϵHm-SS[0z L}s5/\]m5,J= V90{k+ՈCڞ,q]W ~DE|tq ̔ !Bs]PoUqW@ GiN{[0yN/3vnk!bg?'p(;{rҢw脩ş5 
!:02m}z{Xml]5WZzo_G2ocw϶[M9JR36tUR%=47nP_6$8aޙf=욲^w#M &eȭ^CXz2iyMdҡs*cGC,պ0QC}gWmX G$^ØGW;!I~m̺%S &`e9Цi'k!`M; 1$9|Nuia7 U_$UX ]^9-!BH+JٖT#ӎb5:5G1zi2ֽy@Goͱ8%\]͛`g3Yev}%Zɖ4q 7~ࣸͿa4ޥ"V[2<aQn?W/`IYA ?y SGnHǢgP`rt_I[z (E% TYbr\9qrhLA)bm^nc4V({z_ͬs\7mǞ,9)lV`1u11bqʃk絋Y$#P0`yUw,v 5`\_uLJ3v}ք\(;14<5.Ky>*e 7[ QZ}T`,~6rضZ'Wvlk*nI 1ܼf FȳV6z`5݄8Yp `pT5x'5VO~}%`Lå_VF?yb]jP-螋SzwXa`'?OKÂ:KȱU{ L%qbYcs~~:;#4+3O 5[qA`~Z^5Q7bOC͕Ssm Q #UH9϶gA*S|7Ef*~#KPzrGA sDu( _YQ'E@`z+&>сPcp J+_-"-_Sd} }rd,D.2k^" xLw8%GA{>e,]nndt#0dh+? .;u]ƞ5-lO)^y/%E4SX}^U<-Yȸߒ0î+'{7*brH.lNFw%Nnm֌)\qc8@u凧fT5%X]jx X1!H?-cٷ؏ޯNJ+!1{'Ϸ|Iٕ~ϾUQv2(oڼ,,_C(HTTV)if8wOw4Y>t"5I~sj4"8vhC!?>[i_].w*}HCZ7*.]4; GSt1E뛚|vkv(S`՚/0eܥ ,U?WuPCBW v5 dz *}|Pw=Dy3C @)wOf#,xsNj-x$XV~e*V{kwf0j_`隝bo gs4n',~")kгYxHp8gc/sǩji?~w-$A֥=E<!3*cގYI+,}¢ :r)BwޯoC%Bk ɭFAVX;`/ + b vnJw-1d6*KDp1y5L0*L{|Xo>J Rf3Y0Pi_L SOrP ?¨lYf-]^Xb]~*o7t+γ$Aخ #硳7:+Cw1`\ <?ߒ5G%wgUBD0Lfm $~8g4ny{A4#2Ү*'CNA%p.:}f0}(h8L:s19?nx(TL!?2ЇSշ;֋kHп.<?F`W0.Q+d&B5t$_ Zkoj𐿗ahzr:X9X(zvB >|U%I Ö/I0;#fA{r&RV١) V_Ca0(~LS+ 8t-beP*G2s?U- ֹ'g;y;y /?I&ů@m;vl} n0 a[9``g`wXռ@EfG | ]h4Kr@~\3n {:ոkτ6#g V辇ԾuA6seUDuaC t0N 9f̊X7HI'SXuvYb_ N&ۓ›viS,Az̿JBѦXkC8e^}.HJqĿ{ 캭]&iW_ ا(dn0gBfK_p灖c %Bzɨ' O߄5>w|U܋䓓jWQ cJ&MzMf}jSw)_7v9l3W$Xt"sЦԓY~R=k/*ty䞵NǁbZsȈf*~j})ɼ%)0{6;" #Dzhq|]ί;&~YZnK>AĜūi噊aÚ0z+5K+[ɫƧe`9S1ٙ'ߚ#}Y4pn^] ś'c:`;׶6*[:}'ɬs&Wcmu&w:x@sǙ*`(UTt(E02[iKjO%8eokxϺ8bDH_҅q'p!(gHn~];=vmz*RS+ύnƄH[wkɤ3CO Ψ$7H=dG&_=!o鐆\ΗbZX?)'CGKu݊Q_5VeUQlSUoߥzpL=` CTwsyI vl"x.9idQ,<0q4h+t~oх_xi÷!uoՁ7>$^$BЧ.ll-\q- 7pl{m\sI t:ìb/`Ule.\) kSL7Z|Jm$,&{C~$ڛ*^\ ]u¦v $m =ak>MJ`Eh(!#+52bĔq˵_׿DzM0ܷ4RmD-v[60⦵- 5;?AW7N=2*y?c߽ l6 `N rEV:li %n<ᱧ>BkiQO~JEL*:K Z`aRX%1.#y!/GDc~gKX<Ve3~[A߶Ѯ|jGAvY?tXvA$+' eŠ\D!yD =+SQr07H#՚]MeK>0w?dd: \^# fX/9 Pcem4uwϩ,(:|iwץ/#2 ^19Ņ=* ;goi ܯ#C&&@mCfcz<Jdd˷>ާBcdmUvvOZ"\p+|%Ff'-'s=6n1>c_{͌sXߤ,vonuG¶$ w`hh9hҥ*mX ow Che_^PI +PǼ;`d!l_S.bۛNF~>2{\S=G(5T)4wWK&MsR .B6 GHMpyWB'L} 6eU@`~rH=MN1uuVǞN!7U7tXw"m:**HoLpG)K5+\*r!?J3k4WDXc)|&3z{NI]#P O>z--R=#b 
)542۱צՀ_U˸`F{^Ktm{iM<17kߚQn9eC4c|dSB6Sw!J4Jr^=I*];?]mk_@~K@Y2ҿ3B"eV;GiO$-F#8g{I|HQ';{;?p/so<ډ=8Io)3MNv_ͷ)sMyШ5)y5ٕ="#_rE_BFblW<֋LҾjzVr$(fTk'-&0FÕqxrS|* 39A~ uɟI`Ϛ|[Uvr?J f2tlh/Tl2V_+YD¢ݡmH)|27Y=z:xzkce_@T1&} &e1 "2, [U/Uf$(av03Y(@ MTmxww!edsR\&=0,foyk}`pgl2V>Sad6,Kipf,Ԁ '0{itۂŮ_$WMIr0ƭ'dDqɦImҘf"jc:x(Z3Ө?׌Om`ט: 펂σpv|?vJNADJ^ 2 )\ z q~o ̥CqqOmAd ui8DV jٙ C_ SlaBv~ f;2h`ޕrػ8whhޑv%Dg"_`WpfxΏ ]^{Q庸ţX١\Wn^ ׅkcV*؍{c;awrsg5RZuvbcƶWj\=s`N{mS_comC̻.iJtdS@*k->lKg;{H?IJn{ ݆Tְ[@g^OfiU HTo8VI/Gfɱ!(|eo?,(kBGq9\rq> ;zC&L9%mf3QyA;m BFY1 o5OҦ:eqt@ fY߽8jZoHŠ޿]'pnK͍8kgyMP P7 ga0{~Sg1Mn[0{я352E u /Pq]v1o ljф>ґh K;asy:oڲn.˝+W`.ޔyfi/_C\0`p,I;>=gv_tuUcw;|;2$:! tݰ,Wr8hG|HǪP}Bl;Axo/ +/stUu?=hվ]D&s>Gm fؽuWN 1o[0<{EA@1œ={p1־GI΢$ljy`C1Ɔ ጢ¿_ᯔ6;@$p=w Ad)S?#7 }?VԝÞPv=[wB׾zKy,,cNFڣYJ ?vgTR!5mfM}ISKm?y V)wŗ[G95a'<2gnh}z\CrcL ۷Jǡw5/Fا dY8־{"OCUlԕ~ QRdisG'&`4㥖|f}%$^X{qE1gBMGw\ZM[sak.L_Zy6Y .A΀i8hW,'TBzy6ΎZۢxNja"#BAּs 4CHCZ`ԡ!{es%XئčK/iۏڕno ]wV{ʶ 0p}W3HM {WWzx*%pP^O] VHݘgMNbT(ge;>W-^YDO~CHpJI$f 1Ͽ;{?\ t't"2uLC ƕ0php= \C ˀu c֏dҸsfӅa#߹βǮ9%wO^5@b<^]B^*֝uGzrܶS!;=Cݾ ۤa_J t3e܊]!e"ppK-0e {9)G;hcꆸ%aQs9)잺XB>}} " 'qT x[f&p#L0.,;i?46R09$y0:٪<2} ٱ6+3aBHgqX8l,Z>/V)cǯ!s|{?&s(ɩ6?qp3.OrPEdÓk@׉?c8J?a/0' $snk+6FZem*S*zF\x]%MO|iB+k}lMӷvv: 5Y(PI7A6ە!JFE3K)g.!JFixfn?:/["^[#T,((nn9O7CJdћd5z}ַC굟s[ SBsD:?;#VC)qD9=r&S'CW'ljkaPŎJ>rcK’2}I:r?] 
*Vא>ʫL .7VM!hD9s=T2 /ם<-ͯ^#+83; 0sҗk0C;;W\[64LE}'~X$ TLlÏRVGXz>2>_7ypTc~e2WR3?sz lS\i8{_ؿX`-2lxBI\b@W!#/RyuGZǮz'2ɼ1H ])X-!+~ sYF@$z/4XzzB\?mV]t5ԎA F8yb]v>\84SO5̐bKtp_V$Cc ߝc0CF&w>S46顳2qf6ee3%8dwhl7I.HʸVz;\ ԮF`̃Wt uA y aMYR[{:0"e*Rվt[`/4Bu8ܸ'`Flav1w0 H$w^g}" z!7vqMސ[^w$a DĒ'nw_[g-%#?k8"`VfDkP>`1/#qg_b[Ⱦvt^{^+Zn%; H1%6%U`6MBem%y_\yk*_]1 }l\/A'kN)Ր=6X1bi5J"yt2AGϕ uZ ӌU$ߕ¡42 6ٻ3Tqa&} uM*8~G$cPW|ֽ 瞗獤f[63O\y|6֯/L֏иr}EX%Tu">˜1O?ͷ!Jz&8LG4iM\ؖ}ڠj}UU#8+we)k`f;mqɤ Ky0қfPխcB_=4 ׍gJ@}u̴Mff| 6>,mH:AV缾Eb0S)&[#QD?d{g~4,y]v:mÉ{Ю:/84쐉9|B* ["tV4 j~992 LǒNMc2?+g9-L3S)JvjreO>k06&K<s60-tFo}qgٱ&5=t_Fϼ/a/qglmΰ?Hv!.נظXJ?FTE6d;Jfa'.Hzיo')gYǜ|Բ܇2}ϋG&0#pNIoBR<E2)א7EZ'`p|cG`] MU9j4Tqy;w?v]cΫ#YYjY8kDq sk3c?߲mrHa2s3Gy0+_vo74ע5I7cE!yȼׄ^I$s8m4 f=L KLEc235KJB.šFCr/nʧ1wڝOuk?뷲\l^b_z Zg }RgP:Li'cSe> NpEΝt7߮+Ri,\zC:yqGσwTz"JY,/8~]kT#=DS ݷ_)k3pRs*왂G}u`sfSmcv_.{K՝3<5R%]]g|\.-Sf4i2PV}4kt۟4z{~m{wBu;^@ףsVZ0Hh/3-"moV𫳔?J( %+U2 u1)OB㍟V'쑈KȆNU0-\֚Q/:嶺>LU3'pʋI0/ Of ؃2vZk:r2,49YcuXE-Wct_/l8fnN&>AVsd;.-awOgQ 6w#T޽Qo $5}Mngޱ~e]El oPd+o/C{+ףu/ڳ8*~ewWis't6) >APs%Ql}\;5X}LI=B̲~[Ȅ΅lSVag*!xVz^n}9tFl0'.AKx|TaI{2y ͮOFFTqq1}Q t tCq 8֕f v'Ck&]þ|0'X1Nq|k_F6´g7s݊GHջBs5WT3Xc('y,lS)ϲ$qα}`ĭ9b#&))?6F3A+5j|i1Fq< 0֓M`Lf54v|gC=0pmr@meyZstF&1 &nVV#娎_悱v)Vb%zW5 ;f$L@1]N hX87 Zړ Km96I&?9VpЃ7Y'iJ׊982`^C5572W~ (ɋErgF472m?%A$ /z8[hog#=CqR6smŜCͷ-{\K6ӝ`Ro,ro[b^LBڞO`L˾1j+~ srsO twɆ^(v5.q{mNikځRFkhu׮k_KVXjSel[ƢLxsK_gS ŵQgvS Qw)،3PG*|mpEȶvZJ{mG+ppY]Ç0=^Mág"/XW]FQӯ~{2ef cz}'\:.%i`F|O.@LX~PJ; {Obɚ-3=UGmIбI;S[ypts0{mY,d cie#mqbυ]řW1aG_b\д/'o$ 8)]dwqnꧮmq{`#.?p򺂾jt.X(jXK}W\:;s,$XGmL5Aon܆R髾k{X [w.byAHgvHsFI[UaǸaZaKR{s+Nʘ+z.0:]1TXe *u '/UC߾^{5B8ZQ5C[&zXkt)S+ c昺2N[S=n+8gOGvbMYfeϕ[زtʙ R{΍ag`yXʾ;$o)a+aB8e؋sؒc4kKEͿv X/L9AdM*oWQ:7C{kL~x4XTM[Mp}Ҋ)(L4Fz' 0_ۭ>xa(ss7zǥC,s fg{N[(8Y*"':g^#m#_Ln@؍0j;ol /D}P%7;%8M␕cw,0TBOǍ@mvkIOrlq`I[f+ f;;ԯm[Ocɍ)X1JlZ];i!0-¿*wߕYL+ "|LOW{p4>4ǁ,MnTextٕ+2h2910'*JM x7mŲWNIEYU?ty:jI Xǧ| q%àtSDOWІW&x5qD.Άs6ovNDS*z3ys00ƲV~q0xqYM}=hŵ69H (՝ĽWľ(g~}5Ƿ{*jo?TmT/ E;iaXX߱՝v2wU 6H!VRk*g[ RD'- ƗX@rU[aHl YO;lfaX%&y>j'wRʏ]: 
cwWbεwf1WnPgy9D6&k^.[jHœ912F?GDx!9<ݗ nn@Ѯx;$NY}(w\w&9$MGvn1k9|dגNűʡ] 8fgKo`v߶ H;v , dbәd8@喳p N5>r_;4o }z"~7s򝓚B Ix*pteֺx3^hz$f9?+Tzo8-mrٿI>m'&1j93<`Cjɤ1؉ih\'2!]aX6P~^195 kdar p ٦ |# O";i(AgMKΗ}i?>hh8w2%bwWr OXc3X{nOil]sQ;[@3}41ɧ ך0o񢕍8\ S?:pz1{_Y|Lp g6mySG$}ƚ-JainHtQY*G$oܗI1scAO9Mq@.(R=3߾ukL#ln0FUz_&D. 5?U:FN 8N&&ط+Oh{N~y;m~'Ҽ9ỳj}+S`=&e.;>gh6'6O }.R 27S2`~5 Oaùi2랎(pȅø550w9yHlwypHrJEҔY.ÒgoǾbTmF\ Eྻ2|Vf7VPdȬ9Yw #;NQdW˵Pjlb$Iد׳"#J(&g -vW+f$K PbC%\vj^/̞D7o]%`z߃|̱:w1?[q4ۼopZczQ;tm=N y#t^'V.$k%@krab)Yltcw؜ uj?=${C1HG3쿃OH~6͋C3}Pp*5Dr9,%F¸LXf2ɹ5Xnp%'pk6P%B p Z<ˇ j&=E$|?O9^R!Hֳɒ37s Z'1+ )x\hvB{A.%D{e%U}CO앁sA<\Uws>~POhŁO[n|cb0Ǻ 0W&ju|Sf{FyC9lZh|ݿrl5)jvzzշvpΕܑ+,W/aq6{6)6^t.G;mbŏ$!oG8v}ƣȔhf,{sfsVϿdqf4$~v0g=<>KWrH/qԙ`~z]c!jFG2w,(d ,dzBڬTꗑvJmI2(j>:FC\ZF2 2챜Hg0sTiH sx Aަ=ðӣclI{O |]>,#Ew\w%5BsaG|9C)jA<,`*1Kh]2ZrnX@lLU(6}M[׬\ݢ8opaz Iϫ@dضL}]o}As_]x#0IE)gMq0`Uθ}LT~/\"7{븠> O8Yc\/ĩ8px-Vm)(LLjī:@w(MZ\RBB &V.DVdÎjj =tN.1K8xՠZfjX8+hV/lt;Qʡi]w'礱_B߫]Y[{DŽ 指_ obS"vf \J0E ْkr쁞sVٕMeEET' ZBj@", w߄ڃ6:$Qg׭Ci8Q>thi)j`|cF<f%f)΂ܒAQV:,3`>}Pꍆ^\B݅~BW}2?drv-,O"P;-b.Z?[+-$r)W}Y0LQO?ۚmNc'dgq䫫mF4nros*Щﲿ~;iuFY{ȉ8Y+!envU -O PۺrO\F]'4i3ϰ'x Ng߁J7`+C.8/:NxGg]<eHSZ[Ǿ|J^ i/[7L9|,U(~뒡׋75 _jznY%)sIv9KJh'HR ތsό\X\N<@\c8gU7 ~ ]0Il^$ >ˋo;n|X sW;] |B&N}"~6ym~uNȦ:<#Ռ`9RqMCa*Lz7uN:W{dms)8,êP!t"uׇ8Ҥ6/c2\ݓ߃?:?umj z& }N)m$ur,'٩* d2 7Jjg` m#vX`x'6f+tΗkB Y8;͘~%9nܹ̝0΀Vޫ6aGv@_WY`0:(=YF۾]T~=>0y4s]N*$]wv ƕo~*_ZjI`?rjkR7kPnUG-3/K%ҢW& Ъh_᥽dΠ{ ɡ_|W4c3$&м:|^і{ZmT(kk}'_g~ZÞ^B.tuV}%v0׆bz>grNkN c ~&,vPDOao~p\bR&]RsЄƬrb ,Af[REm?faLs>8P'.c;zylƤUOD"Td_ }UDBXyQe缲V@=d>v0y=f[FOS"|>\YW:t(Ci|f·8tu/_Ty76$Gof.K= 8B,A 0R_h<wq׏;B1@5qeY1->/ 7+b|-mN3!FrI:' |) 8\z2@4_Yl٣G>%x,*-|ko^|cND_ Up:hjI$ :im@nQ~.,F`* ]gao_Ϸ0|f f=+ыFo/=B`ai@ɡPGEqYP#3 w w`뿢""ҕi|VpћQ*Za= ^Mcͷ*3}dDžk׺ ቜJP8/S:] `Y*o֕g —8#S@}% \tm&&:ҳwC64v(׃nKwcUZG[̠fS7'JlNg8~){پBM(YZPr O;|% |l텻B7+)09s费BtƝ'[#pHg|3n "32.޺:оt6*" &.CEӽ{¢^|ʮnB@1b>JnG/|5ѽm{Ҁ?_qCNcpQcH+(W 㤳WOy5>|y 5g +B7~5TQOY/DR}8_l.L}<]^MMy`sǨ`(h?[FwRqp}KX[+fw/̢?wyX r=h#0FhFmt+{-ا4?IxSO%>v~{ؗ>>ɯ/D+˅H)o 
i#7v\qޥm"H}ACpFYBM*-p:î38!Ӊ7^-"Vtzxd5]CG)dhE_;K!gɬׄ>HpHV&'xZ;CTt`N30I/8@euhLtŴOǾU͔:A=^nB#N \e(Kp=XKf;-~77 ma?h}OM:YSŅSoq m2fynjgB_b\tt9=&Ԩl|4, :9٪+a[`EF/1<'rX3qW"PO$PV1cFOVSAHg jQr}"FpF36ߟґEŒt{=CiT C_`ZBMۉ^xRM.}H}hW&yRE9 , v6Bb)QM (@N_eI!:ño5柾&oeغZiTޥߤÉ(scg7礝^ u?k1Grx/ Re|nU?^,U\|8 #dMpNWvߕb%ޕvz uW@Gk!m`$wf石q~Ci(ZUUQ`\ (Gy`=\j:R Nhg{!M.Ck3(VSP36RLd6{x磽j]W]޻sY~悰:c}lǩ1йFABSL Rau'E}|q;le$Hc6.`lߩb|j_D=TL(俳nNq?|joˇ+9!_LYcnJd"oDF˰Lzg&bZ sNīNh~~"R3Э]q~ÙWiͤ}ۡ)m6CO(+KǠ߅y1-:wH,wzܗ=㍑fv2Mzx݊{UtO L讼kGYhZCw5LnbծN lkͮE )6sVR9Jv R.XN=3K l5vK~wvO[?26g8K%o~\H:&0Ӣ# &ܹv~7JpQ#KPɩ,':s:ysDv+2yOh<ʧ*fβJ]NQ)?Dbrv , *;JPwTj4n(!!eѿW%FG3|s]+]K<{~;Q=8Hsy(Ñk|X Mijvȿa L1†М8q#IOU+2BVvt@d,Zdq:b'uы(UIhR']D<N#+{1eA8+rWqoepJ њ}vсR)V-/{rHubh4=sQ,H*.?[*H@vƋ/\3*W:TV6 eK.[cgf7?V=S Y>,x;C)žǦ ?nǙ޺ pV=;D79'3WlhI0Y@R>TbOrtN˞B^ϋ^lX:[kgWNۡk NQ+&0k-!؛1r\;'K7 -7LTHV4Ec-:h# /O,sK`8E=ݨ,މ>ıvVAu[a {]KX{`rӞ8'&dyu;J!ˤj^U1$=j>!Rn;Kb3UPl[kV~"H#ArS;vRt'm/j-Zx?V"X_6s.W ڑA;2y2$;w^/(X˻M5>x)8{aMO?;쁓m$ҰMnwN\MwHHkUovn63 Ã=^PPi!6YZxΌ Mfwą-бj&jGe6 Pb8-^ em5ׄd4 ɜO?Cx{p$Po3<# A CviӫMdy<~6 &3sl ߑcgS}uL ?#1>DWo$QYt]m';Ĕ?OӖV2B Y]?+r˼JSNIrء)]%K.͏a~nzm+uy?ZAJ$n>`DƑҦߎaXԁd/`)Jp޶#BQHp ʊiC NNES,x >*k6 ~XxUmm{ C8S-6Fݜq9QtT>n>RTTHTB%mHV*HH$Tl{:˱y{g|sYdg9دԣthzu`b˷W>3h'.YtmQy>k,nc9k#rYF{"NA-/H\60Cdo1Mlx Cǥ~ۃ@]kbvŞҖ0onM0]㫶&c#҂W<*q^CNX&_o9IBH:0/Г[3`/h2^xa+AƧ[@ЋVof-Cuk iti@|->_)D3~O('o'y͊Ϻ6龳k`1Fݳ#ĺ/i/+/XJx'Ӧ7kq%g0}FǯLjү)ǷU lZК˷ō9[&!B>*trcZ`KИ)`>@`0zkAjŷL($9j>ƆsN3yl Kmpdj>Q{:ddH1g媪6s1}cR8?MI`[n҆4l{a|*B<&a ZH?rЃ- {D¡X$2/Uz+ E(0P H#>Jfa˦pzfD=ٌA7q+{=ʱ5^q̞Ux묓 LlۧV'Dww m!K}0GM@6:Z J⼛Uaz;eNdVQV!LI7yQ ̪]sV8|/ 6w-gq~M}5z)~egsPy U&.8?Ӥ#+Wj[ m73g"mVaPu rt֋ZÈi—FlعaD.P±q{bmG<",g8| PN> ouwZˣuTkrI\)ݹ-#س}EiW?~1:rZk]ܵ^u{6,|t1u5kSkV+B<{ &Z?UF^c4XLq& Kvݗ4wH7N .; n 8K;頳~/|\[-fM-Pۦҙbngr{ G8sYN[ ڭ3Gz$.4`a4壻#_d0iKK`cΫ8S~1}W! ݿ< fç.. 
YG?$1f [ n\2>Z,[ǐ _UcreY,Sp^ 7z^|Ih u&cS'!d9a\ *>l]b30o&P*)s"Ί 7ЏGrCkrl_oj؝(|[UdUOd3>pY"CCmʎnaxDd0`R% c"'ZAlpuH|[<:e 4ۡO25OzAFH}$f?8(k/'5ov<Z -\$t7ɞ%ݴd;oW៨@eauDCym;b )#E:Z2t5e*~:?g=gwwVS`^DRiw֊t輩sqZ*Iu_7 쾃 XÖdBSf{1!pkINdI.ap=ۯDԱ@K90TJ4j? ĥ詜O5('qmڸr-,p-k_"~pEH`}4wfyU91brp(R֗l]Sg. Af{u"hyBƮfgU/d%0wҀ`Wn~m,-9/Kea͚˖p}Qm`*kl (N>'}ׂ2yC\ȼdSh'46EvOR?M,ap*ƲXd,Hj*͡.4jWD%.ťSUV]dO 7ܸnv8= Wa9Z3Xr}.̈gëYd*$}q%)T9> nyhD )fW_[ -nbӨ&6>N=v-(^c/ sX[8fG"͑ISTv篭kq~ >[όZvp.69<4'#2Ww&$-Nf=K ;rT];|q>tE` &<=p$yégm"6N=~_ǣU/O^^gY&aPk̰dNgVn$Ag%`wj)pVR  T 7GccB $ZE/ּN۩c-_ݰWQcs3 ^յ},"t2˔GǰkQMo0LޅU)J$5[ε,cA90M83`8~%NofSZ&|rjż#d+B:@cwqLFQ? /Wʔ"}Pi*'&O bq{ 8$x$m*g KۅqltȈ~~d nuץN͛n>[̙.,\"Gy]>亮sxCY֝.A9[2~:Lӂ~gEWj>q 鮪ցDZ.|M}xt\Js:kP }Ga^@VBply5 u^4EL e5֎?FCYzkW(,_1=Sd8&LJsa1HcHtH@^kQndaOoDy|7 sz~Mg!~G|z?zpjxdM9:ܖF9e^V$/]؍X%w\ G=!}K\Jk9OfVyׁcͮ7^@}ixV,^r7S*4'Xm>Jx' ӧv6pe^h )m/NV$jF;.6Cado\d׆Sߜ&ׇLOɮ`2qGTIQk%I7la6y4XJ<j|ЋCo=7z]%O9&EU}9`pj>o͇=Peֳj2J`/xs 6mhHߦuSu!n;ߦda!ꡙ5,ܣ/P Xkrb% U5Me]oY'X2N~3-oIs%JqM1Hq &^Ǧ4I´Íd֫n_xYAn0vl_S/Άc3OlR}$޷1$w$NՌ+u=J;\t^YӥPXڢ {RI00f2z9~N 9G0P^ Ҥo jSYp1:y`tOݿVM21v,ف3k4Yq-K˨d4^{?~q꾾W4z+suBFtGpќFuF݉7^gWh7oFS9|K`54`~P]Ըҫt=#ah}SYzuZEk>YJ;”#aH`i~Zכּ% / :;{2Z q| ƾZtF{/e`k Y/<̓};A|g\;rm%|z3DiZ>=`Q߷ K; *gD8eAfެY$X=MiO:_Y@% Fvޝhq?qSw בԠ_P5SFⰬoRwp@^vyriw&PUضp|A@iBEe6Ktc ON!EeM5V_:5C/]>0:*{q#DH `T]Cav/C Vc9ּE;g|ybif= Uk]ɏ{ (Q;?^Ã^+^%7LkF~-GCTp57B! 
y>8\k  <)CpW%CLW6, {GX+-NPvӁl0GUtI :՘[p2ƲasJȦ65# ]0fәXJMĸ owLzBIBgŝa-4aYd+x37,pጣ%ȤBnjZ˿Hi~Tuaفg0 yM]F܊كFIeܾzE?bȜG% |O1;;S=z`>|OdՀ`u 9pV~' 6e#m[S/[3/ޱ/~3|n )ބ%rDj; H-ߋ 0h:}LWՐ!rSt7' !3v7lcf_ ة1G\lq:~#iR.K\PEƩ!uO1O褊O>3=bܑ!tu!ⶉ7xaFI7R}i<ث Sȳ{a7f+ϸ!9i7oJ1#6M-'>.Uٰ|Q$`^\'fn[#FŐHf]P:*]$ݺ}3>)pmy=F aɴc L7˥}=pF-BS!/܌ۋ1##]njgTȬ Ţ|pD뿶~1C&yd2\u۫c݂Q87Km:*=>rEp75q[=Qa7LO+ka^mc=$SG"nq@![%C5Bf؆0:t'rM"nV?2[oZыoAאǂ|VXý3i=O߃m[.!EN"Xie+C1Q@?徠M _X6>^X%oOm\%¯JW"ywJtU0qF{NHGn}=?CҶ#Xl;&aՉNo8V4܍þknJ;~v6g/'D5~;#<86%B9?nRȤaLI}/% ]Bɤic1ZG>Tudza6qYlwy 89O#cnx^Nlƴ^r.B 2AXCǎjXۡߎLbC2%wXfD{ ~+ oL`0W;vSOJLk7i, 2L:GL($PR ]RTy]~3$ uoř^}ś w\ׁ_ _ϭ7 @ ndi>1l 6*\(* dTOOaԷ0Q~g_(+jAh S 2p|M=~lـZ'KptE{FѬ޺zv5(M123 ůͬޟ7?U!r<p"llRcbXN 7 D50mm_|~@PX~%{덴X.'z$ުCހ.>x]\De#ezT;8c3B{*wM^2'`k:{ $VQsdvo3a#=b0gG.F FZk]"`Göt}4y0>6?,dS-~oQƱѐX&); (To6w%G씸D鄌o<+0=.FV3Ձ%6]j[yJ6+M \υ];k¥N=aK^1H 4h)3[ - NAoLj(׏Kj\\Sc+B[ZԱ=~,4>OiN)Y2C23Q+&52(A0V\}xAFIu K-&Hh**Ӳd)f̕1 su8gaB_eOi,K!s} f%X%؆WœK .z4y|0)vz=E`}8v&rǍW:E1 ͮyUL qX^B_uB& 2XlmlɊ n0U3Eu.|Wiʪk*aC/m(8̓e/>gM1HEq]^(ξ~nj97~R'Y4?T`*Ų%tqPšӂ'fEj`e r񽥹MSGy13z02xsD \XaEo^ f2BXz閚q`ȆMAfJ˿H8xr,E`ử.u˼A*S $![X紽xzVN'`4ϲjw 5=lA$H#rƞ;Q`Y qlj!3 K6!Q{V c o%v5z3Sl̍xFOԲ\b{־i[1F8]eלpbAΧ0@8 %gO^WJw~ ad/l f~o|\|qz@8$dvtʋAMc8.=^~"2CgZ?`P`S|h=[rŶ_dx 6d9S m4 X]3 mZKJ@RQz삃wx2s f)H;M.'-"ɳ֤bBM` -Kޢ{)`[dnHieV]}L5@i}yC}XZb/ޖ/BzwCz#{a2|F}h 4ל(0=I=P &'eC ;yt8̳Uo+:kX\PRGf~'LAh+'kn9`MR`5 威5ߡbcMt5aC\6](#o3!\? ?7#ջw B]ֆOq8j->ZQ)#: .t7δgĺj^E{~26)}$ ~iƆ nCs!|.lzi+;T{{uߘ0q}}EɗOpfN7bgs0`Nw5p@z+3oŇ'}8kڽ_- I8`xйX4iN<ۣPS4/B,!&vO^&y_¦pǚa[j,$"'Y81ѻiuwըw`X O5~qJ˪Gۭ;^u r2}|%.`w0K Xo^ .Q jޏf9aIԕkukޅ2읅0}.`=뻋Cd:OaN?$H\zrrlчk[rLKyjhyO_{-l6 Oc=Ds|{%g!:P@,flFj-GiqGBFk\n'irI=0{9[Ggfźn=Oe6GXƑݿnu-ưc? 
q%>|/}:$[@ξ# d+ +F-9{Yz ~qR;6<`mCw^굵Qyc_.FHQzcm:סQ-(#sln{kb5ev_yKV 2C;R~7ܾ_V闙ь 8l{y하 .tž7K$O*H[e E_ \xETEpgB>|ȍ?w.t Oƛ1D8.j&"5Fb -^P./+>C gaqDȄ$VMXjizmtUMS6 ݠ|\5Aݘa =;v]uА "v~j>F_nþA(8gD_U:<ìn&nO`ŖFi)* OiHswj -Oa hUc0 iGx`3@ Z]菞$3Hi`zɗ{]pKQh~Ty#uճ)jӹ6C+0 {]mqtHg#]z!q7p[KfM8uB:k>a=gnU˂dm?`ÍdVwCQ 5 @%.e4 MHX?-\Cu.+3"J&5ܱ 'p玳nH?sK?;žNxp80-yi^9alb7_p'8IVxDf%wh$ 0yaU]ry6O{0| Bp'$ݺĀi|O߂J~e^`Dha>| fy#/~6E <0u^?"]33en<_{U ]E+OkOI2}694^\y';=׊kuZl,}};=cth=6~U" NJvrp0LQ=*}O %mPT 0BZ8U[U sҞٳ3/7qnp=;P'qK06>H}z`2U?Tݱj.|dž%qB$]N4e>PsىQ@Z-lו8W`g_H(ڨ)e-IP}~z%!x|!'Sd{O’CdPwOM2vf1 WIC ؒv&N2bI[uDcگ[[oo[*ȷ9qbn,[/C}1,nh$F0/0Ʊi?65ΆMgTF=6btu3ts~+I?^=BEdx D~&?ܽ;Al"Ž42p4%HǵեMJ*}غ̲k82Zl5g"G2/EKލ}âُ/OV̕-gs1zm;pyu%ǡ*N9byTߙ& cJ<8)?}w2SV/}MKznҕ,^7)0B F"7;2`mu8f9"_.'V Y H}7|  71B@ {\xH}68X*d>?| ƴNo,_ ނ٢aEWq%fr6ZXIelTMk:-KI_{wdžwQMmonӘ4xg7dp51oae,Hzv7SѺ#P[\s¬}(WӜZ[kSAJWjXӰZ.h!v` ߕY}R=Gq(QN"۾ܛ9eR.rK4Ghs{&"K5isJUFΊ ,^:|,6=uNs"PZQ)nfM{Q {"AJdҝZFvWR-^lIXiƁ ;XӘlSC1\m(i&b_TLO4. =XA6 k82+D ؾ? dsW׋!u~1ek&lv;?ߗȕA;U^.]wuB$ד8밪.UGʹ6I^%L?[s]?mX9`` wHFJ^h3# /pb7? 
8<܇ `v2_f;{Y]C_r6nn1&;%Go2%HwB,T^׺Q~G8kqxh+x ߇هӤ`T( 慦x~=¼ґ<84ײ䧏e[Ca.S*Sj5 gc .~^=c a|X{/{o$ hNbd2m7%@鍦C\0|q`(;rîal[5fzC ve?kڡ+6dYƯDjc Yx?QS>Fށa6pqg0t H{U-@#a|>]_& ˃l失vy)-nTI_++՘ZСp<[`˄IFDCpڵo2 o7J''I@y`R Ny4NL֜IFE.[L6/Z\4 d牵.3mǞʛ\Q\y6r9]2-d€ىdί*]#tK2KfWdf-&6*dy.Fչm-l@=u,M'C]^5W/~M\ =vZBGa1I=*V6LhJ.ґ3.5̙[S&ِJ;8Vazm+l6(Whؑ3^ߟڠ,oU՛SH;QyfkHJuz2*ˏx/EoCA(K-k+BIV76x\GX|zjq˿b@omG0+*N><ҭgkY',YP{IQlo)/,|t]?,/9R3#ҭ./I+{JM.ƫ?C WH5:%Zu8VxC[x_Uyy_5s Fvv(w|pO-pI*)Gϒl; sLjx8Euȡ37pώv\<"O`Sdqd ϯv|qMد~~';T޻N^$A1nUZsZhxn3 q3zC^2x&A ԰FɚE8۷ L󥹷=Hݰ(+!b-%js~(ҔS"UFH"4,{}="~k}o\.ak+3G2ZL͎ \rz_?پs#fnW!ZiL}:\|a@ɹPW [er֥gCɣt}¦r`q~ME-Ejcl)Ix7_cM:=lO y EO.GBOZBJPmyhB(f_Ă4 8pMe,y4zod<%[%V-jӧٯz>-`DYg/RrK۠kN~ yz;?LN-kztQ8KYkkÜS=),Qn1'Ԡn# ]_ΐt\om6T' ̻6 GbLʵ%K3+\nzVI7lꑛ !דOЮ/Nʚgꡓg$":c1c)\q>.+ $|éoC,|BsXngR7Q28$%yS y )Z;zrryf4`n\=D^?˒#"@-JOՎ:#P2PoskA7Q}9%Ύ]#V=kW@ nuk3y6MiŘV撘*倩;!v}%^~JSlK:szb- +}5]s&{8(踻PT &Z)P FusQmW8ZKvɬJh0\ {ҏtk76J9̈́0Uu4eYJ0R\p MY8pW1Q[x3d!$Hwc2< ] /g!3E;~ I:reJV;⌤=X|j~'R.bpŃ}ɍtt߭ORNf8~ɸW pG.nu(пsY/Hݢ %6Abb]'e{R'ߚoI#}U8vɧOPYS(}nmN3͸AlwtyhS"XP38pΝ,o[w\f=WHO-^4dcW\^i2 QXtڐ=x!ʇ'9K>uܑ7'} z&}ɦ.dvGxvBśO,['i%1owaא4|T&)B4U3@]ba6BM[dw]Co!jqΘK跏f]'#!]A?^+bǗ=G1?砺?+M3֘ t`= `Q*gzhߚBuϝԧ0h0Ȯ?;m# ]|c \#2u3f~0fACʣ9'N|@;9:g++m v힭27ON y $pP&vg9v'SI#]4he'n @CYؽ,v=0;[GVs?Dq?ݘ[G9 KNP\;tU_RNJ>4Ș#m|FRޮ'D0Chڪ&?~~ر(OJf۸uJz3XlΏu1yL36@cߎ "3a̔-/#sz}'ṳ8Le@,mK#yM!eOZQ<GL 36H\ 2}iX{o8BmCp's~< ڄ}Mf2n6aޛ-ޭ];idV.-z [& SNQ7z}rU?/.2/ smS2kCg[ Rt`k=[`6eLÂN y*p {SF6&߿c:*ӳ&q@'aTJ.,&ETXDZT˗Ӓ⸾G, ^pzG[2oaKù_͐v\}vۊ.&bw 03-UEktJ$a#@Nh=Dƿb1Ŵvdv9q/҄0˿.T 5έuX5e@'kv;X8p]| m00( =ª,_B)w,9PT$8Vi&eX[W-bqJTŀL=1]bUaZ~ދ\öS3$f7gሚy8=D__^|"[ jbcx)4E2+W!UTa4{N_qɁ5aC Oj\?,C_ j9ӵk9S$GKfqn!ʎ 7^wpf㺧eK:K.Vp_`y#r?܊TUksuLna 9yB၅;{X>e=e51Ko=`? 
T6?z(&/gf)I3?h|h sZ-8M }Hf= ɤWYp&/_W3;VV 8O#WK4̿ + uU:д)5[iސs5vMoo};i?o*;Tᱼ$L]2zboY?eϑRuq;\'~Տ0GII ")g9HM*O9P5-%p$b?]J{vsj}P3o]{C:4\| =B iކL Xa(\|z\kxڮ}6״'7Q0Ƈi"iI/M/͹%K"-eX` )U T.k>)ktA AN(`{мDĞ>* b׀``5  Bt|&-wb 3cKW؞l d6eVV}dW*qP}r^&-rYv^&־P|OF;InCwY:J\lSeKۜzyQ *yadֳD EEpPIUsZL6sb*oDq/o"+??LOpQAwf RTN\eUS} Cb*v^=3g4)47@z {Qd5ؐN6!{QK"6Qܸ4p}lX78uce"K$l$ XDRIԱz`U@B*Wy,Odo 9CQ,Rj/0X=g/Z|Pq GȠ/~ ZFYmN\{ɃPGF[dx=.I2B@Z?ۙLwlG²OM N w=8D-!wuqcW[fmVpmڞR6!4,l{5OuxE\C{e70@z ,My`ތ325Opl68L}+L& [r%pFq4B ΨI`?'6>_ǵy{1ll_9:ʑ. ?6=d 6Y\Y zּԸ_Tɡޠw;<1XگKK$/@Onz˭Dz, j ~XU4< B 6/h}zbvǦuC^8#!Ɋ M7G@aw1k 0Z}W޿idG_@g9oV,yjB %n˞s'`ZkǓs;Hbe6  gVUm;0i>% 4^:$?hldEޙ{p=fAWr`n̾73*WC匯0'\~Ww?: gXݩO_tpW!m$<.: M&XYYBnf\]>[ 4*w˓K6<넺ZcF&U܊mzQE#VJ>XP !8vfU=Mxgl3X(g.) /'5cWj"0ʜ]9,\z}7SÅԹ~E/ (ݭFE &lqO ; :NN97G)6۬iKxCg=N8쑖 4IQXg.ZaF*CRu:=^n ~71נ/WX?*ҹL\?B? \{qL6{fG<0 =vG iNJ?7abK85j݌7i~Uv3k a}bmz+(? <ߺawJa~(h_*Gʿæ mx_%)ݙӼ#G d+t"[,'|u3pyZ.y=#nYv r" %+H Bș`{~m*wǵvsZ(Ngc'Zӝ b[wVx]o_&L=@黾;v׸]l2_ΪCDm(!X9v<'!>Da8Q=~fQF7u(-iYxK;[<ǘxZ8ʘ`;6v[`>Nwx6{&s biJrEP~neWs$~q[4\@˝Jq;;oaRBdxc/kmq6n}c0l%޹1YogiI?A )oyi X0Oߣ CR]OR۾6`?I~:l+}ڞ;H*G$P]廇_+@{4ppR䅔oC6[FDFyMaְtm5{ѽfz_AC-u) Ł=`Cpm ,$tqE6( wʿ";!#Uzë>sHfnd .X|JvZ8y!2nB| bwsi::7yb WO%Q8XZ ͋)AM2/-÷s+yoN>(BϽym!c{uHez̦ KLjPVR]U!͹9YQp_?_s θhYiIe2bwu[tk, XҶ>wϨmh.=Pccu "z-2kM# y;o%ޤ}Y3>|Xt->-ӅwS:(4q"}?3ɻm6pK@sY}ʅ:&n>]  vͧgj҃=ٲc:0 \+ĚGl V8}Y i*tkO#-l2n W Ka$@l^!%c'mCFaw2Y MV]B?%?N&5=}Np i^%{`aQf"6(dgz҆?3@^{A4dg֫e[jĞ!.O2q4kXGӵy@~Q 8gN+(/`/v*d/K„#^\%w>xbGNλ(-&>PvI=J}twY)hSht':x2PТ~ǒ`/x8 _`b!j49FKH Wc8Jp٣@W~òA:ң/.;r|M=L2KI@h, _Q  ?~]mlWw!UK~ .~3dZرd+avKjb$ *~uAq&{>g\oZ*pFZȷ5csc_HCg[ޢD9HʉWAz_1*_ Oa7ʱู۹՜P '.\}/%&WhD쇵'jaHoR̳+SlN`$慼.VHS"{$)?y mj~_#*6o e ֙+&Š5ojq.׊/˯Zs(3V^C-ݧ ?\ aETR >TI -Ŷ=4Qs(] L#[a8?c2PӅNh((q ا0?~9A*4*,ݽlQÂؕnz59mO]A0NNѤc*wD0mlM+5f&& .aJwI?IrJ"=v:ېdu!gp?o5q?+Lz3)0j$y3zngsd-3&Ws[ bHHnH:gB9b/>l z.puMªWn] ǿOaTҚ0ѥvpuX]t-vh};΋d1ǧPΦ5/k JSrf-./Kϧ)[Z2ϵ6"nk &ˊg.C5me;5|o.*_ L7HMI#8)g-FE2a@8}s־Sʗl&bf;0ۚ:&eebqbTd#/vkUVEH@P+nʒy~ !00n2<ޡF^N;oi%k:8e\Fҍ9`iJ Ϫ۟N=s꫁ǔ "=%7<.̑/ 
p`-tFlMG6`;u1;0M;q@QA 8)ozm= ¯YFC72|p,NA 0SA$f;ݏT¢<@oX%6eh6KW8|v^;{Ç=#~8pT&w ̖ >sơnHs\o!Ea28c1e=vOQ Ȩ^«)/OqÚg|;`n}&)_VXwⶵg^㤶b%WfwRIe^/DTw=yZln7@S喫Fk1Ӗw @K8,؝[ѩoe\!曉W=ՍM($Lգ_k7äKIþP$*,HcKė/"+O#QeedZ Wƅ֒|t2Dג5i0`-0]I4rln.@)0:ʊsv%XKojƑ'^}SeǚOw/ąGM`a r뇱'_|odWpFjS_akn_̮3D@{5\U~K?nCκO#`a-ٿ~5m'$-IUٵ5wVƏ˽k(ߠ[̘sN䆡%K70[ C]j/qv]W;o>vL0 Qz"{졿!M@ `C+ NX'uC0,zHscأ} p+?~`;q0J-ݩtt&8-G\Oڋ 9m%#k?Y@oDr%&tSqzEQŵ=UFK),0 $H4*5fK^03x>^ciIM+Axw0jVypٺPgN%,{Qta^@NQ۸噾@x{HEuh`9|0}u︨w.^} yp~{o9&~XC9:^j?\&*hVf l@'a/C!tٗÐ:'N/8o7{bԸŏRsIثV Cvytɑǐ Eyvo P:wj`yDa\up\H&Fw|+ɨ [ eXhƃv xsc5(%>$Hy%+@3|6hn(n0癑 )33>QaSUǘ L_l&8x{^ahۂFR _{!!"_&oo:pϝ^|.C?3 7b%S;=07J^/2LY[j!C7X"e'aTgcvGPV7[iгEo5dvSUVb.w-w}i!'Fɬ|¶T,{uM9 x\rf2Z76Yn8qd.[z~!6V8}}CN4>ł\٠ؔp& FCN"OXz Z\:){6C@=kYQeדU"XҙSludVg7aGXwqU,7x.\֛{fla^vMf9o6Tl .xn'g.kU=]sn;)] m?\*&8R &K8} ;{o..'>j@ʷ]8lgoU'4cR@mqW'$=]Y^_!SomTDWM;YTB8v|j*J3\|ǤCZ%ϠD|~'OaT[TM8C{yv_+\:] G閹ߚ]$M=t- EY|`ڙn8\;F3TsVKt[w<Ŧ s=|K-aEfio;W0mM> ;OVƖ$>]ޠ8GǚFZ] >O!i Wdǥopl WL,/3i;P@FJ O 1XO!sҶf]j̾rY+X\r Vm^W/e};/ncndRĵSt2}u$.&b|3oG&[ž+ĸ`x޹)yz ؘ,k Ya۰6(S00%*~&B+v͒*ԡG&]M*?O>q;̱g09rwoU/^=/z_ ?#{^:c~Oײ2L6}i]sGF (_D&yV"il"qНJU DLeQ8nxiAhD(;r6hZF;yTnd¤ER?=˞~8 p mtnSL]f᥂C<.-g#hxD5k;O}cX7"k "6Cvd]I{o뱾Ǜ5~{lC?Gq.@:҈•pv@InSV.~q ٿ̓g{cw=V9xg6oZVn^%cW Ҭ,mpA+p*sj׏(>wN4 el;>|2Āk9H`ךkzԞհ#X~VV6g7Ͽ،ŀ]/L{Ő=ۭ7-WI{#s47wd t^5Uȼ#\O8Y]kq-98FJ"bvݡ !afYԃW\{8s,Xni zƿR#G.Azug{S| g'MÕ{Ͱ"56y1b lsjvWˇƈb\9qi4)?`1$6 )mSW2)q&).2܃~96Djɝ!sksfC\(qhP4G9},زn}YM(;{t3Nn5|UlJcݝ10wㆠ.Xmu|lNH/vy3zEg`.n0l%C0Sn 0:5/n)TWAAun>dS)Iy !ȿ5G?wfs<'_}xBI! 
,O 7T΁8+X<~?b3>ٲ5B}'%E7ؓ6)V['6ÛI/x̱ ^!j;68nNa`Oq k@F-bli;7(aoWanK;`lMkߩB&`N*3\299R e  KI\IƇ3&Ѿ{M{wYCN{<җ_D=ysM8rV&wz PG4yg~>VLo ,Ѹv0RtX][R)+] нq맽ys> I#+ΟYj| # HGs'ӯp DXz_ h @Qz`J[bdN77aS?}9n ?a=|O ^'e$9CB!DGu)N6n=~욄f7oX emĠČ!k5 ̭JU_05`4- ؏y)j8/%/ě@=xవFoy_Ŕ?U[[S=?F 1-5kӹ{OEoroر HkY?(nWcA;8Sw0JE''FF2=\Gu-8}zzt7)[@_ڇم#>.É.`\yۿ5@fG껳dA¬,%ZnI@s{|;Ni`/Nm5kZS`d#|`81q@[{ l,|WiNH[@aE0Cj'7bN{S$?N^۝֖BڑuLá0>})DPR -cp5F c!&{lNU"Isl~_}qVZX=—[̧ oRw> D{\RvV =a̘߬*2چB؝CwoƅM4vt`?ųACCs(.\d}⎫ :6+̽' Xމ$f1parUfƧvOV/֍cF&[=*PrGm(ha#% P h{1RoV''?` DrN)._/>? O]'3_3:`љ`E!rɮzRI0z P&f3-l=<{"BM'`&\D8mxM[MºaOC`kE +Ec ^A?b.'VOx?c'{)-iC{Q5>sL ym7KI8ٱ\ _Sq܆)/8-K[O(<%yn΄bߦ)F6x3`zn VBqpשq470|_H$>_wmƶ$̪ܒ% |'KIB8ɾNjZ>IKxB8t>V&_mV]pIΝ^R5; }Ex-d:87 B7rPE@#w&)+qݧ:=ЮHK`u8ˆ?9']8}O% p[x6|%ۆ nI3',qQ|eh%}X }ۖ>/AR = )R [_o.m_KHeS<+;.BH8k6Ҁ'o0bf5dȪV5ƏY*;nC$X:=n0^ކ|A|~cTĄd^aHQQ`{]kSu'efmFolqT w}7\cYj[!I|=!*ʐԕ1z;>Å;5LKarCWdziZyHR?m]^2zgi'|c%SM+g 䑺2vGƼ%lo-r㽦:8zi 8qW GD:tZ$jGUMu!L3( iڏ|Uq%bүmKj[_Βm$>(T2ܙ|yK> ,L2Њ) v[dt 3;a&b.;:E2orxaS̗b;\Iٱ]0 ZnM~SqC[vbEP5^X ݾ}[w]nJ\q9F1ъӁ葌&蚾drEap 9K wBu[0nYh3q>| 0pb~/V>ZfR~O>ML\LNG3!FuӀ [=e[?\1Y~] GD< VӖNgjH\ ~LpI~?hl< 'pQJhc[ VAj koD2~K0sT%n4~[;B }˶XV|8P~n|XqV=ݐ%s1$v%ZoFOW3w J8vݮAda_x؋~IXUO\|ZJ18:]cJЉ{%KcrEt_]'aÕޟ>%Oq"E^X;=R,7U寛ev@"E_svS/ ޖâXYÆ̾HzI 5Il9'ê TwC_賶#)V<ѓXs7|>%ojni;hc?3߮t/S{8aD%]ֳ FrݠsN>Jih_ ͥt{81o::=P]Wh8m(TIAd[X7Zz) 00߄|KYvt=y,w~C_CHtpےlm.4נ?m jέ,3Smh:n#kO #='ݟ,D^vm&PٖН+$%׌[دe ե ;Q<Ɖ3ߧ-zac8fV:ޙdH}4woӐ.Eω3"Tr?0\Lݮ^El+x>=9ĵ̼ +*7c;Ǡ\aBfe- >U SVڪ0 UF=5Xaiӈ5_^C ~a|Ofgx=&.y=8Rjg\%އ(ܔL]>o^#]FHKFI=("]zTEDVz0 6M”b@4vs B^}ʪИ9`'mg/Q$@qK&Н4͒ی3n=7i,Bo_ 9[wLhmOz1V  L eDf}}{M%xY`ڇiI)g=~G$N]Vq,>:7$`(%3i*1!kԒz^_ nߎ+w%(㖽WӍ@ VǤ31J f7p (r:1{qSu&OdZ@߱ı{O=NBAI_{ݧd-}0з;Lz__ 5H3+¬F6md:eBXߵFdD.jQ+:Vs6cgzPn)_OM|'/#sJA}:Auf g=5zBupBWz"/`)&.Zf#G!脼V+ȹœo۸Xb9$ կnӁٓ@fTX@9%2쮵ޭNLHp8i? 
V/w1 :4>-`+<j5U|nd܂ʱ@ڟ!KX~Q8SR.on~n#UsW5ΜRtlNz?%;x luC'0{$ҸoKCqb<]ɓΈfH{˳+ԱKl>Xʋt`pS2673fJ~F:gB* ajkgqԸu,[moOb|/O}IA'xp'z PoEɾueq/vv,;ɸV 7kՋw0wW@m\:W^Ñؒ2в_(םO>g?09KJ[]|a+K9.w:׀\ &U xÑݛ6XiRz: Z?YâY.c#4JOγN,E{H ?1kN >IcCaNd-?"'%;8e:Jb?_R8/{[K _u(P ]_6LX,;p!8 kŔ_7a[gqn:j)$7b\ XlVu_Nݜ cqnϘ=Cծ9x;H7> 1y^?Ŀr {n쿀Yo- \tˈ=/@ۗ=.` H1{[l^$Aɘā?HKP8.gfZ31Iná"8z f(f aj &%'lH]+d= 4_M9RNWԖ:5ͤ01&z0&9{99)qc3\:LW0aBǢcd ߚb?-i5,v=EH;Cw %"{-L\x SvYޢ3>ah̙%<p?#^*kV /dؠI}./zT ,(ɄBkHyx+m\^zr$/}ՇzsU`#Bj kIXe{r);ڵna@^D3qݲTHSLbwM1GKh%QR6rm8&lc Ab U : l]q7[&LѰI;ns{8h/Bg{4cV#XܱiWQDD~ ,@{t(O!R/<2W"m O<n479H -& Wm'8@,&({hf8moVv $8 7h+\qHR軖e`=%Ds;d՛߷.5m+'~t6"ݯ ;w󟆚 Zq<5? ;+>%(L@]K (Ce ]-#λV8C߭dyi,7[y%csEpmP̓z(zf[y .>?\<5)Gd+wlqfy)8|,HgpCǙ ĚIilӖ0=.)ValHEVtѮBXtXK>v_iv '/($;+!InObR=+ m yK8?-wb[GU4ϲƙ&-pn˘"ڇvaOe׍`2qϋn7Y+-H踲㠢@I0A2:`֧;_xs_˜8DstG+Lv 2WΈjN5^=l=lCտ_"އ#C+f6lgʴHan&]3/hr Xll_E\|9FüIbp_JQ8x^-! 22.W|%9y2e, &Mɰ.38k߷o`é ({->;^;% )_h dVQ8 3z%ɼ zn~vO`̲J=Op]i3P3f4>ycC- @G-kX6aGrpWhH>WvAdI7zgߑ7 n_!3y } #+cҧ\G/aU0V;O:fY;P)S 3r)]@-M}Sp$j̕,YRI>IKyoUt-`,VX?dCP"ҪMF_{MuU ζ QH;$RSڈm/o)#P ?iIk; G9q䍘;;`P]XCiS_N{t ojakРTYf^=۱M~߯nf9יƇy B#Ȏ-zW$ټ+WSneZ|x=al $Z bo8a8C5%]qٴyR?4J}"g+d8gVX[VpGHK2RozWhemZ3~HgTMOM ]NШsMnm?N:?]0ms70>ك \Z`QA> plR/'q~(ǦMv1 u[7[33A3CC2 nC>:Xn*#GV=l }%r9"a*nQ6cɐTVh޿j`O7OZox)G%o~g3X3?,v)b.X'l(gfƣ0cU#c9JJ!φkdrTdg\Wհ7X ͖v< $담׃PqsSk2'?:₆oΔ͓Y`kTy? 
dDRq.wܡ0-TrǽK9_Oom#7.XOx.7bݛ R8&0u ?V)s2c6kyJ+\9{?}yXذ#b(8WWEG}2٘ Jz|Px/X8إxY9oEl=lxJDf0r͑hǎT0xl }_MfͫDb3'R%0SǒvےrO?% EqnأMldd(,N} ΅&@q^p?xd^ll7:6Bmv{]cw2.v{A0hg'K1vq ef\vZ̞aʫ?|*@=,W a8,d A0f c |5ob 6}c~i+3}{ 2$(u,T\5G\q}Ʊw΅s|L<)Ŏ`z#vHy}ސagfy2Ǭi(#Kq\_zuhYK8rvz)hw>ߒ /Pr6Ӊ+*²PQiO,8 #=Ü[ A?FomG)vk3[XmPuJ,$[Ltп 0`хO )03)?2q~ ՅòDJnkrtN?*6!:c/rF~_-[:)|ܝ}yL, óPstX%ɬK麐ʘ3'n#].ՈYoNM^g=|kshk+P2c^F΂^\tmrn*\r 9qGG#6fUV~s$` ]Bk"(93U_ XBкAD#R'elzŽ{j N6ޡ6e>6RyNkfnjiϚq憠KYh.K|X{S^;xJƄpy%\6{!oD"Ÿǖw۽ 8qaUtg@=;<>Ӛ9DĖy7Aw1|kqĜ`xwxVrOIAϪ"d3s߷ucO럻۲]Xiir*kY=傈ﶞ$'jҫ&k%oT,p7cFb lG17Mtrch>nz65k\FfZ6\n,$nl4{*xņ/JחVL/Cob:ϕ6j遵cVEvKy>7OHA|dwƐ8)jI\o2lųl[S[5[Պqͺ<影\QHzI {[[O7vu.yg|g[>FŴ3,R%lNvoK0m$=$ nZ6MB vw ߩ*;9X_RDc'msÐ {W _T[zZ>/4)4T07H3lzMfr:QCn5եEe%^{0'[ZKlpKk_;k/DyG xh^ f3eFMhW| OIab p tt0]ܾq%~Te{L6%\>IACp$ufOȆeL=÷ /"q!w%H,ڀƴ"3KE ]U}at^gFkBG1M;ʓX .Ԝw7ЧP\8$ k뜘qe!=B=<8L=/*(hsuZm]di6PS sG##l@X+b{]zds7KAu˼\ :+aX>" G+gX4 Ֆ+_cD*ʧk0cY8ɸ ;2qߛ X|9l:h( ‘wzhRb Ӆ~p뗙_␁ZiqeSrygӄ&pք(5BWhS}6$'YI**qQuqƂtޙ~0T:U<u*F2$@̷M:|VY,\?yK:vLAu*&([G 7떉ЭS~*Dղ]UC"!H:6P>0.~^1ýj+œ]@_M»'k]iu12NӉ'@+L;|V̨iOMR6ϿF3 v(iKpc'Hzur&{1]{[Xv8Ɣ7!k88.K!6Gۼ* $]kf^`$,{l7-d벨ߢ*s^?vdu"ϯ oƿW.\oO& Y1Ή>GNqM\#n-'cn7R`.hHui1ڒf]Q`H3'~V4S߲^=U;hA(; JufiL)+ vD&h|6ɇ kbF;Kzc+6/c[;c٣|uH.*,?Of_rݫ _`E,DʤlOzmԓ6*W8#ISqۋ N 9 ӛupsV%OaΓsdWQzR.vnnk:u=jaRɦĶM<<hBd0tbw9GDnxn'H.J=IF0,mSsjt3X@d6~}c>&Nnpgͫ6l쏈{k#M`ág#wLnBN?,a&!|v $,Ώ}rW~0,q%˒bSGjOTngWO0V5{ĵHTzxĈN1th>էHO%6Zzݩf^}AK6A,qwvG<|jzZCrbUgrYIx֩ʼ<[L`YZf=~%o]1yUz^t\=_Bw^dǜ'+_`hhÍwU'=e 2cqϗ|m쪙LJQg>vZ'ÙW@mQדpWXjdC?uv\{{S4W<ݷPy!LЕoUmLscTM^ѧG/m2ms ח />m)y/͗]TE{w&q"'OY.H$4?a׎m EK|v=tKz*:AH>}3%]:?, r{]SIᛷOLͽJ{qHQK|8DX} i{\U6 H~ 7|=yt˗S-kiZrr5Â71u! 
( DwF9ns9ߛ^t"/mg =!h=VbxOnOsQHX-ð>5_.pRkNooT, $UW *UEndž|moߦ 0Uedm};$pޥc|pP@ᓆm\%Iv8.Nj\/8?J3{y=5SK0td<R2]f ֧zװ+t3GF8j~cN޲zt4bXfr~'E[Iر52dzIkX4U(sRHSz'hH4@Qhu>zHD}l1ꇅ:يn:.:E:N] q!醇@!?cyC?L6,W' z.֘?zg8]J3xZJȦicҵ[,djm52ER]EΟ7z|bW&mȠl acsiPo JJCl2DpXggm\zĂ̈́VjCRֻk 8qoi}0v+Y6f >^LIdcKq.xq;[\DJnv!*%XY]ƕ9>_HY89YLJZؤkVk #5aRRpWNS7ѫzBм^x;.xvG,6Ìbn]Qu\>Ú^A{%| _ g* 2׳r1<Ҍ)vsR;B:i8A㋫jF&Z,ޗ$HV5,R?P$\9Kf ?wa^m/(˜^00){>{͏tSKCw87mȤo ѳ~TS>zIOx.#>5 qT:(z:.GmJ韝d27ԫ3jlNPSS#hGwYrX]ϸ|GE܍oygN18B35LZZf!cwEMapJO@աuS"SCQ@6i%p i# {ߋ2߅{/aa44Ꭰkyb81[p]W=X1J\yϪ{vG!A0=Fۍ aJ"@O{&}MYU#ۨ=@ f5= Ot'r@7,f kEbޔJCA0‹9&feG)γ8XPhxR G-*qǞUkSnaBXRW }JZO_mT39e'\k6~T)tg[|ʤ#npŘ$'7®/qQQPDh.5\Ѕ:C0PZ^_|vT'b>3*Ws3qtPY0kק.T~ ye#_ RSw_{z#uݫ} Q"ngxҴev6ii"N [όؕwCF)V zDz} hB =W{BR3E[]qGp=+I0F,gtsxGH`Z+D~S`*Vzw)Иy>26>&",ϖqbmW Jo?r#Nm)!<9HLC&B\&'2a#пdar._O2) ZJ'O8ʣ9wS.ep"ÍU0z) 9FNepyTXSa%/v$%uA]~ mzS Vҷ?l5>!g:͞d@0%)YŸftLlQ3# x}&VtO29|%0y(Oh%~(KcGÐ)O MB4[!7aɷ7 7x"8LGq,ũԔG=/}A -2c t,@aڏЫL76<A;`iip]"7 3S~&1'O` NV*;gzFON<sۙ;tk-2Kgd>tuX$2gt%ռa >鲀0!%Q$"?$$E݊CP-B&Ny@bo}I {J+©k0)pVy/lz,VmۅigMl֓W08d)TkjDT,7 JVPڱFQq%Q)Yȫ[p4r*~>s(Wo"߰.GSP-U}^T' AQzB{ոrc}"⤸~ |,+>DzH:qNmq1ָ*Ԅ`'P@d[җSJЕ.{v&kaɾ%h9k6?~ounY )J صǸ ̧>H07﨤zɿ:کUt%ͷ!JW+||{^P| # t[Nm̭Ғ.~B84#jW;in~+8c,=s^e}™h1KR;X#/SkB^.NրKCA$ѣ~-]t_!-tH*D"bH;o4XbE]D_9H1G9q%ii1Y<43qeX0=z2qIɯ4oS8]4g#jkkob\?urvV۹o#Tڍ G`:oHpBqO7N|܍gy~=_˱˘~^Z&ԌL MUe`Xe8(._,#qd J-|,$Hݽvժ.aW +kIOC aHSV.`J<4+M'X>̕oÀz26_'=s ҅ SmPc+Vp/X> 7F0L! 92hOZ/k b|d۫} i넍LQ4Ǒțf=5YA"Q2 yHzQ;"U㴐 Hkj"3Z%vԾik#/f˅UP w?̻#-I}_qAַAd^L:e-J'EUkSLKVqͻwӤr ޲[l.3aIG<(1#8v)ƚ:,읗jbG:%=&N!OWZy]gJ*AEdoVUI%O V^;YѻtEyxs`}cWGٿ]GՀr¬[~7}*6_56гSa;c4% |Xxb-GbLz&kH9tU,yi0sE/:a7d[rҗB߶b \9" :nir3a&?Hs 'ޭW@ǃߗqP3OlaR}Yg& \ ${^^s󞪽Y8/uUWY_*Cn\.=-Xz.Nw:?迫EBKk冚7<k-b8hp<>s+LF8\#)ɑ8Wh3<(bn$C.8̣]h%$tz¬J >jD"xS> 8|+RvGߨ1㮃dJy̓d[F1zW#UoiGIpփ]93 3utx vJqFU,crtiCnv4ȶo+q7y}e[~ %ŚUH9 |o 7Srدiy- _n,<'XvXaK[83z5*&sAaN=4*;rB5F;vop@ږ̬a2擌p3E]t4BqeVH~ʺ[GG;d?zs}kɯ:'㐶m8r4)zWh0]IBQBowhDc U(2GV4$ݼ,38CogGu:JwZ7U/c)Cázo<\{PAs+k9v˨XNfoӍzTc! 
ˤK'Dr?attLigrJ!8ߞX,&U&3_To Qc2c;f,sku%qBد?_4]0$C򣖁)O0"q uքFS}Jks6RtOak#X;US"%l1U q\? 7]<9;/MjB+AooC׈0鼣;h%ut)e{2DaZBNJW/\ث1CI` -޸j)I0x9R'm\5CbRƞ`+}T"`lD.4HkjE>E~4uaW*_iP0;96񄇗 ~bɵ8˽o/WdCЗ#i r4jkƟ'ރ $XݟQji pG89ttg '3[@ǪB8/~urULd?;\Z&t'Zy蛏ӈv*c:&F&I_xԩ+nTA5y1uvoaIkPk.X h!{'dsg9jealHw6Ρ+».s+ďg6γ݌}$z W}^fWSGM}ĦBxlLg9|L2mw!uM,!ed枽~@/{ FjU>;)\M=Y@܋'Z2Ssl滝ߖ.L'w!v/8gy8}TP.ο4?{st58]A Fh"AJaTSEyo;Jd+"pw0^[^Kzgp`x著;p&wg )δK/b;pf>Y)9ޞt3mUO.W2C9#a^&1>f)nMY/l%{"pK{r"vFjH{~` \1KGV*tUAϟ8J6ArKs2wah܅x@ޓ&K&Y*.BϳrRar 2=p ܼo3A7si?c%N/SSXʸw1Lj8s ƭ"]9u#icy1/\ӵ vTnj܁nN8s}j'e?͋gG+r(]<¸ B?El!U7v>\_^ `5nsP{;I!B gb)r2}>i/׭1﹛Zִw @%fFth\\a?),6ps7WܹF0-?MP\tlǦOm} U}qf+ D>% _-LI+Ԑ"1Hbn|Dqػ]aiD=I D5{73׊!8U J1<>80)Cו5\\AӦ:avG^Tĕ_da]{ v@M+0^`1oJ:=^,ݳĠv0nv&;W>T6dsbsݓ[?6A",a\wKg촒qPOHG D8SßcQNŇ{$X-nTUD߯<8}EML .sjoqNǤP(ӲXG~^GRTᠣT6f Iތ ^kX7ʑ"* cMa^71gl=IL3 jևbHي3AWq1b0L|,<>yAFKcTX$ㅂzۨ`tx$T Li7K8@UA:3u*,j27Kc,ڟ`!Y ~wGt._"Ӛx>Q0m\S7ibZLSnOWCs+t8VE%qۧN}ڦsT&UPLo7k-M0Ⲵ,~ungd4-O쿅g qpykڝ,0\iKBq:LiO|}X+&* E#q,h*tѭB\νLf !5 Ty宇 @[^n E_lQu2SQ^ M6^$薝|PÑnq$D2nO-)Xq7en4_14O&ж4DE5n!Ν*|.,6qcr0]҈L/<_RZE PwSҶV9˿O0Hb)մOP cw/,uq0{ VvY #cb>u= :8XX_t8_ѕ6Ozԋ5ycӺXG+ `uҫ]ƝyW>֕ -W`ϟu.v[+~72ARD@l 6~?|$GG-G|6(pw/uoŔ& lSVu(\}p^6[;7ϣ!PXZX~3_˻C?[EZg3!( +gujӬ=طՐʖEszMY\ A-i26^ J8#6rpz֫ep̗@]}K2s8NNiO:2b|*s{8$vpaKHn#= 49]w\@@o(:!4A[7_0͛?@ t:6FKcy) W#8\|^=3{AG;{s^'ӵJ)-`[&ۢpk+zq7~ Y?~^hljG!d;_}t~ Eˋ,8< UI2grx͞]0;ZYw^g 9nXbؕn o.9K _h'&ÁøPF8N1)hyJ17]o']aJ'PMH sU% Kj6Z6,zŗ_:hCga>+L?%j6֭ӴեvVi16z\x^)@8)RkyإIP9 Bo1aبo*xDFp;v3QWxo90q:oכ}B>?:a{#i(;hRX0HJcmE>p'W'nj>W43rCk r`[1.~L m^Q${_q*,jWBG~J \x"Mm`1(>/xf`+*MfV_M ;b#i֍Z*U#}&530wxJkz}s# 8^xAҚȤ0Im=Xz^4L/ޭ7`yF;"=c_ zRM< qcY1["Hc?߆#_WJcwhvh+ ,߃ϋsK$3vvl9,[K3+ Иyl2>$g8m!?nZ gϨ􁚚V YΞ{z:dtrqS;tH^|CO@W7ofN&$qȶ(hıEqI5<|fF'R"}bx{S1ͺ]59s"pLGJ+cHjlZ90ߗ 8\/N:~K!,3O8q3cFvNZ#3 j*WW~9Zϑ8|`nɚ ^:_Jok)–`xEyV\~ndug,дy #|Ҷý-x؆OزUM9TZh٫=l0v/8)v=a*1*ϮI%`,*u_x&O#Ceev2L;8ଵ)L-|AЍtoUݕ4lhvq{{e%2]USrTh۪G#=k]oq_L'|zo$Yᶤ/@k=OޜCXӇsO;2`C}U5\4yRqFi,X jtFG+> RcƋ=JG(R,te%lYͩ8f=rkk\ԲRyo$mu?ܛ;{e Ғ_pi *n"3ʰ [x=dmrFft͚4ߡ܃eMuxǞPP-}ݎ 
!<Ќ=OquctW9L%gӛCIH' 3gpі^qnxkLg*.s,.Fb̾Pޮ4S%x z%\܇},6[걞L; Lh>)4"EJ۝-jiI3rgM8vnQ\?4SH8Xe{ԧ8on357DYꏽx[eD49'Fo|F%<7_SsKv̟ >n{7Rힳ?1Gya3+7qwTXᅴE>qK);c.ͯòh4/nަ)G}(TErM8Lm 2usލ8Yt)Ϋ=aۃGY3Bcv}6g"KJo/mUܣ[W.TMq3{p3ҏ`ϟBwppVK{dyцzoܒFp=fâ;G-IWƹSJ9aݽw@%YK.T^^K3)Gpk Xe/WkGmeFFO7s9x/H\ʼngaBry-qNY\Td-# K|ū-A_⧃NmwFUlz(w#ND$`:ϔF%?(7 ]Elas)\FE dK(2 hSfX" '4A7QUſ(Gfrg +ë`v7w''+ n6 YcI-/ӹf 3 OdViw>> |av>4ʊd@k饫![OdEn)c|Cpip,ʃc+jaiWXo@FcvZns8aYy|0@ 9}2PR)> _9sOXՍ-tƞ:!9Σr&j#g =;%ZEgsVF?qf3y2^EüB;ظ3X&|3lc ub%ad!ZZp/c.r_e->]Q@;_f!Azlϛ̄"o}8fO_N|",ϏnƑ.]XW6lZqf6 \F*|+; ̧]!CB7r0tPur'p&*M_&YÅZtǫ[j) 8'G#+ ti'=ROUaFL[=Zpǩn T);ݣF…)-0tڬ9!bt!#a;O0Xۓ 4aDj?1?Z͋߭,%ͭf^ ΧšmC|jwCE֛b$-`q r G|{!jg1ׂ~;/:{nQaCz]z8,o vƧ# H0yݭ't3 ;>%ⰸ]iYL=zqhCxL ,58;U DTt9۱=x(@r~0R"NJ#I'`1n]EQ{fdK]&3`.?$( Gc빯CY&0$Y^qO?%dًrm|q08#pjKj8ǣ).dkaA1̴BKzU抭|a{lA:"ljXQ/nIĒ{C?qP8> knpL^n dújd9%'Xqf[g-,$p(LY+iWSߑ0#O %%3\u2 =/$Iyn@w+WlvR.g4 v+Ž%o2߹=jx^b]fҖ _vF5fFY)rViÚй馚 _q.$oU|"}x#p))l铢Ǒ&Q[R^\(b,U~e6"}ȩyb C {bB8a3.Z)2 _+58xr-xCZ \}9wƴܗL`hj +o xmJ鍛d̷/BпNR/h|t6ի"da2@mR+73hcžD-@Oxp(?,r=\aw$nkV:B Aڭ'i#׫$!-;w`=VW1O$'^;c'UBL7uLsB2ˋ̃Xv[=t(p)gςD;}Λq]n uw=c63RB?9 VnwH+/!ߐƅ;`=Ij^*1XӪ!]kqY# &tOB_6,<_ogOŨex0FM+qMV*y&%Nr͛o~06N[:/[/j^jfNګN`[2qELD;dM ΃}0KԕVbS(˹dYpgrq#HΦ5ez!q2(x}wB.`j_,lg_=v_t!҆$g6@# 7. $!˘U;Ѝ,U5{*:_ljz @`>ƘE5`5ٝY_^k-6{{v/N}Yˬ|7 .Yܰ^ {T#a/ ,\ԁ_!^߽~tky+ q;|N'V8,=igajpi]V#G:ֹW}Jܾ7 ( Ka5HzOUEk6rX?ӧ]?p2oc)OXw.޻s`%E.݇ӻB] 3Ku N#n2 y].Э8,փ&7}A@_ںʊ\=Wzk[^8wڢ=i#gj|jmlWNa2ApA|V6ckbY_](m&|>S8q_Λhm9v}sSp}j U1bVO纵{z3_|MVB~^8f^'gTo yuwea]=C5xB*Ӻwn`V /ȋ fl "#8Bz=՘{M&'EkwiysϹ!5󬚎9֕Ei7soq~pt|ƹ9>Kkx%6`| Ӛbm9ؑge$NP`a<Hl5s'aTNu;m6C9Xؗ- C^Bމ2vB֣cXװ\ހW%CamՁḴRrӰ JQ9#},oAg8TNF1KvSfhruSdÎT `쏰Ng:M,_usV,iUP(-&?;BbpC:̩0Cln8q}` MڼT&FG\7.@dAwΦ˝]t=n.U|űG­UXzsdn9LM5kOqǹ,lxZ:qnmǺ^cƮlhDF0gN\=t&#pTR ib2KHӰj$H7LJZCډJ{ m`) tl(tMR66ј wׇa\X@z`ph"ùBM`B~yʅ +`~ΰC[[*twz՚qe0Y)0-" >Ls_E}̡f{0;Z1LR 7Zb3=#J I.^c&8\>r4+^s{߳dL}#]P#v }V8Tz3>5Vb} I*yj;M׎6O2s*K 0x4o ̥3Scj]avg{%Nɇ֭L~KX! 
tfpi\hd<65 ?G#3uM;8K Ky_쬻qZI)$8Ml^|E1\cbcJj%-TƨEzG M`cXzc> J̇HΦIǶMϑzIS)oZq0>AV݌{;a~hQg\̈"n$&w^u'i2fP'H&Wp}Ő'6wGRRed'PIR$iIZB**BPI}z_~tgx!uN OҸKlw,K*  ?P^R۰ATEum׵ mVu SJ2$rqb_Iw6]\.>RX渶拵:Ώy=?QOzO%rd(IS6g{׿0ߑtg=TSɤ{ `Q)OF=ܘ/壟>Zlܨ5,t30_69 .cQ1~+42";2;۽۫oAO Ln%K"<"ytJz%D1>"Wj"] Y-D\oٸɳ¬YQ3~T v x[=6K%JGŽLjVS)m-0s-fY1h mV.3PH@e,`j|<#z޽ ׬!\wq z\H06ÑJ2mwgY RK*U 6'#0fKqX+9=#3uD˚XQ22LRWh{ Mf|rN9<w^˨_yY‚fu-<߲r;gH@N1fgڢU N:f6C `Ö΢ܰ0D\{#s-`l%5nl)V ޾S)V]JЦphimL#HɛS`yM0ٴ՘`?gێ|R~nW~i| <ZSK_a\OAK<,/xC{ſNW tvqAʥ@-*;N&U=mj ܪ&|s2Q;D-MVSaMZN(m953YP V{AO(}ݽ)ˢI_`c)N٥RA~Mh^kl{yGMLes~7"Q='dX@o~WDAhs^&koY"9/I% \׃OMN_GҚU8?wʺ)+:]9} _MZ﫱I{sX4:nW9[NI02'ꚪXo))7t^(k *V@a`?0!WMOWKIz5kJPN ~*MFI<{Lm2'>) {%РJywt(38Jt{ڇ⿷XW {$^NЛ%/T)٠o%D/4=S'3ig_I}u}^񴽙 \vY4Of#RO }[~؃_1ؕ<K~tKlh2,1qm0~gZ X>=dAƩ8t"\z>ߓXS*uĕ(iTE1/vwX*ןWZ V˙V0=YJ ax&t%4f#xį}EhC +}{J8[W>cZÎF5n`_MWU|?a'odoh X ERvg?5n.{ܯ"r=+L%C߿ +S7/iX3wD؉dWeSE1G|! o0?{oI8㷳DG=*bJĐ}0*p9 2ܲ9ŖOn}* $)A]EGL?!>G趪oy"\`[uM$_*Yn,c33{cN!t{d7c|n8pkFJ$;\]"sGI>`Ǯ']*`.wlQXe'3b>Iqٽ~ؙ2Pqq>P/2*+LtPgLFOixJŨkWZ W2=iW<έ_Z668{l ւ6 GnJB|]jU3%X21-s}<ĎzE@츾#W~kLE)0O<,ZÐbVMSdaŅ\tuxTvc(~uXMy. VtpU6X>{%AwQVf/t[1X]ye;L:yyA|/1#:7jnBe^5 ':n%f?{PV1W zXcC8VC&$o2ڢP3L?t`b3\ӂ%mSzZ=/j-gMOqF\ K}oDԣNt#ͽ8vK,4 M5Ēnmq^aJl2N}]vQO fAXss\{zOh2`XVf;dg]d#!IO`ErYbh6*DUjpZ$|.i6ݺ )!ىMFĖӇl 4-y\9++Tԝ1ݢ;u.Tg=D8(ʹ}W+q:eszVFso>RwR@,! 
+c9M$ǡowŒmB8CO"3hk-!OowJGLy#t"̗x6Z.G'Hg^tB<\zG?3u_qjK^5 Ig$_ii?2鋑9Їojb-o(ݻSyԟ{ %y#+H z,3m2GTlvUضu?|RX)Kd|41ާXLͿ,D¾f{_B %nPn5Sw|`|܏`x>kFSD%eP~1\SRiX%d 'j0Sԉ a2d^b[ QYk8rښ5v |8)E"{E^B?gh>*4ڐK<_Rxɗɹ_0=/X2-O8|y29 JPx]}М=թ$NC0.A"I~H+2Dr^ekVx, CCt9j{le؎?2O^JNjÀ}{&@t`#.c N\ rFUIC̺s>nZԳRUu+A[j4s4#4IU o+cc×p*+KO„i%}*< ;k?snl|R͛mشfPv*<` nfϥa98王^HbV;YWua^r#?MWž;nOwzs2xcfn;R\ٱ!=T[3uy}R|.!WD-Ľ-/>Y w2ף9`+~(AZw0UE_f_|[~#ŸsXXʭytʒRe?δ7Iqتc v zGG14J8?@r7֦HsZsE|%}iMS_?~#oZ: = dJmn|.J- J\?L6W0=jGfqǑJߪ5?6d.q6#zb˜㭫"`D~nSVѝX7~~R@TJg4#>0 co`!9Ѧp4+8@2%pK}$fOt-X@j"3?*%%p =IןsJ;lIXһi 3dÀYYkVYlú\T{\ q=wecPd< MˋÜpgܩ W.uàiA NRoAqh>3#=nW3J} qaϡHǽ r>+>8ӝKOe`F0)=-+xpm)J9cWO,t=:ެX3'RBLG:W߭o-w0`EK B>%ksGq%;J;gz$3pF8_*vڿR y6.Ȗzdf ;=.{|`tSWs^Ղx]иg!ȱSWăj&L?!!m0KvFZ 9Kḱg%.Xn2dzcۃ~n*De]ݰWSO]1OCՎc×ae`Lʥ$r"Tد 60N?f|yG,[p]ChÕE7zP6$ "c~>|Kƞ귴6kx@T=ɰhwq:Fe? &]籹`(w`!7;j zҹS sʗ<<왟6'paaBBwr}Ґ;Uei+/`:X=z"v-򞚫fvQ.P?4꘮ 6YO>BE؉' [Y:(O岩Z1#Vad,{V:i!jSHzy.δ a}{u+C b gԢgx":l%Þ~,ΛǡzIi _}aKa?pXN΢Ʊ (_!X~n0|֞P#`;l)zLrN#٣JZO`y[ƀ LhA `iw1g0yKv8N >~_å&2MNfмE6y2:[qM6o+P/4 cQ?0Djj ׃dE`ґ2TYwXRU8QsfN]b˳LT?GR /i V 3~jYz(T [;XD7 /_\o$+vh}.z54Akc.+U,IօqmVΗ\t8a툼w(L9 `6VP{d".#vWɭ$u1 -/^ӎ;:N+LJű j$}p.if8G!{T#[bpݎM@);ɖ#K§Hh;TfL ׵@ª~a ĥ?-^~ AJ|SNBXwz1=yg;kY&[_H$dZ|K)&cB'[N՝Sp2nf쏛y&t=`Lx( Aof$|V_3dw4(|[ގ7fZ"G`]tT?Wu-.:>/ږ:yw6Wqn`=1PE^[JX3:6-ƊL54JzOD=}~q\ٚI8xo;H $W/L ƤX;H}52tc'̡;Q_ YӽO{SgKn(Y~ J?GRj]x?͈&¾#e춷pRZʷat;9/A %/a)vƅ;0՚x Ym#Ф8MmT\Iy T3]%nM{{s;T%${v:0ܧ$>Iozp%N3kX­ww2;߹qf.ޏB-;!#۷3r?ŜNAqH Xw9o~A  66.+u,ξr*jAJd9ߙNЄ ?AOoVWI%-ZpXas'̐>xn?~89=V 8Eнߧ`VY:9neʑr\UPw8ʸS ̹SttScUBG+ȋrtPmU4ujl#_e WBs|Rn2Z ~jD,L<~}T{]hT|sLjlWUAXr6~pm^xR*y986ft45{B : zwF'ױyofDy@DD'g9 3=HQZNm+\"f=-U=[>|ű˔=3g,oZՇ ~pY' ~̟GDaN"[aؓ ֱU׷q`P-sw}J˜HF1ACb-it}`lL?0#`)WƮVD"ݰ/11jSL3WO MfZ ; PV|48k^歡ea*"Z04SuAx|W;[v g@tGh;v˾wWY=b^8a+t)wfgZmO}=WK}3ڽ{}P6Nul'4A״k i_¥sZӖ_2؝]]GXJNLX ̠1Ff8@$?yFYirm.>p=!W8FQICpZv}<$dwn9-0234,d!9~9 xwTt桉h5oW nB4œhDS89xp8I"EH3 3<GZasƷ ^yK_5˞Y怀*8r!|JP~Cg7Bx_!hZߏ>|*s^:|=6%Ǥ:\Bp#q|iVafdrٲL`rtԮ 
o?_ys_$%#7ΟYZz7eB8hHy{FRE-wܺ0˸3>SE"uّn%6[ۉ3b|=4iHFԤ0V~2/+I9AVQ#*7N!%&^ :V0p dI JaAh+pFJ!ߩ4R VuLrC6Wf-^%Rm&U"O.|Qdx1Vtv+'."6nx'.QJ!< Ռޔ8qt[q?KF(}7. *BhdT(_&ؤhhq} e}lSS-EŽ-d`nP$ (G B)7+i 79ӗꦠ!\6|z }?mh2Dzvul<;-t)eVm}UZf{`i ͬe_^~X2ׁDaI:/e銶]"30Q/`seED+Ҏ5.`5`2etx.V&*dv^=zu4ԗՁ6?u_t RHwNtY#U?,Yoq~R)ztjGqnɃ[pǗ |4_}; r&G|l ?g;s>O:O* أ-n  lm?H̬ptg:DR SY7a_e孼bdg'OXycO4onY.=Jmt eǙFi%y/mqRpY% v_6}YdjFz l}{ ]V8C~~kt-:M  \ XG7"[8ʿ/1w9@|(jޅv oۮ0$dv )t{qS/7J̏{:{݊)LO17\.'TXa5c[0f} Lrދ;hs vN 5v7[kIT1] ,zkx_[N< ˱Z]/w cp+Έlu^0Ữo&U.̭ 5M18&ܣjJC+/Uŵqɜ%'#L0/Lן]p~ O^>=!_ү+8沇xHBCjn--X޲B,OjJ1+$ D*l~D ɎfL 'R*lӯݽFPȑLVضy37 ލm0nv{AעAʏdҭE["XYb^<; cl W)=۠$*7,ZBU<%?V[ʢ^c k:a ݕQJ >' 5q)e.PՓ;/c`ӣ.CbL0@M#ߓ)yWIE0_i. nAf:`7uCؘx{A{4t'E9pޫX-ly{mc!3{:.м0Oϭ l#.戔N2%qK/L4w26Nsk9+ $Lw8UQq"6+saOa[aM X+ZKP/ 0?t C~,/v~= ^]&8e'0Ki{wxfs+Gs.ȎS,io0*茋Z~V5ٟOnN&{Κin6Pr^ł YУ&;#MEb?#U| /뻓ItͻdE'+,*Cf?6;o"XzK{ !CWSK1څ`NʗA >}aг[s_vau '\w$<'Z,a}%a[l/ rk9}DG E}*KkC뮝B +8~zQC &')cx7's54ŕ Jёs,2OO? k<@ ºFf֬VT.Nsv?kzcVgǨ/tD\YXIY#bs؎N|"bd+0byC`QA7c AuI!پo\16rvlgi6U"2Âӳ==˩|{o޸đ~Q ?Y '\A ᫵rPyiQt~H<~ )\O"nAyB(.-X퀎,#N0W$w%ݴ O8sB6~yq𺟽{g:qJM8{dIgLZDZb/3Kә̾cQn}Wܼ/|vRzXQg?$?|a0hTecl;5}%kW$;Щ`z/7 {1߆seTa()S> וwTLyx>r5͆L;%P.w3Sa"U'M/r"-F6L-F|wo}͗EIuuvgXGm)Vs=fڱ(N =&HKKACMWaNSmLq#i|f%<ǰ&z@YHgt|M[V׆Wς}`Ni.cNZJv|쳝]Y~ .e0w1BsJɇHC7.$lg̿ 8S}߿Я6%YeռСd5`q"_/w&{?3F, kaҕO)Or(pqtRqt/P(y 3H_ kKRE&Xu;-I~Q@GkZGl Y2S[SH זùiY~a3 hm49b-<+ +l`?[x81o/pډsa!™Ll㝛D4 Ʒ6-ٍ} 1~{zBc7. oc;R>?J!/޸-wτ*nO> \lEqksOAh0J̒`&^bah:l-~"ݻOFZDeOka`۟0QP4jBkԵ+[Ruoqs7BXrpIq9k'k~Vᛷma9ITr;eÐ9!~4 vSl|+gp DO5/*EOŒ3n@utE'ߊsbVeȽfqx^i ?X|nE~ \6=I%'XzvHGv~#t͆[`_.q q?o*ISR/q.$K{TO+'#..LiŁWfmaN.C+:iK v]%q3+%bN:'`6pΙfᄏ/0@9嗋*Y8*M`f"3T"z WҿDa]Yᦦ"Г{ps4L~t! 
磘eJciqU B1w0ra^"Hèq۝Roʵ<]Ɩd3ΗCMJ-4 *ݒ6d{$L E%j}5[.ȣzs#wI{t }vᆗ9|EV 8 $zU  C{E~6 aH9+oChrkvM**U zw\;3bUTtcB6,%xgfFf[fnr%v]}’lզ LG?+yt 6_ױuοgxhzM͂>0_T sEtF EԻL1cl09F*Znpf䯧?@%r5CtHd_yL{s ۪;{Q(fn;Y 4 9d`]\R+ZոG݆d~*|%d9.e,@kk =q@,[Ä>*dG cUbk(k UWp&yX%~-OU$ty[]* aqp .B|В]O[ƅu0iMi .*;{¤Ko^q?\3q{o_o]VdX׺gw5T~7/3q"6HKzQ`Z~o幠Ie>(MxB& %luO cr048ZL|RVIivk!ϫCIfzo9*aW =pNQ-z Vr+HO>t*rnWF]d,=ͥjEu|8|l*p$۫,^̜۾F2aù#0w{A%=e, 1(*{06w"GNO EsUo Ww>\l[EjW6o!vVQ&tBBDPMrr]~C0JAJ4>lR(~\!};Ʊ7Li!(LJrqb1LOsÛG<4kˑ5ߥLCw;p A,V;* T~o wv\V29#& od8|gk2J@Ϝ\LkՒL9sc6$zkcN]o"̻y`q^@7SrhUۘv̴t·Q έը}($Mz$ 3fW}s&FeKC‹kέ W?<Ǫ[JkmA̟PkQb͊ئtaϢCbk\TlW-)?F‡aaDbO谀dOTo85ö'?z=GreYpSU qð唚6\n #6|wUux"#ED;W5*8G i9f8Ţye&jL`j-L Яaޘ{P}]8W1L%0/lXӨ;E9ۜK7q/5n.LwՊ3:- srd˥K6Y(R5U? yzdkەa+0b#ckeA_*㴺՟~?wΛ(rDŽtf Uv_R׆Md u5ϼXW=\;!ZL%H'kr6x:JMY˪kQ7i`F^C&arŽ59 Y_=Ϋ_j>@xO߳;? v\Wr,L&Ou.U> u͙ujLca<3>ʓ;6ÌF-\;3G`.ZjX>_N"JIF8gkޓ0 6or? {g9h[w acڙp`,toϝBы0cpNA+A2ifFL4o2yFMG뚩':hN!\{h]8F047vMO;RI* z7xS:`탹;׵0lV0Kf;E2yftfMM*; Zũr]- 2=ͅ3TҨgv}JC!\N&΅ży^9 W .mClxm4_[& V Z1'2|1[ }R?@Ihvxklڷc6b͍߃Ly\i+C"< t O?%=Psajro83&mxb'ۓU{yqtV=jQ% '{u[ #Ҵe0kSяCVw2Uuƀ +}2ݜLط'W zU'f= dbj&KS$S=-d2^]ơnlQ3mo}"u }A;ĵvi-:ߐitFHnw\ѢN `f,5} m+ 8qjrRnc,c9Z\G|bu= ~j' E4-.w|V4wR O9N&s ոq!USxkaÏ=qnxH>8 KX5ʺ|Y1iL?Gs0v[c~Ey8eG=? k<回!o Wg#1%Ԟq{xsc5 ~vsVat]+> C~tƱdmhb֞C( #IkT㬐#uԻԤx^蟨qg?eaq|60²:$* &\h ư oa6^&$=)z4 ~=+_ ¸U10Ym#w|Q~r>-*rpP[g8mR)s7gc5 db.ed*.l}dVT9,NW0mriEfJk65P| F z6${mYydG{罰G8g̲V(EsHX :21qtǒ0Wo8s~5RϾ,\2u#ƋoPu2o5Pw8줵`d O#ȫ02܁Pv R•k@o1Q fW8\5¾q}?niCr3ײ! 
[woZ##gwHP5) Y<`LM,|/fgWULt>K @ۑjVGi@Ƿ`z> g^)3î>*{G 7Yƫ1!1ߜ6qv7/.aV^oY(}%NXmZ;Üj?M(Zb-+=k/AM`ά`n}>1gUCwftK/tnZfaƁBv lr&= 6 {>>QOfIu̯'9/2SsSꃛ9%`xZuWUZ}VH;O'7<Ń-i]6j.O`5;fMzc"0RO{{qي+>8sE1`zT?Ի·ܑH9>7~֬V^̀7\E:0_0⡘߱ǁh,]r>k<^iaw8HqnM ўշa3)k jlΥc߹(Xq{r9[f=&CȤ9.gHf=qO%Lږ0YeR+B8xQngQyD07oyBmaaaΩg)c 獿]IDG{v5mwT=.^: p*`&ȿj1͏rY df[QDºk?FaY$3}^l;{X~I#݆8(h/#0d3)ԫF~7o0'Eo& v$PI˺| {e:N5"nj\Coo髎h%>G9 >^ ;LG ԍ~±h]3M!`77F\;9 yc{#ZGӚ&p`LP08 =G2]-R y㿏"H!qzN꟟%aO,N~Zsw4Ǩd&֔ T/tmQtFJ9>KǛa|S}#pʇL,i`ػUzSʼn8 '՝JbDʣ-)8cO qU\מǂO80[H&3̜׽-PeՇFryLpVkW˜_ƇHeLbck9u˅wZ@ʑaEf?F:~~"zG;E-ғ;F0~ÊR/ۘq#KwϗI:is%2]qBC/u=znHl11獲&([q2[N`W_ʮA݇PTmU>RLæ$grOq4eNB+];)!|<)LdbZFDQ*G8Q9iG-96D+FzJ9,tiM޶e.ٞ <1=/CWlje! rE_G Fq8tlTԾfb4vͻL_5:^# 006 ze c{j"*#I{o, V\}T#ߦ[h|l.g=L, nmq' =[=lS> CfB-3~:xOzT*`h#/ekx.C,>]ram0k^?Onv,*aS׏8vY0[)K@S[aYV,'6)ǃa:4 |٩g~7RE\Y;,ɬcpWz[^88G;40N ?Q4 FeǠ֢T6XDYhPFxmXT`Pp&t|7)O øAiR>9&@9jy%8v&s|a(Kؽ&9L\Mhec:!t^ Au!38@Zơ1; .V f܄oQ0^XBZ3āJW,~:@O珞8ѭ ,M%=PV*x_ӆp&@"&6=8fo{ Ӝ4!jᔸx%Au9~1t\W?TfD;d}_ -W\~MLL) KIt}HX{o9*Ҽ&Uc|-Fܓ$lG2oq Og]S_nK%?  KJڠ1y% .q[X!4T㛳80sDpqUMԕ &c8c.4>oLB& vl˒a8tcfE8i #o[){OҎBq'.+UGqPSg 7}{1]@SyѤ .=G(q탥*+}sa~7En/Ob _q,ukG~ uǎXSC3P3VK$\x5]lֽc;HLut>{gҶ> ߇RTqFШHJsZv5:],2#8mWu|+A=B,,/u͖Y{q8._$du,_2f矉8j.{4K8 srsi:%( ÍM#fSLbdE>z}26ssC-K~CzDƜ.Tt8)؊snҘ 7aF׀H,~%cY'az簿:&"p;$i n!5o`8h2OۏDM)\i阧)$r}./g|YL>ܭ3Ir/֐z~̅lf*J]%턉ئX =aa±܀Sӱ@ZeO0!Tuvht9R7 ɌɸxPma5'`㉋NU'u\gvHũ[Tmd&oAr~ߚӞ4BѰu_mU# u@ȍKоC XBl7m/Vtw$aӕ)=63'??ӅҎ76_z̾T^;R{b&]ه PF4{L\hp_1}=~Hz%cIMpQ\A* ϟ܌ #^LU?{a4Nr3Vvc Q#\c&e: K\`_nA= h/q}pf\n|[%U*ΗgmqnObs/)9k9ХOr0m~r־:HUHIZ}Y +|ˇ3]WGuLzw(p|¢fA! 
2icމrkd"٬#{,XyQ2TZUR|Gz[ kSj]' ;Sub]/Yve^I[jn0!Mk%\Ո~\?D qu )4Zo Ȇ՝ kt[afT+{>'vq56{*zSwRǍ3C{rx\f*\6=ROŜv'&֗~ʷM#n+3q2^ߙT2;{ XhjDZ4/ pG:ҐzX0bub뼛$=,uO7pjǯ0x-OֳBԅ#p;s?%x_ e0d32:'~ zݣU8ŨՋ Cob?>@տ7*ÇS_ (GD %,&/ ֨KqIa6M#컋m'{g|&3!ʖHsl1M :Yc#J3e``ϸZp^~s&I!}e>"o`C{y^8/&I&^b]i9hhI?S,bRې6Kù2=AS=)aSoyH}Xԗ?6$ԩ ʛK/i҈P WH(uWKRׯ׉Zj }2]Z̵S HSgC/҆?qu?!xOϑݻ`r*cI>ߟSݛ,nL]Hͯ+N8E8bT6'L ?;B)3GF~72]]$`&K5'!.&3 ߙ=gZM5a kJgI45$ƍ?KGKk-v8W~Y!R?Wns+uu%eo@An?XHzc"ZY=l~YX:#-̞6CK+ XquvX[y':]AXo`_8#׮}yfl_`}7e4aSȴU!+D_S /QYI۠zMihj6hkOO?`~xl*_yGzށeu!< pŦ#k#&hscQTEtWFe^]`1|?p m]tZX5x=Go%%aGr`Lu$FWc\V̢u0lpCgO8ո鴙"4qeRN%Ї8Fn\ܾS̙d_K?//0%`ky v} Y[ɌiE[jpV;| AQ۷? @-io㫩P%'v&K [/̈́q@Az\r34 Et"m챒;%8՟xgoFO;{)-n'T ƒt)~t( %T(VH>~j^L~U%ZSw P8|MUyD5Roi45z 8Z|S2d1,޳.S?HحVyvy!d>W] nx@+=o7D&AP{?6jt 'ÄV:0ܲ/ޝK;ƺ5(({?\_)rYu[pNnc?i˨'7O>nH0ʺ2$rM$`ffq0s,V\3+1@gp t *XryR #l`_MVhL"8/9[o~#W$2YoLfLk1YT25f V%n cd|@ٯdŒ&ʫ_Pq{N1Τ jacv< UmjS ?T}(UV4KcNj+ en۠w@;p>lC_52c%9VhÃn/ CіW Syz,Vff)ijφ'jr3yL};#G& !,sIy>3u3zƒU68-؄c}&r,_?7" Jco@֭nq"uy%Xk AtG5 )n8S_훔8&@Eb2V̝ 3wUxDf4)s"UdN-*^"aJ9;4{9$]x>/m=ˏ˾ @aCM1.=]a;+Dp߆ݏ\ax 6 14PX#X\ F#wm^Ko +^|a?N݇0)*ͷK&K`auμu&y6 n\9 Aj%*P(,aa{b) TCbޤ62+|(6vِo>g P D˵~ MO;`7D@~Yj%Rpy:_eUX8]%}H<# ?K~\n?8pv@̝\Ѻw졖uFmp&p @MJ8g(.`Bb黏 `OHQ5 !v11ThPxudfx_`V{Wa"6>r$ۍ[<EcޮVC\%,a-{.K5ʯі`,a2\>8z93)zZ8L74 \⪕@OD .J_?yamѽlfL=v;}_k;ML戇y-8=s ;Ap]}IU8fP`Xx';hr["Jk%޵edB§Vcgq=Iy:jD`Hr吟vG33aY <~ ^s |#H#G(#oμ]ZrXl8,adh{uY{9mƑݘLчc+dLbes{^J BB2No2z yWpj8&dN9 W>-?Ͻc5e*߰Arb/3tj s1~Ǿ-kXy~k<="XvyQ~ 8_pbC'p)Nr$IQi޸6.̨i6\ߜS41 O|p &nOƺusF)-\;5_7}?%?<Fc*tb*rC:Kl ;nHFv }K? 
O(o0ijha2C5;CkAhb^Rm/d޺؊@ȗDMʞ]HIap҇ ˇ~ލ3珜$GN0DQpf;+/q1K#'H[LO 5_C#W͇0*kǿVhR8& )7bӼK=5,H/ݷ\75?gE v%`R F8qHu㱲g$=G`s/sL6~Laqm q5lܐ[ 'e=x D+t{c-@*G,7kJ-+cB۩ú<Găդ:Abζ| =m;k]}x{Hn]7B4nI\*c La6qnm+0MHƲ.d rCc32Z RBjc(F(e"O-a%l>=9 Xn*90.pn ̨)S*gfԊ܎Tu;.pa&xÔ鏋G+NycsvwpQӗ0t4st 7c ӂlCOqNRޱS*8s$˹ISܯKÕ9W6{q5,{>2^LwR] :;a#%.\Tݚ+6;+WOoZ^]]}Wod CߛdSf2H8>p?Q#dԶoV!1XnyfwVԋ-r/˲fu~6}73 C10#\>:x鳅$ެmΊC պva0:Mb.dV߉{t~N8AG G1$jΉKCb.mQw[Bq厠E8k<1w&oHH a!vaC8{2G[GâvORSß0QZ*K}c?J F"6 tLfНN9?0ltl3t(ӎ&xן9CN@NrZ[[ {xk:6M<<`1eE]?۵]r4⡻k~7ET-I{4ձZqhPqf' y`pXBcsRA]_~5 E@7cHS?x`:/d|9aеk*a$ô~g뇰ӜBf6){EAw7fd`_}RgU,9;Q⽡ LN20@&=#y ,e_ǩ W`bz4m[d 럄|m҉4縋2V. Cg%/n3{Z"܀oԏ=Rq4L2E5esjz`j5[K;۪`cyn_) Ƥy\.˅͵@Ks:Pϻ''6 >F|d*SO±g7NCK~JuJoE fL_] oebik; 8:CM=>n/APDu>Lwt[ϟbKz,$>F@mvEsVVx7z.m9GV ZYPKe^fC'9] O򯝑o˽,15d٧g1^YڡEnѺ>! n[,TBm*m>T**) c݂.Ȱ mU7$soPq.m#Ӊ ĵ($)PxzԾ3 ;D#cQF r3J‰q;YKE]Q:b{=Xd(Q3YDЋ!qLvCALYˣ;wqI1(6h(/I$ I"*R} VQ"R,%IB(v.n 9s<3mC!/4ƁZB)X G*NZv( n邾05g5gqzWn&kb)Eb=3ڞ˞8qiL@Bז(d:DEv\ o@7ʼn*;\w;J 3uicGL?h|N_RKvՌ+maΘ"`>mkl Nے7u fzL.a-GaE1qt>qiq 84 ?NeMM8&8t}6KSU9%_:0^r8eK|~vu芽k6+BtUU{_3\5[y+&_rX ?y?|GXzR/`$8ݏ5eTX.h"KSȏ--7|p𜫦lJ`к zT쾐xP_dXNa9ۮ WrI߇ 8r\D@\Rܯ#F(`Ml+Ƚ=q ]G^IܛEk/2|bIzf F[*a^l5B \Xhei1R;4qgY sQWעdo 'HS \~:sշ0Y`3Q0/?(Y ͸}b~8)mC[a~8=X`+e' *(I׻ޭbKEȯBSO/I^ eӮޞXt?sga)t[}I,֛+'EߡH[Z` T!ǷEwLdns)v {Cܙ)ޝg$;F//!-W qQCs}:}nS G_ W6F!_Nfb^`X_EjZ jc6<öut6 G㾯лN9uO0q,Zs^Y4= 3hW :<[%E̤OswԹ~?,j];RS>v*[/{[p^nQMua6+Ym߉Um;5ڱ8BN1`ɆVtz}߱뻉܇eqm:?kJN >0S=IGߘ["Fع@FjgbxvP廫TflTpf?MX/`n:LbsLt!6l 3r#o%J}fD^Q.͍|PbO#^Yܟ`0mށ3rXqWS[o}&5Pøp!$5* 6_? i'F~\P5-:T ^LnQM^ϡJ$)=Mar F0O&Gv®)^{~tqB C°hZh ˎU0u)X hŖA\?cq.IvpƱ<3O6Z2 ys2zqQ "bd9Xp6rj`YpB* ?VӚ26BJsϟULySгYMfdVgИ-Z/ Hs l`XeE>Rs Y|-7LֹiMc=g3XT9뤘 -QƆ/`K:~ٻ ]R):֦CQz1%y/Gh-vU:Wc<;MalP\84. 
}@.S{e`g"m V[ۺ 5i`MA6^4:ꢎ"__zIa$<%5pUp`9鏾q8rDAUFr3r+i9fx< tYsr⦃"tܭM7!{xՠXʼnq@7HHϩ?*ЌсU)?_dy;D mz]1eP"éu\swW,xYs_`ڝ@!,{i=jE/\"乻&!9l+= u1L 6?~/Vj}ݶ2T PW`_0Șy7/7)L{FE5W V]HUj}& Z?#H {IA-CAѿ~9\cVۿ9't+] o냩c7taὮɑC8Ty5hP[&?/xb5ORaaW`Yq)%L ̀3n_us$d$`߹b~:Fs/v`uv3Fza_i_pkC0?Zlk%wYRu8Ol,UzK-ư?~M&Xâ:Y )+\@>5 A¬d lZ|[D Fq eGzu.9?һb8Q& "p"D?zX-ZSol3s&sŸqQk<9 7zv WGT|Tl }Ƈ>`A廰 v7:aG uΆB+S1֐|d ZuzϞц80|nX(`x+'WpiB[XV +9f5ֽر?H}\~ 81?k}\@!-OCwb`9)Wa 5*cyňOaL'z4g{Y`,<>˃ !&:۟OLuY, ҫaKg 2'FϠ96 j*gl/')Ena ӱj Zۚp`,/qSy:*MAb~j0Oܿ$Tء;z<7Խ!aF x 9h`;8=+V:i\b4hIk_8~CqoMǾ7LSTlH&Lw_m$ ѻQm9Ó{{o>5k俨:hpLt.ĩuy@Y{d 7 Cr/n3֚8ׂfzᒙ3;74sہe5.g[WԶ.QZf%Ve;L]p%bZSz2jk>DAhME!1YݨUoe~ ɪSnx ^g*MMkOcia%0c?;7°Ctfl*-Ł`C+8vm;ɷ= N\op쟡aq ms\K:F¦?0A#i#]$dzY&KIs{bo /؝Up3vnO<^- Bw߉%n?x;-K5tn0x'=q34q_{>mef:x<#g>iJ9h|Q]JKx6؟Tavb+Gu}N"]$ LVE piނo)SV,[Ă}| n7z.C5%]vέ |$-D@,9)Db%7u6Z-GwKq)o6׷8fCZu8)>XX{+y0)lz6a6~ 5zߓösN+Sï?-:o\+z0Ae *$. &tI=,xա> oHk̀i;} 8*Y<f~m Mš Kal hM\lysBa2JDLBņA_ߍHͺ}K(Y>׈[)*ԩmV8^bC*#ïNօY·juʟ"INQ 7cgȚu[S.s3d3þMߩ9U8piwS8v{ N9 sy^%RQI@SfwZ!7^#GqOxVcrв#߆2bcҎ_#4A~s=H6CT#p 7`8֗8dӜ#OR(8AbPdfCS q.\P88 q֊ {>lЗF>@=C"\^ǔ_03CrԄڒSֈ<5 7. <ـ[>f$~6\:[Fa.rOJwpc$sw9N ~]׽/$,G+kzZR.x 3o_͛bhdZOMQ3Fpl2 r: ("fo Ȝd g s@dbHP l+TT^] i0o'F=i61sNy 7{_$fs? &W+ .RnOoĽ8~>%>~8O i3OI7ߖ~LMbTbaL?P,/Dl_GTC~Guz6PJk;|ۯخϻa*Yٙ #Eo/F2QجFX66Xrc&T8|ŁG2(vϰ[O{N;J.}G8+}/J),;徽ۊ/ b߀S% wa@-(,fn+Gجp(^՛&}=;X_[žR ,V8!dj&#WR &IwS98t ?q{-o,2}N f>#`zO} #ڡsmq+s}ohwiKm5!VOZ0I̽AGݘ3+1L+xPFhJJ7KI0̚{WO/n (ϧ=~)GVϻaa y>G? ?-0AL{HelЌˁ6 '!&(TE=O'Y, fَ;3sq ??+SX|,̂4Oۢ#kJ0x!2wYB"Ђ fNA3lOIy4 Cì8w3k/^\~*yQR8 Ķ?if|OEżF[u(Ub #@#֩a6xjd(ý9%/`{fN8{8zNoS韦=v&R{D,Uw $\^Ʃ \4&?@ʫ;h/BU.!=#W G%_oNn|ꠏ]I 6[jǟkT 6Fu)IfiBT=;}} pqYMKq3g}#H85gk MhiݲÉK8RgR:Nhama5ljп A!* _ֲ. 
Л\sK8N-p܃ַkQ=X2À1ے(E\ugϢݑDʶ=M'*XQ0oĜ‡/iGZ Lܪ̸GGO_@OݬsB[낱뗳ޮ1ZBˡv m̢Ng_,.7tV>Qw2sH-) .+@HwOs`r Tz= ^+-#صZd zv42A|0/0@zL AV:H6i%U6cɶHB}O?l~7:ź;oz xAs癎&9| /,ɻs#$`܌ggS 3-72s:Oe-v",fD,Hc:0$0sL~v:mؽyx8 9D+z{zRѽ@OiDv Hq#j{g'w.0ºȗFIeX +ox/vX;䴇/Ԕ*8s r aKEAn|Ձy4Ψ8r7KKc5kJҐ^S/_J9EC7?6th>WR-<7F!$?n|ai_v uzTys bZqFOepLi7ͨl8@:Pv7$YցnMޖ (OY~%lpÖY3|'gjjg}ٵ_Z~p1Q_fx݉ :]b1X.U`?n=kEfAZN˶RzaA6poEu/&}B mQ2j39k8a_{7n.+*`MqS̏&?X)W͟V mܲ lJ A5(뿷V(Vx^7;e+ɊiCoHm+ǩP8$%' ^60NJ)ۧeF;iAg3~ײ@]|E$^(!”ZDvWO[R q/DX;Scy%@:'6]*LG%qM/ՙA>js!cl<g7݋K"ai9-{%&li LjJYYR(!Ox|Gre;M-?[\ف|}0T,v 2FqGKPr;"N 1d!}qZk `oizIp;dܿO*$Ա#x~^ wDkC-BC+[DXF] zQeWyÃ+03_h6yg{*W:}) **msI灚ySiV@F2_L6QzkfA^*]DŽ`>&a+wg95+@ʷw04P^qy_ J(+gtj X_W~Y3V~1G{^`s +7Cߩ΢BXqh->_ټ+o@_ N;b$&͓ 7/\"IO<rn 6]Ne}/.'Qȗ,c5Ǘj1wT5.peټ>OywϰxZhBQO:PzʦmXq$e>/XHeGƭu$ty G~u^H])]i)8QWcZh}:n0=Cϻ>ٓ]̇ jM`C`=-c#XdO >0i=ـz #?m]2A;w͝I5vlx؏1 ]e@ -˭bK!' , ;7&qecZҿ"d~=b#@ HH ~5x]-l!k(b-F v<p%]W |(֗ry\c>_Yp6aCwWO:q@uT%t]Qd y~U{Q64R0ӻhxXvk>vMu346H%>֙0 26;r[}}KA-kaO])7[ H=C#EŚ"l ʮynBL) ? #URK۠/!rNp\!:~ ĎHd "K"BynuMIOyWfej:7?1 )vH-k|'y,}4~7u$BT}BpANv8yJ?vJmhIBd؜:c٥y!*|!h;5)=^{NMx-aW/̩!]:2oT vPq^ssz^cz_f[#Bh`äO-V$@Mp#gBAw0\EP pxva_r=`q*lwK\xxe'r؈h:BmL'pam83, 1h7Co0=tOC:A|X^p5 Zjeq9;taV Ry>e񁾣@]gGkb1CwNG#}r;١_AN@C7/vsJЫ>=܂ܝ>/@Ix/6t1\ HEي1Oaƺ\W.t8XD֯Tg_?K`s<{QٺGٸk,$q!cwV]+ѐП!îag,ce\: S3B3{D8iB.B//ypto˯_)o6k^בw8M[$%LNEcdtxXۀ"PT$ b/Mn.5/qkj9,VɅ@M9=[j'N]@Q ͎[:PkptKi܁E%Pefm7LUOݦv#nj+nm6$vo^C:@o06cCP7˓ sǵ xS6]r̩Q=+ڞa{[CYiЮM*Rr^M6 Du1'z`H+_L?ߡqDbp%޽Jlx}5Y=1LaK0g?=6y9#H:QV>dνՁ8G=8Kz0xD˫8"q{rM7-'_NwIJNؒ`=f= }\0h̄ӭMM;@}}9PBrul޽$H+܇@?պGI47}kg}p*UEaΝJLU㮣Xϥc;,Bp#?J ,d @wڮ}Ӹv[ D^˵Jc\oi-.O0Kwwкm^P\{_U#9 ? 2r+.(&ҸߓgY4 j^KhO~۷2|0M? ki1H6l1Ě׈ Ҽu_/wLTֶ0|͉]f*W#gRt{+81%wԸkJMFajj ZWbp8~k}10]gkk,6c+z&?zG)#+f}*l .2m0U7ݺ cHul6/.RrFйgCѺߐ.$Rg/]I/) LgC$rr{cm5U]Vxf#g^r XNSyEfOYXGl?V2) 63~KaSX5, :e{TK٤Ťu/o3)8`8߷a2ׁ(]3. 
1ʼnPH1&/'m~^n>WZG}ѳ1=|lo 8SaqΩɟTRL"%HpayoX/* 5w˭Q&c0b83d셣J 7cݤ[›8fp*+i]Wvp3ِW }jJ7b= `udL:7۝H0(*ATf;P5q[/jr3r[:yZ!|QJ-6f< Vv!V8);os?Hcϓ%[6jCjg^X xjm?3p^S\P#L^ b*&ItąKw9Ϝ an7;=%*c% 0_S잔Bថ5e\Vna[X?0Z<{Zd#pQa<Ӻge'y 57\r?z.j圑<<6O>3ǷDGvZ!Lxtɽ!CCn~VmA"Ԛ"bB71B>|0' VXcGU-DIf-qZKuy6G<;p\_NGx41|i% +;>θK}z"L8,x-KeY.|$8RFkW cUb xc {Q+)LDu_eߊ Ɣma>&͛#6_ 7O~1q{=K{Pt(xk߈cwYKegE9Jrs7T?wFʲ@N q,ӓ'`ߙzXU &/qvWӭ_+ӤqThmW J2Ųd3$7b5ɴU T{оP-;ٷSFPA%O1KZ)- o>|/nĺg''8?i}G5 zsOЃU+ +eVXM2zPٶDL9缲0KtVy챷뭚`^%jFWK=_Ic㑪.>apiO.kR-,Ϝ}kޫf~u eYU#P^롃$]uKh./w`7 ߉s;?%8\xvnEH<#v*qC}SͥX+ U{VW^}ޮ{;*v+vdeH.[*c9x/9Zx]19mjM/1;?r#`苬8.pX)yJ2Ù?w;}` 6ڕLN*2BM-`Śb?> Oc4.aT?]Z!L$9qQ0N ~x% 6[*¼M<>`=ֶ`p\Aq= qx@P{סbBU]0[~uywLHnLJc~ĵ3\~Q^sb}⏱ƕ`}gI ca8I{G^ؽ)8hZʚfBmTp !s6_cL6lE͏>1!o^_ ^o.go^)T8g7֔3E4L.zn#IY:~Li ӹv.-24 zЧLTZ<$;i4Y~Jn&6U`JXzλ֨t}(R]UnANU7${yT¦(wAr0[̂uG`!|cOq>fqOV3ڏ+}z._ -hI s?RhŢ(2AΝg}K!"uB;#7rՏ(,>FBeqZ0\:\?Dݗ8U_}8+F =okzԨ@5;s1ЏzM^)9LG3HkOI}ի6П;t^!dL JMÑO k<q`i5`;tg}XK۰h*#d+нљIpU:,V?S۬YSa~nU}DF UqccediGL2WAւrm!f:SQD`Z*t``[0ԡek#n:v~ |L9'7ʹh%CWƼiؘunI&}bݱHY䇱`}[K|;ȬD@hɦ- 15wj>/.2W<.-!~@dZvƍd[O =pwƱ-Wb=bϐ)H'FN_!}K_= RQ$q%m't-'gݸ+LzՆ3t8$}77N6&@W^| >,zbغNZ]v=J'/n-aм[sm\n:U?1^f_V5NR3M{*.ƧO`а't#+o`OHĮk.aoj{cUi NmKֆiٓאZV47򛊳R{"QȳVqO‰W*a@S2gU ^L&F~uyi0TCh-t^e:g񅎩c"L0;WfKe{|F``&Hghclj]\u{ItKun(~I=*Ma~ʓL0 ٦NpT!~!i͉`qbDBݯqVELNl&aP.ۛKX,{k7l^?}d\ m[)Nk~uą,…'fda QT5l\ מ-\쿀4ѕܓؠ7k =^RmqIUx`1 qjbƍm̈?IϰN`s2Yサ79k9Aƹ;`Ќ}` n: ڍJ៺0f7jhAK %֏auH_{R_2Q[y*&e\N,QUc׃3[l1`txs8N=5&U*])y9qK!J>>i.}V{<|4=& \y8b)ݹ%os kMjlyEaPq U˲R}N/ Z8p|d in *!wq`z$?yf!qyW-U%'_X;ǚ*+MfV|(v7rT\u|iG3J8QXN _c_`lvU~ d-lge} ƫϝ7o׷A9$X.Q<L.uIh{SNo|':~ RxoLa'PBne`4U9AXe\1yf )<b(b[\wk!QXZ݁M7ũX$]U2CMd,qBIxA Qvs RDFXe(ڕf}ɱ3OqU8[\~NA NJ8u?dx.n" ,נc?jsH3T,8%2⠭-6,07/ި ?]կKr%:R2Ū#@gJ@f.YVbLMHeM0ldF0+3poZk) ך?Qoh bݖg6ЯoIGe kwf0iWu ܄­ȧ{e ;^B@!`WGAz0n P}@ۼrvκ߽4jwHgƘƶ|83Qa1sKU].f,a?{qX5Y&h$Y<@]`Bfkfz5k`nM뾱:XGno+r7ϸF[EaK s*n໤DkyT!Xz֞]EZH>$>'aժ[!0_:hԥ%76{ޗI};ymk;wb<[-',E|_GuԫB;߬xߎuV0vr¿staV=[$t=2WgcL8e2{߮ _jq6mm 9uPڴ% s73cR(6g/ӻBhXņ%Y Nh_8lO'|܉JƂ3q3 McGۉF2Uϥ?DŽefI𮻃`xb? 
0tP6E&F—?)Hܻ[!+{w \=iӫQD&>ƚۯ[E䉨Cq #qD FmX-Ճ,z>N嚎`oaurJ>UkeeUohȤEt?0ds<ܡ:8sSp (9%:WHaxU*=S(ꝍC:k^ښlTqd0W`˝RiG8Wl5u57N8km Gs걒5iotd+}ǫ &Jw>$vUCcc4mC?T \Z8,n~P'Y cㅜF:.$s{bI#$h5oݩ2;'5`&ԀB.~ Lj6͇_V_J \/e'fPtŦ[-whs; `? i!K,51J7xn{NlS]aC Q18Q׊JJqۧksv|`-#K!oqf{Ma4&_†Ich-ú4b~/ra6A=6.uW<BEoL /a)t 5{B6_{^:8Qs(?Qt*:e%p1PՒ*}.ϲ*bԽ:*hbI/p$:yvV l^7 r uTa썩R}C,:]$B\OT cO>lXA@.X-9sƞ*k=jUϗM6oSHA)źU9a,m0Xtͧ6(ļ1gs;׆a7ITVֹH:AXbpPw'N>]"<^M ^a&Pq!/V_pƫVtNm>Kni,#޲(M4~}ϙB:Tk4$>L`C}H йw `xbߘ\N[8գ/AY6õ?VbM.j}vKT^ZBΕG(zUY}% 8SzV&.ԑqU5M>i'dR f _%%1>@IEBc*۱Qd=D^8ϋV+MO >چo: _|89Z. +SDma%uhV4V#-|Sb{}"l:pvPPW: :Ko!sgI%7#Z:v%;`Fw^ݱ yROס+6 䋅dBqbs,C2y`Z ŗzZtf߳erVR<)d aRpKg[882eG2kc6]=0̩A,evJ eV|᪂0VÇ*z āsJtŽ!qgnƅI3sz0^e[gh+8oӓXY:r8'ij#SjĮk1$hՒ r JmJf K6ܛࠐ~'gفl_1|Fċv$fZ#.*a+:Hwv)^ٌT W+4Ȉj._|kعK(&Ρ֛ς F)MqJ,pNά3|6F^9ԇO3`lu7m"{G/_} z{iW#qcbo-vMx'uh{#uoEh2|arXU8I]UT'BE[r= ?åƶ;L`Ml˲Tw.[Nu m~|ӏ`VvQ[4D'K@SEE'tԿ 8Z=4-w$̈́cByX| g/^Pʷ>\M 1%Ɖ4?y`?^8Ø _eOEcz1˭.Ǚs:-=K/a?ɝȟBt/51=VPÊg׻I4:Pߟ- ЗMjXsJ$oO_Jkq:S@6xJ00nkE }M&}6f]4?Fq+33r%P|8銺Ir9\ R)h1&&ljPOzMI\, Ie,:+9%\siUnQVʬp*)yz6zXi"U~ {Ő<ԡCA<}{)p#n,3)A˫޷(OD H_á&Oo55,ګ!l`NsjQ#u?$+pM@-vZ4_'gxPl N.bYCf ܟF xVn [f?n'KutB/K}qؓ(*T+N_ OFj/\b7.x34&gN?)켹iqaLPIEw pΌ(77BvWu h BE؝1X枋FgO;k7t.?u~G` Ggh}ˀ+%?@Lm9ͬ8v0g G_6>1\U} iS?!]*A%TttGhV'-lx]UDQS:?ݐ wx>QQ0R Q8a.w?fsQgy 99:zykwygqa?.'1ٕYY|yr7nzϘ0cG]̟\SF0;v#tol)p5VI6~gX*؟_ `*ﰫ5h ۮ Uݔ 23Zh.O>=-s{ 8O:ДNY7Լ,&%,x2`.3<|TLrw^gMWC,qO$P^lz! uml>5mHgvx9% \& CINp^X sHE_I,2u@ȃ UmHgA10au/$/ a"ް8j²gătGZLh@Z .H^7!WO`u`[8z$1m(8ArKM8)K i4_%{ =oiG͍U썇j׀6!*E*yS_l6֕٬ >=k@Ǚ-˅*>X=1Vx/(㱹EiH"522>pLrע''zKm#99_'!F!ݧYax"sMB%w!e[1Xdw +OӮ#8sޅu6:u@&ŵ4a[E)_s#1T2sب-0w-$m^Dfw.!u2l@y#8.qEvC!`C0T]8jmN59l;-s_(G.0"W~Y(~w2aUݷSx{ +N8 UƷf |.)9J)д|"Lj>J(.;p^j(Blds'y o?|tȶg倁#C 5}*~FJH|D!d&iCx\E%\}`P*&\˓Nn?e +z̉"M<ڳ+~`{E/4ME-6 ]nso.y_].~$R9yPe/jcuAS'byZ_]yg,[`!+!tlm)m>|X4 NsCRħ71w) D ևMOfKL3Cޱ50B(aR'L*3UGGv]_Ky~+'#t238\Z+-u5w XR54m°*@1omx]nGge.&>@!ye? 
!I*ǹæPtC1ӊiA?H_')3'}bR@qXnIV5pHzja J־C[eqX& "7ClC0EǼ̂3{ ЗlYc8pv&$=F/[}.TLESہOxq~, 2!ص̲>=O!txS&7K* g|:vq>d\SS[ o;-9cbZh1[gК O+$ TɌv=Q4;0BV=X,epwll^Ԣdž };BGQtO0ptI?q+۳L!ώ RІE'„^_0Ϧ?y3 Mؤ/17]Re}UvwjbmՐ8ivdK0YIPC@êِx@ڐZá:.֕^]KV.B)Úje5S):?->Te>hh vІ3zK8V>%dюRaO?[Y/PlupP@̜e- K/cA^Vc›^&ALi5Qx?^YF39}r 6;&RaZU:6tƩ_o'(gp{gG5\=}[0mbۄ`)u?f0cx,2x^@1XXq uko3cǹG/, mxUJnW]!;fx  YX#6~1 npe1VA2 aV9pwRYmq4؞ސsY]qLL>6?L? Q o/$"末X!]HOۗ`ϑy |+䆳^(|,V.!_S+ĎLh̜P%ڬmꃏ8'ac)h_w;Q6N_ninÔNWʊЕ=i]Ua# {SDL7w{{cARm۩.}Ml|[-eH Np,ӅUpVӚq]'ʵ\kˏ'6 S$$%.S(k >ca@XE0>?ĩ?ꭍV`reyWl1-Gg'. yRO_v -ǜm_vWA^e?|W:;$φܧI+Ask{t< T3"F[G*- >'HJ#ON4M{Drn3.=xG%Oqtһ 0:-^"3s? |.vhс {M< Ҿd^R?=φ‡WkN3? +§+@2HfvH7Y@ X,6vܸ:+mkl> |py4/G泧$HHӶ; B~ѯԠJ+j'/X4Z!aX`b1}|% M;&a'ǺpÞ[GˡI!:NK<.}7h|T_3lFR a~;p(z!W;Golz;Xabt0bWۣxjo¼ũiʋSGWRWC)Le]P}΃Pڶ-tai2OF"vn5mWڰӁn813ĻiWv[k׾DU »#(% {Faw:6qKk!V#^G`rO籊 ʖ)#r̸( U76J$Ȼym>լ[췑h,%H5w\V6lSvD`Wq_:T Uj*%K=C(%E)ї7[¨Ϡ((K*p95 -1]G3;([,8N%Sz( Q*ϿM ݣKi( CF}}]l!us7%,ghֱ潧{& \nrzlgԗ:v랁VOawa&.fzV5q.VMЫe|Wل}D 6QPfz?2!LuAKz ͍̀ z{,^ΰtrc*]烃k2+c#v3{e7T(Wjr#6[vsWb7kRs|fh{ɖfjYpgp{ c+W@2`/9Nr*􂉎U*ߗI4B#$wlܸbۏV@W{a>79 &*`'Uak ,c>ke%{NʑgUmY`8HzpuثJu~F 4,bN*xv3 7Gj |gF<.eqx/ c~]=})MJDB' nxs89Cڄ2lvKSf֊kH}[Tץ^`_|ѕKigz bEAlJWAer'^Ηt'{$Iѳ= C?quu}=uvg`c)Rq6{A<+zoYlhgT+8?\ku^/j3]!tپAzl?tQß[JDYD:c4[nYF7̌%6*Gn ȧ ,n5`' /pɔMa{i6^ f__=w0G~T KOVEXVe7lZE}SL? g=I)_=&=4e;OPsJ4> USW[߯10A2ٵ.).]CpӐ}2:; 3\wpeioT?N!:dF:N:?)" õETKr$1?`*_@8/>*=lġ=Rb;vB`K͌(L[6`4>Y|l's ח3sx\3% 5Hbus#ʕ L>g:cк'm Y!yVs:("H08F0_=&cvO :m;K0׺U,z9pX0ZT#ieZD=]$Bf@elfX $ĩ/{pȏe5/fCgΌL0Ŷo2Z"ް6 2z8+}L1z^8XUM9땤ckyy~|˱]BX{|8f+Q޽7]琸<۠$B zߠB!+$KÔO_ 8\4'R|sk߾a܂V-:;<7ocukjUQnI9?VnIS^rOvw8%_mN)b]o.6ew!m7NǓ8yKC tuBL|wجxM%ve=xX>ŗ1yYx8 + OòSYEZsOo@ڦBG0qiXv/#&ц3 þ: ޡP>Pu}v20>UKI0-Pk*6̒O|5l듯qV@( R`KH'ha3qRyt )+dcoP0O?gDzIHXoajzڨ'9mw=~NXMV_TTf{(WT]WT>L<7;1Ghxb\t*0;yly fzqcy%a?7_atyP F6=[^< _'Ldr &AEu´s~oM8e#M'8YaQ(2 +6{eq^aYK@d*Kq.L}ΉZGjAeƘ!e|8ޟtcuu+Y` ]x_lqBăӁOi݄}Jݗ|pD>5`.LU `u\f7ej,_I~ED鶀uecgGj\a(tjG>X5pjz|12FV. 
"C 0:+e*;_gv*=mRR.-,"akťG8^i^T˘3ђ |og.-.Ld ib;zS!pM7_{%7'}<5I`( =(z%* eyK[9[&J@(VMv[kBٽwNbwSqX G\Uzk~3Jl0&j%@&z*osaAB#[iK<\ArՏsB5_;zvyԩRz pāʫ能 S ?o#2# 6 lmcx7@0Z wvPv +yae* x< ^>YץkR]@ו< z}K}N~9%Ӫ*Yˁ05R.۰  A*{.zat hn *%'$L&S?C05?`|U Me.X{]=qQ((o+Wԇ ֢XܠORqzpN9wE2h-ٳƟ|ңi8 7'L4/ <=`n26Rrph?5alН uumi~Uu]a%]'Bv2UG&XC;ٴ>_xM1go'vtlA3Rzj{ 5x ;NC[ |qjoApc;,r>Pz!M:3V'QutHO('jkNGZ%ڳ_7L1z]:t ؝a[;ٓ,0*]n/jI#L BrK릿բB皻v5ox髰sW^뀳BZ"wk#X?;#eRIQȺ(I*RTD䫌 I0*NNO{o9<~_?yps/^tӯ^{1LOو.cGB-jeR k|Oig8&Ml֮j I9uL8Lz'UeC5:u]{ H t)*~R' 7ˆ|@Wr(_6 cC#)"΄ a_t`|P;#K;lu9 3XpEAhCs%yp~?};MtC P3"xs"'=q{QRRZ87zzzj_AYN" &vcxmoc@.!rt a))QJ&bYpzo{g)}ǡ}M: DެHG"s-XEl9@ M{y}VIX~A+~Gäه B7uf&MtnTT׵3; ͔;"*!hQV+dn ^>IM,r--oy8h8űĐ.R8C#lf^SuG%h$5BO"1zG<=y~cتL?yeaj&~]Dq9 Ct3FWamMTFkbE*=g3#XzHAx=-+qʌF[Zon\5Uַ (a;R4b خ\1n(ۻ  x/-'l}+SKE-19@m+W?hb?5Q/;*x!YK^Of^`q,fՆ`LL\IP:VyQ׭W'ZiDavx,(Nu!8ihk ,ܳG$8>8t[ gzyRZMLK,Deh%`fc10! a?'-bT+Nn%Ӎ炽 2TmqƗH81־x샍Yh̒YڱZAʂd5un/'j5KoDx_{ܦWɯ X A]Qd:Z2itMmW 5efd}OHYbdm •}I Ð1+> 9W{sc-Ʀ`y"c%Ut) Nȟ;6LA\|~HjE8=|i=|j:ۋ܉̩iA鯰ܩ#U̬>0$AyFFve;ZS+&#[b؜caYS!iT|Jf2S _I`I)+2x3=f [ gA<}6TWsKǞ_+&< '/&i]BLpub2?0-Fq,k Z\q&B8t}J U7>O{ .$!o \#9kUY]E}? 
*|v 6^ֶ`e޲e(Gݏ kv8b4|zW<:7vhpƼXmW[79 8pGExvznuRǍPr9>^}%o>4]x=d&.|-ǓwƜ fU&H2aQ2=Rl6VP^Pt||<aZWC^ nÃE`9ŃP("ZE fsL{Wۢ%4 ?_,)Y%ò_`\w P#"ҋDC2b TX\lq V1܁jlqݭ c+sopfSX(IuEPN{|=@E Œb|c9?:I/y0.4ѩS@{[ glAfl;:I2"L8b kPT3t0*VwOf:Ƌ;/Ayv4^+|=Q WsgHUB^aLv:Z˲AT,}sLyV$Fo z/%k7?<f;d`u2&~G~ iz)Řuz{diiң._cX7>P-ίY۪ѡ'~A8zYf_:Dx| ;BwZli埖Z1V 'xrtlmaYx0u-^ΏZXp1%3U]oMRModa_jcNM r^Tp=?1@S&r&gH%>ɊiFBz82VJ[ă.,p!`jM7=^ضt[v@kANSvqM0EKmM5-q/ʵeʏ3gZ:]2P ԭ7iIc!{L|vlW,ۄatqS6߾(3Ugނ:m7kEe; rO{,Stf v(U3yzҎ~}t_YI{jxptyTSE[Y'<`ӪZ]bI9+Fߊ%BS(팬k~/}ԇ*sr78>w2iAvIFυχ"EA,zSg62j#`7NNId+Bw4wwų͞;W~]ct!тRAa)(lqƏ &cїbpjv&!p-wc>}~r@ẃ+G 'UlO_.O!iY.1YYs`8Md` Cl+ǕfحK;^ #bx `Roøj)U_87pZI ;^n|_/Wieg'nFPWAsC-e۞lH(mCw5.OaP66oox>:0 M þ|hZ- _6:R`r hgǯ Υ;@h$ܫ,Q8a7dOv3s"/:e8[|yrnZ^:MFbEnw`Nų8ߟgq!Li[u z.֓Q]7l '̠oKqU$`ے3zv65Im#=*p[L<=*3o8lZ]Gsv zO?p+6&Ƹ'.Jo-=`KeR-pBV^*$X.*-20-Wм2{U3v@/LGnܶPy24;;C35\\t!12=/mD;_dGttzQ8̯foJ dr8t&Eua]PsO=r:!5 觷iwVxT@fҙ5gK Dw* dRCfy k|6>?:Q _-(2Y'-a~vԝ?ZO[ENkzHX^.נV=d#Q>_qF\ێC\\ّwH/کk%Oa8w1zë'\>)y6$x-aeIWLH|vl/\S,F Ȍ>:;R3~X Sw+|=:׽6G'%د&.x_ 8\1y)xSjߴP0!И}c?cFoaflh~&'rU36r_JLO9Ly|V_Eya.sY3}x?9CRP$9\:@9gH7s;ԯM&7Ϊ.c ˓6SI D~q$:1(Ti +^q}'_VPKyLp. 'ZJo|r:0_:Z`?^"HOhpW, ?Lf )]OR]7G߱6]4.DGlĨC?Myp8܆oҬ>ö{/M°j}BQZ&Gp Nل:]wxm"9MjACVn9֥EPvpsFs# j.v?KI1$eò+_o6Y䔴~.@azD n|e1Hb/2IERciuB.V "fKO,Iwi'Tw8WAQBz8Cf`j֪_ YBfղ .+evGaN0_x|Jnn'kCXZ,ҍSTÊ߂PL>&%?(y'hB{e8dzФ[COp _}jlo}if|8n OWGaGVK>k>Fga+偦,mI;[=q2lONfԈg50%y`i1\=vU|ܖhmb[:r=v.y FSvdf;T8 ?%u`8LY&nzsU9jWiҵ'?sM0;sEq2i1y*ޏH,g%lP֗ L5[pyzW{q8:,l A`;Ά}Y 2yT'eȤgO/lԁ'X!{ )!?qV.u&7k6^ #O]u~ZݳV!4鬇QX:)sY*;,q{+*ErG0Ƕc޳COM1JFO|۸ۚpą|l#o's]|SǑ2$}}dt+Th]Ћ2Xt0j? u+w ˁ*I5=jq֯ѽX*l`X.{>X53r3Xϫk u<A]wj䟓w3*H{%V{,vjYSIw̓{#u.m*WX/̽ƶ_HE]0fl1J3 XVA_KIۏ%Bn)ߧQ8z0D(Bm`xI%k %X-s7#jlM=&3-7B&?{(E촿Q/6_ qGXigGU.lz˟H08t6!a\< :f_$0|Iܘh푵Te8tW4Av]dRt= TsLAaf@0! 
cETĤy:Z|NŢ-?G뺍 ,2{~t=vbspж%XcO0M3kԂ+Ń9mq7%+$u/Ep!٩c0]mA_K$Um^v;urcuT;\5㿶6l%/a[_6 ZXok9'Zb~4ܑMUkUa&ݩ9 l?ʲA0ql|aLbd4RTjnt}~tIC>-]`b&4;FHZ=Yr1%b8t)0y_6_ם j5'gz8ۉΩK6uG/C N]V5Tý&> K]h7?;b"mo 0)=﷯|v' i .pC;(Mqd>*;a G[ϺF~nec0UlR:bө9'~aԂC+OHE`M~T\>o<ӑV0&}6ea#7pPrAǪ> r (|;C+  ]W꾚ON8PU )73FLB?6-vl]P'qj̻PM,}] }@c /)8]\^;O[/,?X-{nBo{󑊡]Y@.$\R_Փ @NX>ҾPtL(t ͎&LcŎWq?c!uR NMa8IU>혼xBvyZq|ځa05}5 Ӽ%" Xx(Mg1YroV8cG/+Ly׭W: PLa/}2 -Xmnv9`\ l*C3!Y f %9=7g`qѭq%Z1=zݝ%2n2>'5 yU{6r}gRڛvTNo{-fm l+j&U6;Ce$Dn-)Xz+/P]їSڥle1e1l6po V,>]֞/fPQկD%wⱩ,*$]3-Iw6=f*8.g\ {mR %mj0|*xtH$üX¼x#R{%qm;77``lHM0@/: KV嶇_EUhjLSN]Þ$;#dfk @ڶ/̖m1_6 I0S?J^d3WuMʣU"`N "Ӷ~3_Z_; ۼ—/lRJ4*<}B{MŏBOmW8@qzBMqv81ki9SSla5Cc2T'ָ7w_?5⿿n~P'tPu glӁ' w`Yd"_t(P4xW0c@hء6Fz]dd\.sN7G^uqX&=]"oݺnԫL&N'b@Ҏ_(~~%&%`ʤ]]ΜJCMd:T_,5xs;a`u,׸f՛̿} M)7K6ckִ>\O)~O^͢ڣ?qis'Paq)09 '|^W!Łd0 Wzxn Ӎ}q-rd67#luW-VRی*SF[G2J xnN 'I3.?]8,8<܁5#_)8B{h0e:)qc<&NX7*aEp\Vtm:)-'g^;ûp:zU1%͒`٢c3j=D&}p ^KZ @- w+xgu?]#?9a>J\'uqEg VIy5-xpe0S q㵇w2).VqrHN URG θ+q\G~5˜LE*TWrۆ.`=7J)rNkhPvꅟC@S;v9[aݯtnlȍ\._on"A-%2]•d*"g?lOT|7j _ayq02]cBzy=̰e{~ ڨx9-jګa.Ksclx~5n[ #{a6YDiK9q8)﵏z]TpsB++sqAJ^Yg\Z9kB@[8xUzԸ'Ʈ;!ff]Z`ش!6øf[vn X7yASNFm6&缱?\Gtc^4 4D߮ 51ng6 ZN~&J.ǿ [0]I۝6.ȵ|3.8]k2fX=ǃpQz`Z7&T9 TnSݱ0j]d#Zե`E=lXqn>l ~'H̙1j@*U탥o@sCyUᚳR,=װNKf78Y:Yn}J>Lo*K}-/lϚotwFk`_)4?~zkxYhӌy9y=6ʋ/ñc0(Z6;̗Vq(ۗ%VQX:3 ̼R Mg5?#0fɷ0bo ^:ʐ#n#t%M^fpw0 V ylf~A|!RX^*hjFPis~Qwxl$3$.b:BN* !juh}'=w8Y[]x>k=4O2+kc-b<1}W{j?鮜~:ߪ&l?6sÙ7YwF!} PAHOwț ݸSlL3jocLrn'gY#i w>yro$n0p0O~B0Zk]y(Οím+ EiBEO/c̮cl@U*-^upц q¥68v2psѸІB0X#na|~K gCe8#YPuǹ.ϵA MB8|`g!In`W 졓 S:9$RQi}DŽo8^Ncy-,[t|J:劧>#/bI#@YuycG|?߄rcu<}H:5%bjD[~/Ǿʉl|ԇO"M^-b|'?+:hg$wQ-@ͣ<ͻR~#~O"xSw&,4s?ejc. . ^ll#~HwR9B[pN L8 q<}(aϠ\U? m?9eVq>[\iHa㇣ݪAcBde?%G;QFю?򧝠J=(^ ɍk*l~w[0t[oS=1iW+|fD#6o+T־4Goyؾ8]Xw'赛"-\,a߿42Q9LW)b\/z l|9 rRB]}&,C:$%7[Tc^&-IU 8+ PSS`0{c9 t{?v[ԳCiW^BJ򗰪V{Nzecq{s2LI fs6&0KЏ h>Rʗҭ /$G*"Ӗӊa=ƶ >w>Tӊ¡3f9N-ŶRYQ*TmW~@ P%| hL 7ߙ#Vk H<"*sނ{5޹1e8L&U՘Cry,XK攉qf`tug&qZp+N})β? 
!MqCג/W!xٺc97 #LI{DQB,iovC=_-%:V) z8j|Nd>8#msNrmzmwFP ϸʼMPGf_]yNiBQX$+> 39;%WODH@ˏC'yT9y }7/>;/HB{\enb>/D $e{Ry=.e 7pqNرLq/L #^L2sL#7_wZzTJɌf] ΁ ?<hȖ- l|K? MisaIJ++Cd,)*m΄YX'0Ƈ]yd3 'q.dmַn|-Pcͭ]ޖ0t=uaXmW̞P荠cѡ{+:8(V%LhYՄiғa&HKKL_ՕskMɤlw#ڄHV ahT*FT3ޤ~o'$]e3cӀbrOm%oj`M,wɖ'aO0J(J7>Ø޷"D+r^ja^24-ǒ嗷 \4_[=B5Z/>p6aK ˠC^>4v^ dj8fKala I{c7vlɎJZ> XgjCaSZr^}~, 1N!=#N)|: K]'bpNw PrJh#բL^u :) ּǺI8^veKHfMk_Mksk3ZD k[#W^Xܡks̰pFE՚L3@ܢX aMcɬ,jbgbJ(XԟR-z"U\>ak^tuLg-Of㰒M~V(އ:6$Scχm-)".77gg?Np9x {G -MQ8r#/%0: &s,CW^=xoCC/zwHN=dհD/>e Llj;:f'OGOzYGF&"C R')_*aL͉E𔉻g7oӻhe; \[%A U0WS~<㺊V~[q!sb>.LU A9aM2dV"rK1;( rOj%Ňн9Y?đq"x]/51佂0yȃLv lšPO19 ~3O=۱gߚ=.?-n"8.5}EjYܩX1 /,q} s :] g(O]T!?n_#O}G̸,a~i=aXfrw(Ѡf6-jeM::X*x]F="`e65iżI-3Ewiq:ڱk2q4+x?[g>Do3SEi2g԰}%7 |x#~J@<): thbL"(z=OP[* V#_MŽ ?6Xq}o'ֽܓ/<|[e#^}&IkI+k,E=~BKl#E>gG/NJ)˦s8}V?fogxY|%r`-m/Am3/ruXԍh;k>kޑ&φ<2 V4]xk!d[eiPr_-uf֔DOFNB֯F пaӔ?76ՌSߠ!9gh~~H=c}s9,iUi/699kz:BEʫɲhzq؟n`|ﬥp t\K({*4ڇ`ذ'G ӡ-dtkMƈw9Hm$0#&w7k>LEE,L[@ Q w;_ 97a˫)>jr,badw+8x|/;%Wa5} oՠƫeGâ?!Plɡ`|r+J-~C˩*d)5Թ_Gg&j $Yion*}Ԧ7|qϱ 4S7,`O O ~+?+HLEA*@:R8"x: 6'9d f?? l@Zm9)65_mLtzeAo`7_鍜m4O$Z (ϭ_y"/.*\xPjŃNx-`Hw(mrե%9V4?Af'%/>e{,{q-V L50*v3`ۆwPt߳mW8pp3Xȸ: vyfu\%s':O#W<:,^֋sQ/K7GXjz"Mfsj=reQpldw;X\n3_i=o hrĞ#cӔtOQx\"jA%_Eu!f^kLoka|'3G Vp= ~qjKvxsl1zuI2ˮ5H6WXIՕ'g 7J'0&!SQz:\3!IU>Lr1}׎ <<`Ɵ_gggΪ+1uG8⹕` ]y1) ~bl8&t&,\hWn#LԈo|O*S0}G>@1tZcFdp/y}"v-Y$1;'E4j|uEe*ɲ/x5ϭ EէSwmI?󆑗vvìŕW&.W`}"珼M7?wêvOT.B& k~+W̽23߾ qE6AXMf/uڸwՕ hTG(RHqp? Y=, W`8?^7y{ ؃5=ӫbiphN?&3)0+YXHa\RAwlqfǦdDkеۉhՙ8@i+ݰ\FIcE@B}[9? ԫ}owy\5:ag)8=;m?O cbGzS\B :*(uK"TJ9x <'3N+}/B^n$vLߔ/N秨f 36IFsok2v18O*)LJݹ C_V^BIox߹&;23r~9;o{!Tv[8lIOa(n`%[20xE2_7P.'0_˗([hn`<+>;Haj*ؤCf_#=zH&N+o8F;sG0:o Y}r#%2vt[L9uNky.6@k: .=O.UtcNB+-q,ѩ8PuCXXJ.j;0}g[ڍ1>y3@ٙM0$M? ]SCN#56V5R:(+kk˲9_ b1_4cuʃSr TAϰ1(]Z M#*qjb; Uzʷaq*mSxR 'xr@֌V1vOPJ/7Xɤ%gc`aIJԉ+~<^h& Ù&j +/x64O%;V~NUB^,;Ա Ormo$;^4F3uWOB_Ua6TYuF4gCliI5]O$.,ť<-ga$OHL{? 
UjP;»vbC=*}ut۔O~rK'L:v75jg[e;XD`WR丐H&pp*#=۠#Q1k.PƛŚSo1:I濟ėyW%ԀHym{Ō"#Ą*2,E@뵈33MɌwd_IVťSo|ti ѿ߹0\t rݶ[P=8h&FO A^,CG/׾bjޟ#u(>ǵQ}a9Eb mF.ʲOGsj`*ru;L*< &d2;+6doo蒡S5;ƞޣKUa&gQ@ڎLRdᖴ}3MX}kAqC޲R_ cSRҙ0b V &89|W$]~= ՉO 4`<嘯E/v_oc^ȼb'4;i),Q^8A[ɬ=/$Vx`> po$gCgF9Yl2^#gTueW8 'q|S+qJ%')U=̔ uȷl3S8pk $pPr `1&V=auUX 6?7/صS$vv7 ]U(и5n@}.}፣28WDaYʼdf'yTq@'.}gq'N?5w9۳F>s;n_dHs!d?V_K sh]Iµa. Zwn6.'݀f裼ڽ O|=CT6&<6S wV &鸰zW7-0T~&3 Li>st!N;^p ("c6SW'cݺbChl>}6ZW} K)31we=\[Uޏ3eS6U ~7ap8kߜv]\^6) zH;#Xtk32q3a\XJ&$4^-A5 ҃)cJw` o0ԘCeoкn ,W?8< ?{vO| K\QuiYox__Uru\;,%ZG2 o [!_{$O*XJ[+D.\}7<1 LXf؉%^@&8AhZV~+0:8P aw[f * lGqޟ.Sܒ >kJT}5;*(sb3%7]Y%\P(`V=o;M=/q0܆yC^?[ŻL#0v&WB\:@ap'~t9g$ar73=Lh96`Y5#U)$sA_Ks`}q+ z}pq8tuR6w[ym{{'G92Ggz{n]>GW&_' /e &U`iJߙ05^)lEпy]Asgt~1-[ZVN4dYdi&Y$eq'=A}Q8q"|uM#6ysX 8Zc]0~^\P]u-%xSHrzT*G[d a<=y0O?yw}VS_L-1& MBP)!,լ@0?Mx63dP|!I G?(8mA>PwmӃT;rE,D̗duom^}X7iV?djiFfZ dߟ>n+❱_E-8x|b&{ Z= V^SV{dK%Nj0)n>DzdF8oaTqv#\ G= k >X_]eEтzJ 8R7^]ڰjO5 2n+%FItThkeR2# t-z*㞛 O`).*lF!u8VTT=:|%l}}a"W)*,(nַЀ"ɦ8#Bꐾ;-v_6'kG!xu} }a=U߉(l񪘝T9]}(g0 %~L*s ؍$o ًw4vl>}c)vC@<9ko=],+k9&2goP2AVdGgxUi ;p1hbwoƮ_ iY,z6#ۻKLoB8|w0`x5c}2nY۽M{@?fub\lqXFSed枒DZI].{ 5S~RHn$9;e#H$&S2NCqB-@~о,^s7֭/eQy-U{&~&:{=DvOpl6LxXc5¶kJW~{֤d ,_1 m%R[qJ0T;z6i^dr oc\V˶Q*Lږ/nK4 BpG *n> {[ӀjtT<:_U!P[xVt(ib7_lMabl~u*>@U%;OQ`QnY6?Â>}0޾iniQ=\m/"Xo?ޫv/ 9O8.tMG3֒7ra0Ԡ`{_#<)vv׮*zٔ6/uVF'[a >JMV; oTϹ9/OL_r#al/kD}KjLՍ|J&<)Di } 7A{!c(Lv'$NW лnho=\Ώsy{XŤ*Tq]Citnt]7PMjv4 >n֜b=f1W>KL7غjmib,|/Qi,9)z8q&,&?Ak˻0j%W+*G`={H8R9rA '7%ajC0p֖yq|]nf,X>: [Ca,)Q8;`$.2VBA7"wٔpXIh .j֋N`t?Y0۠~Fs [6fXu{N5` ͅx!_ϟbH:T.6?H/V2OCWˡ7EnAI='Nd .R ,F?+#؇FTj 91 Neމ~;0axʕ ;cW>avie& ۜ{T ͘^W# 7}琙NGYL颫{aL},_NqGf8^~ێh'sBa2EY1WSCO'zA p!!A'<G@ دPЅQڌό0&bkllx:̐p\~ ݭ.02QcaAQG$:r`/vlzS2R!vo~'n+}E*y2p8oupߋ㹷-dfjí0n*&q!8@' No5gˬZqz)AR=< f{1.Gq&:~l|ve ƨrpv6W@7`|zwBR( ޻8~y+akJZi2PWbm9wL{`'|P_Ev?? 
Y$zL8W̲OjqpJB~8dS#gH{EyXKfo;{aizhy`0ǵ0jΨXOtd>%|JIÓ^g6e| ]Qi) '4׷3xrj p$@@K6NmQ^0=J||*.'ԁ\ja/D8&] 8MMpOw ßy{̓~)5_ZG>N?a.%ٚJ²E:a.H(㻶68yAoq4pL(O?pBx),Hf m &@2oTI(`z.8qr9hJ&mE8zMc_ &A%'#+G9oZ~S{HSCәe)GßNUdZA8~@v 3jVbS:ǿ1"}ARx=M"H!3x)}"ؽ7f@Q i"w4j"GM 0s! ,W#\:k{z,t1Ffu_}nتWc, 1.o[@謁i_Y~[;vK =$@GBY ނRG?n 7Yz΋)ew?Eلɣ 8촖vzpT^Xrް{\2 zK"!iI2 ȫd`Mt?p .})>Tl //hf3x: m׌O ZU=ksG=נ{ZGʻ7Ǻ77DD`!H~ 2V~tt7pÄڵ_8[8]ڙEBApuxX亴.^q}k:.aI#:sB%/i ‚_>X{tq3,JǑ3&zAv2RNy(g<4zSD7|}/PB7Śb8k"UO˯߻ 'Qwl=K龜OL_<+H6#eq"̍ߓKtͶ$ &}IG nϬßeXD:$+sJoPf/[Gase8qouE!y$kXs" ;Ɍ.`Q,- z WwdAq@qMt/MQ%'=Շ[/8k͟{W9Ƿ4s$`w#`\x$|=k_WBvG 8 ,X#t'}% 3hF,|xDցnO!y5X.06!% Zuiװ4~s",-񆉺'vB۳B'%,zh.Lv|6N3!-H#>s.MH 1uT-k + [Od cWka!/@mjTj>>F/!2FG>flj]R}u12C}s+ř|ƹapx!ӖMOc;;>Lnp-t3L%2x=qƥmОOvXdY#(Slw<.j9E0;*`V fQO0iN_3CS'&\3L޾0yԅ]d :N¾;au^8Pr@:vW$3QV~퐐CZt1Fm}/YXij{ALi0MÉ9I>m58.yEW JBTRJ,WYec[TՠaDž8oC̼S6-X| R A.3Es<fd9;`tuzdz|#Z- 2-LOc߾_FGw|,ɒ=oe8~zMI]#yT0dlL7ߑ?TXUKTݶXqҢgϡswBXi. f5n˲\Jڥk,ncd ubc;HCo#}>_kWZp;x;T$2oxEr Fh-Vkt[cZqZ( k_TGX[ iC,}h<ιVx{(-SCzpGLZ6Gr_꣇%M1nk,-,)ngkt6N*o/^Pg7f<8 jy}6n#Hurī,m^b6-f)*PsG~+e|wErd-9F/aa)9NG_4quCNJ@XKqrlMU_YV76? aXt㌮̫ǧ݃3[*8ahʰt0RrSn"ú pWe l^n? ;:Ӧ1bgH0Q%wj&#}* w~u td`7G~~m3P+( d܏z=(RgTɤ~mSЎM NDKudg]f{'X4j)@? 
xP}hR k 7\;=CK"^wNJ|0H7Ҡk4mrWȿ;^ƹ?w@ρ_q|Wֽ[c$U E uƑt qk5VrYq";IZ CӸ쿋"p֓ջYйC )/KxNjQL/ YsªQvhv0بCsqD0:$5wlYhԂwiTI?N'y`ʟsviև_v;:7[jX/mpXT 2qmHN dc+sJ\`\ȕO)9`btfu +0Fmƽ`1g=M!m3w-4!Z w?|U63 i,_ay3BS@N(uTD&ˇ8:pcEÓ` $ a *'z R6jgin;2xy۳Ez;h3}F3P,,Ѻ{q6N ) )qKHsV|yƿw@'S"vh> &ПgÔVi>̌];D-7ץPi97|.@_m mNy6 u@u+0R6A5C-Q6)#>y,+ mLP>b}32>8A'~Qfƍs5 ;xHt_rw?8iOj',Jv?Ď=Z=~CETn-G/qm f_^9 #AlAs&8.Ò~zt ƌo7!j+C5N8XKTv e<칍W¯tP n`R}z)iq|W'ѿp`\;vX]rEo kL"bJZ.W7y݂厼&Xh>%2]cX$G_ ;F`qi0* mq1Qf$tMR˱IV\ǹօ8{J[h#J׭Jn¹' dmGOVV[~;a 7;#m_!!nf߁Gvv0zjP}›k:Th(dk6;RT(tQj8a+4i{ Ӳ%l,LjY,޻:`ֆ3 b7_5_UӍbHPr#:3v>6jX %](@?ff{|xVq-5M& <>}ʬׯ^ĒZЭ 0I0}#kn0 Mo (gbe no=@z಩̨`EhW9V \ogL4]wwN{[;B0(yo5?:їȤ (ROY9&ua[?Jc?:ÑuBX>'Ҥ.`;oeŜwtaO&x V{o^nFpy52}`Q,W^Uj*Q(OGS;]z(\ЬW˞=zvyIluô3KܨwB72=9t'ӛotGLدU{W,L"ݾl #\`I'BU ƫ<щ#Ks+ (f}~+d];??7?mzmz erߔI:fcz_ǹu9:̜F5RYݷXn}MOedށN3olr/Cp1dh_dkZ l8wt8>~sXTv]0y8$ASf\xT;.eO`g>6*HxA0g]|QmLs,{X=.yv9Lf;එ!/{ /=C K6;bJ$yнg͜)ʖypa<.t(^IL҇ Ocf"Хm&<~F:utN좰k<5Q#h;zl W7en Pf¡뭑Ҳ[ZaALf,N4;m" Y;ĵhZc,ӊd4l)n5ua_ܝJ#6~Ϟ3wÿ1KB ]^ d3^CK*rx,Xٶ8/ amdОZ+vuJ-Uh[yM)Xh$u0|]l9ت#@x_WLS(t%E9ٶi_놪JdP*H)J$֭h^u~?xyƯ”)zrp2V`6^ddbkkzLϺ}3果4+50#g'C luz0/Q~oZ݄_y9blHݹ12.oS9ٰ'b{AUR.Li*!YY_}&+l`jdi:o`/ U W>[ah]/ soS{7^> I=׎qƩb28-aWBƄ8s(As7S?q"]z'&v_w@zx,P0~RWAOџCHyɹNhx~F{HX+-G:̡`h |$/p6 ƿh+πK$hʄy ޗ 3ʎg G0EYT(ۊ~8 U;;:y,|D7E0qT戩k4>5~>B`%,\wJyoRxȫ[t.En q%0-6ɒF65t> QUa ˹>>@p$[\VzU$8Y#-Q'/h^Jӓwa>;[ukz80mk-:} 9&lamu{FZc6ј̇q1N -!=  ܫ/F9݃r9ϴ{ *mpjU[y#7[/.6vaS0O2NH^9sy9<ꪇa 8xK̽vYg LK#ߥr^w: r 4W(yd+%8T l%n58>vEzt< =Wg}s]w?;ugo+:>!VUc$nx#ɺ"L[>$aM)^ z db[U=k,-3OMy"d ҩ!A%!3[wgvI]\Nq_I&G6E%zoIIjaQ'DʆOx^pľu6190}J[ʣ$gL0lCSS")0oB<5Y#9"ݩOU vXꛡ0|<{4C98#]e lq~ 8:\yH?C0{{/40 Nݼ ['ާI o!dTt`+C:űFk2뺚%$n6cNP3ҟK N-LJN6qrкo>2 -~2زvsޒH hŚ7*4=gO+3I1-=zg=D7r!v^iR1iki~tb% WvWLl>EUy /ҩ(T M';,8`Рls/ iEA&yu/; -X Eğba {%&h.~~9yB&vb6fi?8e-YEs{w3?jϏϾ݄*{ղ㚪+%GFFU{`ږϿǑ"iq%N=/Io鐪q#2G2o'$"u00&"[`F-d Feȸ81լv Dv=ٍ1wޅ4= 2jO `ܫ 7 f/_9.>Tφ~M+~v,w679agɤ7o*s$pAbWޘ,H4ssk/أ6,x.d>ҡZ0jS0 ۊ[x|dᐒە(5׬u 6$ Gh/$Mh~> )X\sw27>y_9|^uvUڱ*d~g_ge*oy:Nq70cEZ}%Clk~=s׷`be8u~? 
`sSJ)dRYv(Z猵YTdҚUnk*)CwK vEUݥ!'ܾwɫ֫V=V } K)$sQW7\ 6_߬33#w;7"od5y KA\\pՏC?~C/ eo3Am:t(q{.Sbhwm\OGbRFK斔CFgbMS;ڷ]sP%j,ZvhŁ6)$+$Ct'(P-=;>}߻gGdhΚ$2V':$ٔaҪbt_&#?+'g#y-fm(0nP8WF2U 7F7fL^#j.c7AQw 4>~TU<۫*_{Q @L'=YwGt_-O*cP Xd%S*|?DWEaf@-M8ƥ_v FEqW7R/gih3oCm+v ¦o;7GkKڮed_mz!gL5-%Gt2yע`1=")n5 LWWdTpI=Կe7`nXGu(L6`TK{SWHՐ«\tp1Qu%C CQܝ{fGk~f2?k|z3dҭad !cdN4|Q@|U u6}Aln,뇓Eg&dܾ/bVSU~=Yeqo$ R*Ldi.&# g)K ^ɭ[Z7yynYr߉|t< '~yk@].8Uy#0;v޾MTɤAˡ0%Kp]+m7]8)m(f&KyB{4ݚv>\DjBpEf~2 h:%~]s~Joz5jEKKk.x1[!)s[ u=BׇX|unUG=o*y]eHO(ky]%_б4\Rpgvxá00h"1 U>癅!g{ZߊaIߓ Oz:e>k@Jq? j9K:Hf+rTIW{Vq}dvʼnUjO8GlCŹqYKt/3ݏ7)-Z`kɑ/"6F&9"sطe[r~m2G֙ܨ8]y(ѓ M=o"ov1(-Y_Hu2&s簩D}k˾|.#h=tﭶ|%x -T,a䡒>srgB TXl=\ӡsajZԵ׬:$Se?*Cu=O8=?dpzXk4'_L:]kK2wxO7W=\V>, na͚4q`*S=9Z$%]Ŝ7-DÅdNUwvK7tK'm!M}&h9h1yHX`ɍ6-ZDpv}z0fN`ɮ641;eqj?ΒƆ, C͐,2ױ#8ZP*5]?Xڹw@e:ŷm7tqťk0My" YۣrFa%Y< NΚ@ֱ!YY9U}SUXHʌs.xLt~+won kxζAiofAДND'CD&zqUʎp/28KJ+MFxã=ß߉\bԸN4\Si "ݛLLP6Jk?f8ꯏc~vnE59}vAfUNlmm)3,|=H{Z%w)̀s#.vlD#L#0OV;G:o{ν8ar;G ]hoj&`k+h޵8.ʇn 01޷/sRZ2~Lmn8g'94Ms썕:7JsCmK=&\$EɟŪ=(QV iCqTXhzo<ruж[!: FtYP~̳cX1|O&qwpÐ {&&ƆI?B}#(hyE~SCȎ^i+Sub(᧎}!PmD:6~{dLZ4~^@5>8]}@X蹇qsI(0}zuw^rFfR'Ln)hU'OAUƵz1G)-G9-zسs@$& KvDWSqʅ~KuGsrkUMGq xY:u,z/XFN4lK@qp#V(lc*rW/U[6D6W5]:d#/̏`V;/~m7{z$D\'^Luy[CXCUZ% .9]gy>*{wwC :M|#TUv:8< k:SWO9T?L>h Ih:̷\P ]sHC'B6[ױ`j2gAݥCbg6e;; JM|#>%d`|;>A5y5 )\jynt6(6CKOڠiǃ^ih2[ںWDѷ'a;eк7Ht.O/CpNZ4,^mE|R})O @aX'~S2o|ys f- 4$`yg:J':(C^O`9 ]eeqwB5p34fJB``?k?8%*O {xo)cvV`sԊەl&N',R$sg!˷wXߞ<ԥ¯pWE]_9aea#@/^H{1~;w"]KZqA֥hZ`lKzW#;T+U䧤|@1Y_/G! pNr-{'[m)}za.(3eF\Ju)wyU>8NP\Xzߗ !@8g< Yg쟀 pI*] b\ 5*\R+R8eݨb2 {lY/K5`T)ȿ}+^,lS](A&#n0l9TbƏ{Ze+nR\0o"}DlSC*n jIqkqw['U($Ai#;OFn~-R> ?&4q*ֳ{T %|߽S`՘ՅL*N\lz-I|qS'em G HGGNbm<9DA8kA-|?1)@@c!Wΐ93"3\YW+'VB]Iݠ뼐{gp|{g#n{7~@a/^{2OUa ﳔ-z!~&aspk+Enړ! 
#~oS(eX(ZjC<EB:qT''v.YƟX,tjL8^hU~HčB4?zh?Q?ɂN9;(IcQa6&,~E+t薬!sE]asKy'9q4/-e_䲷qq{y2|V¨zg\_x.&kI%ᰎ7FwLKb G@Xj,O}]!}w,AUW<|d;xI sҺ PY<s7'w&hHa>]?qt0c9߆Hw sk4K0[5ϼԃ# hm(H>ĉͥGYؾ[0zҬC?77.{%uL ʔ7F@a TJCЭ]iP#v2 /"?5oClGH|7Ϳ7zc]je\M8 Ej?|y[/hakcCRǗbu{ɜ:RFI28o7za{Sq6Ŋ}O/yΟ{NQB[>eBR?n>EdL*RWrS9W ڇԎu vY\J6MMĩB81"E((@-čFUcjGCҼ 3%FOdRH8?&vWx}HzX>Dl j؊ VYq  ;[ޑ`& sAЉaM}UdF2g[̾kݱ:k}Ad(wESM>Xuݎ CZpXf~.~;xDZɅt(NC`n#aObDZuR%*Yl|̶ J |3]b^/ʨ[+ X3za^A2 hu)+BmpZ|IO)r Cinfq'qYOfB%ȸkM|m.ňd3 S{jO{`k&Ȏ+)XRShYwf`*)v?q H= KV{Tz+=%]qLFL{Wb4]ZfSTk'+Ї7GCKšxХ.#-`SGeWX'TQ삠T' Aʍ޶XO?"]jy`˅3G=YnDuaVy1dB?&įz,XGijvLxaDu0oұjqS5ZuzW6ʬ'oID:Bmƣj MõF),hi$!ř^-ڥt7ǝ{8y-m1Rf/Hd =Bnӡ[ ,Sm!h35/֒ IS'N'%B qά\[6OnC(6Rތ "ut%XLp¯PAiW4pz .o/pzcf[B=#Vd^&4/g]pa:0>Ad)N)c5?UpgH/q;lݿ*m*<=sDo#L|OUnqUn/POmf.a*Pge 6XMΓmq x8kGYy2{#0}nЏ۾dhÝ05oP1ʋ%鞷|QIrv]ްhGiůY7cI_plc8}`2d_5/vϛ理n<_?:#>`k?NCAndnDm0ml3E'k&rNJtsB4 pMS( [7HGk&=`Zwu}DWFI\:TF& ? {iMp~jI࠵з׮_w(-8f$ew {į,Z8>ߦWH'暗T~XShC)Wr0٪;9 [wT6X?4ͬM}ӡGbyo؝.Ԕa0sVN zŬ yznzPDմY+<-OHgK F_m2iUo9 tAŋZѸ!ށ1پc0~Ɯk`A<,R lMdq`C>g(v|-kF̶͚O) (g=#/x&nFLq1O F;- l"d_xuh;wN_{}_*dΝ]߿awB%L<13ivXhhN#4Xqpb12Jpͻ='a}/h WcIby6+dyVɉio5 cpY/NƼ^+ T| MlwjnYYpҏLJc2W{6lo"^_ZvRZG lrJ0'#tE}Xb:#Lx2zT)L:v1`;,,D^}[彀xCU%W9(q`ok,%My8bܞVq3zSn¦Vsgz?c{v5GV ѣU?}+F8q 4.b3E: FL2gߞ Ehs"//Tԑb}|v<ʐҁ7Ut@7'Cm4 ?'sKvU6Bl[-uloX"Y= }9$7T9Cכ@S L9e^wi F-),{(y䙢!!Ea^M;W>p)?iWģO[R@ꑛb Xdu {k'87 JX;˾ JGyٍO^q^`*d !n(sK]:1)ūTgrL|5VŝVdˌ <-307ԔF[8%U||y g˫W&TX/yad=:cFn_~XopFh6U Ts(9% 7[uΫ T6duy/=R>KIzFAO+= R4m"=ZslfH|aKxP& .lF^2D+1Կ n׍EfaDNxk[@VQnT9"l"^*Ưm*8gJj( .|L I~p6]PqLx{_Z].(\ʼÑ0v'kd jC#í&Ec8❧%:'^ȋ`;؏ JXxݕ/!A"?X6hϔ&0ؤ= ݩ?HI{w~UOv XM` ϚvF8ME&jrS myxڇB_`N==0r?w q; !-]p'>0BrSwHP!%$}u.8o+LN> 9ANs,)∵?cj\,xPYMp+JQz6m5.ڭ/ $W۟X7I낀*C$l~]JCzitq<$J| 3TzRdԑ5W@uqpň(ʯ2'o)@-X7&Ɯ;sF't%9,-%#e*z.\VzB:GjP8Y͆Ikg]C2w@i{؛g0B]q&Gr*`ƶI>t!! W?XOMd1#/V30EdR畽}kuك_A{bj&N~u`OUH"}D)FȈw3? R$n۸ .+y2r 0n=筃W$܏ߨ^y&GƉ0o='gYmyY;ԀC Ex8Gwꨌ05ۮ!EN An0,:@wλ |f`u57<9PY[ Tt.V%QRԭ`zpT_} tN(¥hiLfP(u}4ɿa|Bp\$. 
{FXwص!wW;B擬-{ߣMVZANzem_8b}x)O{sa8fn,P}覂$7|2T珯d_]-%ƱGT!@FTN#zڋξ᐀a px>q\%@9)$UBfeo#R3Y _{D׋\A"< 4[fcA|e:5ƍ#o_lO='qIN %m&lCRX_cEP9_Y抾%+Xףq#?7+Q7 җ̧K\ 0p`!erK TϮ][4>g4<v4<ߕb$AؙH~'逴tvl[F%\ m̃l2twlZ/]?.Oڻ xŸoǙg+G7A{ 8p0combǘJAӡ@z}`$RڬߜXX:Vy]H %k=Hs<<L3{lU6Nw^a@yU/  Lqū m#]ؿf86 ]p}ֆYA.!M7y .x=u?8qGS&8ǥM{V~x 9ZuMܚV:xld=jZFq.'id)ssţH ?15q'H\E =ߓw8 >wz߻ZRܵ@/BvC>HfV͇+gY_/o<_KE^:ga{9`@2+ )5_a m 4׬=shݡ{/q[4wCY|r1>I9Ua[Z8k`~ z}j9 T>N6?\./„&P FS:׼c_‡6Ns/7T Px dCi-ILf[vӖ" ěSzͷ+4|S O:'Gc۳CeQe\/Rw )z9\>Cb sPV˭JX;]zt"%5AE5Cd)ÖfdY9f\:e8'AvF3̼ηyп=/}DO. _0p9v ,X,G㱇s0~_5GYdm/Sz9-hFp 5_Tvc+F`;yPn ]:ko"V$WWv2Rz[!i0欠a#M9^Fb9ڤ7 Tf2r WR `idT82Vjjϯ^[+O肃 N)VQdlPx12m"7_ŚF`/jڝƲ+ C?F\:9aHͷ3dǯtV}wY}2LJQ玴YX8Nupf)GdRsӪ1`qzUު A'?emb`(q~Ӿ;heb zxQsr}ArO-i\Zqf7wXwLۅt|֥=qʑ! *?{>P ѻM!/Cp6m;Ԅ_FL<٣mVf{lOsp/j W +!<'%9);&aZ%LVm?L5kkfpq.X€׃/]?cע]){}wo2n)UO[=\_"&'*;ݪ9ִd Ƕ/s9\i NC;_Z(N?v7(yxq Ʃ44M4|x^ѱ&BMmzg96-ObE}7%>`Ɂ=Iܭ{ֿ[J#lQXs?나e~*) Me޹4hB&(ev]Gcj܉Ö=3G?CFWet` $m«"._  |{ NKW@_gXr˺=L>|=PQ +6Cw0gbl Dk6E|a䢲6AIϥEnVx>ė4ɤO{Xck1sLsfm6lĥ0c/MCMit`f<vGFƮ̡ yaXd!4+6झT]=#h*9R0}*6Kv@ŝc+=|6&G1GX>߸h@*ϺWdRח5Aip2\,pj_lY A_=o[6[wg ,$ Y$K N`oId> qKY\ 2G' 6¿Z^U o9DwMF<"*X)cŲ8>*I?zq5t)1HO@`LK=I+6Ș 9H,#޼wg|tZmE5U dK.-!5P^}y]}ۮs;[ڧKUDpQo X{#K7/{ |Va7E>udHpуMYhLb{'Ԑv֝d_={!dnp l -"=\#bK/ehFƧA Tmufӷ3lV|~[:QOp:9V{ UްBٻ@S X_>ԅD{ɘL 'K`pIAf?wf`ϛ*7Pj2P3[~E̿ $ KVVU˝}.9RWծ4`8nqVFJ RC:U IaE+ɜ:ۣKc% 5(%qjbF&Sʩ=y9u_ 86%o .+`ϒ3h34O VG>48w t]+! NO]S|569ҔKܵhCOu{N 4 Oj@WCy=_Ft2Osa㽞 + yĂ iY!֡@ca&e{3G8Zz5PTk_%;9;qԋ@㣇ф5Ӧ!$XG<>hG<qW칋=ݩo`fI}ȋg;^-s3=ٽ62n#x tmz׋ 7 ]=o.}6Jwytm֥C7'>{%#Lbm^%,6CqB]z.zmv!?SKaIy ynMw?>SƩٓpxz-,e\8ܼ,p41i竝q3MsJA ;)xُ(K 2dx|7Y`} %ߏ)lGTz%/%by gx},W-'k}8`W_l6L_ގn< [_\[XĚ0~xۿR%jM1 RayD S|VLW1<9y_f~dCW\ |[/\Fq㞶me?#sKΌZsv=0:BlB&o! 
K@z/J)y*c'-8zg֎e6hZ%aim;ܻ ;'%"3wME&7??ƚx}EOӆAgC k.ÂL-ol*/ayws`56QL6tt0yC &?3Nö#o_ܯ=ΡHa*_L/h&7u^]B^yZ}0B欿ezH>r[w)dpM^o9ߦ},v% x)t| Ցy޽J/ȃON%C1q}^[^*Mq%#fBd- ]C׽> kM{8ix텹;X-)w o0_qѵƳ >wn,^vdxE&8{غ8WHyaw)6Jm˞$r#rVGΌO}6U}3Bg %ef;=0u$c[ `fcSh/TK3csАı9஗aIҽJS!19h򊋌W)9`=ʞdg+00D7zKتmdɰ5^%zXIHmz hsN@QֲШ05wOjկxi~ =elY)عS[7Pu߮?HNǺ'+ R_YQiQ{P=̛YN8y/l,~>Z}?4M )L){%̖zĦ'0OĆ0,jL;7Y8@٤HC&tK!) ,vMӀRum;>61 s!p쁂о|>nšV(^2֩aƤJ,Գ#v<{Wj[tb6e-S|مOqGЯK# fk68-@ h&Γ+U:i(5>Zʱ`GH0 Zp[V r/L=* Wx:,z3 NZ}C ۾heF໡Sg$\X>X$o }Z\i'Шq+uݦ'k+m¿կ#H Lu0=]r~/6ج`6_MO<#B%N]y[z N4g%FX٫wWa ʓ@480:bD=RX+Q^^<-M#su@|d5N.5)Ǫo[~/£L2ž]>aR dx$ōIvr"ĕ9&YԱVB$vvb͍[%\qQ}Sro~%Dd'Hl$NҊ -"BnE`s0O0f$/-?& .ḸsKXn$(2<\8o@ W>>ˍw(o`gƢ83{~aUKrߎق]|y_az8˷~}Pl, ~ͷ [pjr#,{^g<)a7pqiN'hfs}{i'L%jQ6h}v"gZ<;kqCv^OC =cZ |[-T& V/wol{;b_ˠ}®|"};UG5iR3(\zU'jwWN`γiHV^E 637^؜T-+ҟpoi `U2;qff\]#KV^j#K+zv*q=߷VB}#d |;:KGqzcp= }>;NWWFcw4'7DcH1(2 &? #ո8Vq6gAHM\Ʌ^> O%tS8#uDJZcW# k{@u1"p]ֿٯXCS?A>Nw?'q{#*k7)e {:|y>[K*VF]m[e־GAVdT~..)oՃ>D(Y>.T9v4*8E0R[XI [&{HVVoN0h6xnr]{S~fqUOo N½Vd?^ M뷎LXחaEHKtiMtq1.Ӕpl軃2mg*5 ZlKh[ ޻ ^Xq^#fRTaxϟW@49+h!?@FK^K#W`qf[L#?C|24.+BH۩hGRR~`Jl@~q)}INiFjίa]i^|miqk>x*.+%9+ OP0@1tt>\'H&s z"_5Z>zPh/ UpGyU`9ɏЛsY92\THj8T zT(덝1 ~Bߓz:%[<܎n q[.߸!Y,OTERMz텲iҶ$z4Ne'1”ʫ* 6Xks{|CC X}OHe '1BZ"`=o1% adV_㮬ݖw[[v]yk v>#8,W+-&e9P+:(@E'>X%][êeh(~ZpT։-ep22bO 63)qi 'q5fL´ zv!MN ߰ri*ɔ?ؾo 0G&iLP})W`<B8vx13¿")BYwofn@ |q`Wc`ߕ18ţh6=WlhYxS$-lԹؗRy74^3&L8v&i[,ʻu=赓v9 U}s,;vlSGTms( V?}YmTa|I1b 0r|gj+wTp`ekX-֧EN.`n68y]Ra اf7a7ncw sy;RTE8Cϕ!Ū"Ex'+&N K᰻g䇞c-CMeZ8'))KW?l_TsIg%t7RInt%>ꫝMnLԟuVy2S\㳚7ުrAVfN' U ~@vRdƩ+^݇ =Žm ]SoLrTrI,;s+jKĒu~%7CF5=e5'`upZ>9ݶ.F&Z%EmIiDZ{^Vs &O5&'a3UkݬL0r;?|2ͿR׮30{@yՒsUgZ u+/3C˂v3b[u;\AjX~IRIh`q(S\|VQ.FطLnnNCV%h9"liDP߀!t#ʮ8[v`c)h^{)H y5S%"5Nz 7b ɲ[.qN5;6ׅGƌ߂Iƪ>8k>pE ̫,/QU vOWȱ?W^=⒬Xb|Àu:b3!u8{l Bi "M/ca9vN7N3̟ن?|sAu 鞟00jJܥwFr<ر/Ès]j[EȩB{@۰6U82y6BQ8q,*Us*܂Zbplǹ~=0d $N&²BvZ/f+Cx`MN=HA큧=rzDK!l6Ɉ/,aA= 64t> ~lY7`R9>,=A:05ݯ;0_膩ˤDպ6mȾ0C, |bL{3Wiw)0`q:T'C@'vW;p`_p^FJ ԍ=5 v8ccuՉD[qw9k7CIcgye ctjXL0Ue%Xt`V 
Ίt}NdwIf.'QD#iy2!=&_HAw'B`$M9܎Cۄ.p6y6wvgF!CQڜq2/hEXI[9at&OIE8ea1`ύXm6vk$s_y S[~_%q_{tΣMN$7O6]WY*Z~f:xYS&$@Y( XsVNj |a\F3#qHԚ3) )K`#js3G'tKyX1YkyR\IZ ;EBT,=m};NiuKъG-֓sd:c;=apkve0)pܗs ㅉ7b@GOVۛ0P  *_h5fɺۊ88q^ SQCk:Fؕ]qm15uC[io ֏Ό9a4rC3aΠzX(KժԷq[O}[Ka?gwyN3$鋻ÕFIӴ t"<6f.d`"/bҏ*b3/>0}'\:ޯ C{spᒦ%4l'IRy([˜zaCq̗Vt5u&Fm!JxpݽST~º@ E UFH=6݅T3 qC??jÿYtOnNêb3C/jBXSiY%Pߔ?gSErK=7 n=j'̇ygW8jZ*1]exÉ6]N>kT#6_= l9cI9تJkFŹˑرUqnxjY;ʇ>JaɄqٗoH6N_J+&QJ02UD$[U$:n\X/+W~ [p%%q{42bqr{8X( >o='`'lgIgfޚ{hBƫݳ9<v !6Lá"hkHvJ ̩z/_y[#˂Hh=ŋ uLygZ\$W/9z,Vc\!30}Od#N;;A>' X6ov '-hU>XZ=1-;1d#rb#mncYR8=YT%frhWA}Uɼ&μJrX?M"ϛ`WQb'PpbόA"D~=my' 7$I'M|8msehQkߐH/c*Azls\ڪ}C4{aAAT,?$cV,[N8}qNEI\u5I6ՙe@eu\mEp1[uHT:sB!O׮OV^(?\}LPeMԢshLl>&sɆu‘4f}d}/4D͚FgbpZy/аnfTz6Lo.) >M>S#R"^4X~zt)X֐7GQ[[qt䉞jSgU4Zʿ[) L ]x .tO7r_.eً8;4n q@[H;y6m|J2kZyl ?lpΒo4#Nd^@l{[ T1/qW}N0Uɨ ,ڤb謘+ 6%LZrBv̱D|t>8^enLDadom61'`}C;ia0{M[DVpV{`V\T993WSwjԛ@$?Jz7f3̑eE?.ϥ5Q tŪ~jEe/; |f#VBũPQy*LDZ^2q=H-;)V4ÊSpr[2oűJ^8|ȎjOx=ZN/ħ2 5{P*?:v'0dIIPY'utsp dX[@`*-l0է uκМUokhL2e~F*x=W m4`UYoXN NǥS7K%eN'B9pηFޤ5&S)A.yt"L}Mq4ۖ"a5 n+1G3 vaERo$ǵri Ҿdtt]'.-Q1][?j,1 /kN,?,FiUzQ!u;X[4/\S,\}kt8T7LG;q&)O {m|+.lʡP^|F(훀88#*)3a}-??V59pD8nT =]>dl\JDžќ83В1B-^ګSG6llx10>UCMޥ 7n7?]]2Fau ~8OIWù-w5bXq֯F\cŹ/F0K [ Ƹnc.Nh )ܕn 8bfXHGfsOI6DU%Cc5\s 94WO bJ@e =L)= 7#)dĴHÐd5WLg5ޏ2?{Z Fj K.=*1ͧpsTTqʵgBULg̟ZMA X"h?]yi,~ .Uk9(`w~r +.)k yJYؓkRPʔE n2g11DIWu8~q%-eÊ^xOu`nFv )V7Cw7lꗇ`]tl7鹅d8t']uK׸Qh.B""`ŃaKC2 Ӊ ت26jc%&?ns'Owz.[٢wEKq;5J'ۥ"6A7{mya/V&AyUgn[aݟ)m.Hh>5x_'LPVy3ҡXsԣ{;\STgud._cYxTݸ?Gdqc_HєşgcwV2{ᣝ[uk- y0bjR'FοեױKhzN8G &37UmJY8L#Mayƈuuwpw|6]J#гPz?Dk̭\6Z<[Zq=gUDŽ}ϯ_3WȡB%Q*φʽ K)@yy+.ӻIsNļ({]6x40 $GwaW3'.|? ]b@ݣ3R05iR;vbGJ \&,(zBo0]~oW702_qWZ j]!g|a;NB U@M?J{.aNRutl6.I2%a%<}:[k&3n>Q6 ]8KTA1.LvALk$,yqfzRmˑ ny苷 jѕQ캫 VVt~H4QJ/wEʭI 0]$V^OR~ Y_Zq"`z-q%OwvB~ڿ͙ryf=+;6<5?DR,NLn}-n ufI"RK>~`m_C.~<阇'JIƑ#8ОzHhL5R. 
jN'MϓطN)1t8q#vcSgYV=[Uqj >htp<&M+Jwo?l 1G5M<-٤}=ViaL~59: 4ҺAs3V(UFX`Nf,i{w`֧=[1 kqxLmi!XC+|zMßCVL ;I&I!IQGrFb )vXzw7,]"}̂' ?>]Lϵ==q¸{Iirc6t Ybn{׸xHٶv.GK}fR& rudTZq)XaewL}IبG%׍,Ҥx53@^j4*{ݸ]o96Nq$sJ]7I]U a@IT)Nn.jǦԞ\2ե-QOLF(8׸`2>cSn V or 7לz)BC;d[|1Fi 4@ܹ)E<7} nxV [NrT/w‚fY>/ۣxlS!< UTM皫a8lpa>.,m?,l. 6@Pf?d޵56J?D`;ehGZu-q8*q(x\^ٜ3ԘB[=ׇ&]paAo9^qiC Év%h~[N[Ņ'sː֛?_:nqk _/8=MLgg߆Aa60Tj MzK9~$C%39_ q6"?5o̫;v>aOpA= 㿝3' 걯36/plHw$ӓ墓oV4p+O8v݁qrBPxlfw8ME6Pig`.^awېfbu-v%x|ֵy,'}~Jš]8ߖ; u׊-Eï9B{{􇹋l07$kIiz~3s2Awv=cϞ=6:; ƣ솯˱j/}ͧd>` ɼo"c jnY9D:gt "-ˮZE-jz%ۿ[a?äkV菛qa$,~;&j`aÒ=hH:'V{U1d >e$ &$ gqMh\A$o ǾO!mOze=۬> #\ݲywRYt0qoe-9u=[Y, J@\ݸ/Ьf[(tn%YM)/gǒ_jBNAVK#]\n|Kr7 jNb]7mC/[a@ƺO02pRlO+m]>uhuo$ [LuBv(뻛eű/1><-;x \Aꖐ890wԢkYɽ'c_/T^@{f&k //8~{`©k{:aThXα"L2Ʉ3&넡cMq+>3 =gF vL hLZ /v8NiJ^" 16bϛ&5xuPi:W:Y`ao>_0Y3mre&X:JB'q Mpd]?goϕV3=ZtjIIl*}_ gX?6`4zͼG,]/υ # /jPqvzr=y#Ry=ޑH}ɘPdT|6ͳg>z@~1d/dGiBntܰKi 2R`LEgLD_pxۛ/XhصRdw8wɚj{k &YW/ >}R#@ꬄ)],;8Ur[ cl}rgs]X>P*N;E#xs[cjJ XmMPЅŰ]8q2f>*-_X`2N\AAz)(ߙA8 afÛDlk o=Ќ#O1^pKٞGܻû fW2aq˫zw|xKg-_. H VC1Kw8+D =TQi[yd"gX~O,bCP|sx/: LFBmxʼnOr0?iw"rC10.u؇~^>a2ɤ^W[RÙ'b82nl? :6s=ʇ%IIv!\mݝ2fHl.M{} ڋΦ78f*z\8N}{,8. 
m_PB oQEa,]"0p]n{W8.U\ $sYY0<: o1j}*5l)L@՘q{MLgz}&!_C ;#4aU5ɜo zɜ֖;=Nȫ&B7?KuwW x딺B]e9 mU^D SO|0juh`O3٬qaS+?'tVkw;-C3-Rbɱ.U*3y_a7X>D]r~/V|V]ЌХAIaO2>֒JЩ'%xt=Btl\Px^Zj 7ܿ;aUoA#[Pҵ׃u<ԿKdm>,zU -yO)'=ӶS`x\+h_>)Zǀ?t ɠ?"gc"Vb-VS9Oq(xwxWs髴55޹w*_Zư_1y`ZZ Tn)0p|A+txae(I|ޚf8 AM_B6v[VupyCp*w0tRZ\K+[,FPdË.$obyS#ጲ# s~L^;<~bf s\O4`gs\?qh-Yf8sgU&F~@͵9@N__w=ڰ.5jeCk%2hJJðFɊʇm_e< =@eˀw ?)bg{*G/Nn&~|ٓ*cBdPp^~c;T|TxV,ێhү ZasƇz+U& 5lz_Ns$#\܎QDí%Hh6Tw[a)h}WfIHR 9 OeY{׷-|]r*bم-'ّ zWph8navKPk<9V;f}Q8~bO0ءoc$=llUNUToiLӶd*tzK~I ,63{ȵ8UtR]r-\cazJ엍oye"cjh~|/}{1 $͏§HxI/R*3QXiˋ)%TAYZG Hld/[ ']y{-_-,)|u zB߈ J[4jm5@0.ӑo:&8̚To$GԎп$v> +VNzM'aUxMa#\O\Y$z6$o6RH,jk>P%y~`Sp_MzUCJpAuCaCXs#T;pI'+#|c/#{0[׊ᄖNRT2Ą-;Ll]~IXyg4Bl^иo]h:PY'%Yw%ھ]^ 'B:TIΰo\B8Zl3Wg}7Pj<&U@$<Ij+J {A[9Ua{ zv7':.@L܎^Yg2Ҹ Y?KMo^gcɅU ˼8Ǭ쀬0Z{a(4/^*3]&F Mguvgu)űdV7|eIBRr%!1;YCSq)SNX 0n2x7djڞreNaLp.1l o+*\83_&ҫzYG~Ex&pl7Y*"")'< |?g?$5HNAE*9vTBE hnuΦQ^BLy+]ܽ"\բu׋O:AgQPg9!RVӝ^XfTC~N17KI{dJXm|}/J$NY._?#`Z0u.V>^3o=N&)7NeLhʘ3eth:vi/ xNq+v$pȅϑ6b!jrϽ@c8x͖u𗑄|rק ǿ8w~)tc]D`j, 8{hAdxNdx`kq1n3f*IÁ}ޖЩU(6S%k~ xs7Uޖi-/֕ Ǫ7]̀o < Tn`X^ݳPdhqql3fe,* ҊS'nbQKif~3L5=ZZœv< v P^+vD`a5L1e ]/O2yPy>4w43<pMMB.ɵ|hs `_R]gkS+.o G/U+gw B;иiF$wRdw0`ہr0~IZ3Mg=EPyG]x)?: +ٖ06Q{)3р4z93ν- K!3>ksSJ cJcL6zHy?\ӷCLWfo8 #]>y; tZPxc>癵u ;zt2o YtsbHv/(3sV?;'ެ3fyDdʛAXfl?RlBk0Ycr;v\kgYbwE<x4u3I/JH̟ ;ퟫKkRd71gHT{ לp9 !7%/F,f9QFYi:t"׹I6ZC-9Nw-Y~q}ZNxXs)y-@>s~΍8Q&v)okV%Jho(Eǩ!8x)'eM T,{3V@ ~_[g=12?qXmYcH2ŗ &rS=MV%q9N[yNlHV6K,fyl$F?P̹w0~7s\/#۲]܍{Np_8 7.ȂA5>rXz;_o*FeǛގU2!MWPtsT%<?bzK PɽGTcQqCXBhYL}9wopsGtl%΍e @[AAX{ʩ7;d٠ e%Pi([z$[DI)v+ϋ+.\r (rq' yu8= >Uqi(%(c6[x?G 76C|>Ϋz{zr,l[F1 :Ow &˒5>M)a0GT5Dq~m VkGܝDe6Ől$oMoS27bӱw.G\3kǻ~0}?g,Z} M=_X`AbxA"Gu?{6|Ū80v\Mvqx9T8p&?d1D5{ G3WTg,*j5WO8;]񝂝ָMjn!fwlu=QC%]J`VUv_35I#p9 ocoYW Kck$kE gȌbpe&l7 pۏE֚cf0.h5[wfapc[AA_]:8L•"pgoVy M,Xfpշza! 
D V{[j+Avs0'|+:@Rɧ sztZlw<::Nyu*y] X`B~ l ~"n 'uEL9 Wݎ QN/o_7_Bq)4v|$%6{_~${lzbzcyRG;lgڨ8| ~\]".aL8vu1eGGHTD7c敯P@h JYS CZL>8eFzgGoVcy ^HU#n_dWB{Ÿ+{`}mSi&N 3KVŅkH^qwmݘY?s޷*;~eISvEaGMZ8*q>f\ZܜM ;Hmؿ`Gvǯ*^V.TS"(۔\bo*n ZY PfR8}kW1GY`_f!IʻZ,ou!yW!xhsޏn[S1I4qsN!FL]ݽ 5!U˴twNE3yU~ߞ8k[Le.iX`98S'g/tbYE7*i*0T+{Ľ@6v#xg<22vƓ#/,"DD&xIg{ ŝz0=pC1.~yk8&>ɦVqE/,]l> ?/n΁60è}{Iy{ L8ZKTEH6k~Qfb49̈[qnSIb<(,=ָG~?vNC 5Ԑ'a^={o J5Ӯi '`"e3vL?~X/<:K4C{g Ð ŞzڳKT» 0yaRqij8rfG?1VK xA kݥd5h:5-f27^1&9o ;zL };e(#- gd\=IC0] 2{"يIỰyXY&Yɛ2GbyMdr^p G0Gio'mS}5;ǁvuug=ۄs } m\.˜`0ze㘥=>Ss]m-+ g EF1;·$a˷瘀QWS",cpثKp\xO VD*]R( swn0pIW ]t@+w!mNFl_=bT}EO,eԆp63)ױ9 7E5c|O?e[l+94Dױǧ-썯|ӥBiu96uBB, U2f.0v X?5qiAyw3h.GFɆ5|"OOC7/ҋa^2']lޱÞAʐɐU:Tz;p-\b$%L]]6yT$̈́9MumtX6v6+. 'HuͨRL֯0'b=2fop;qzj}̯4% ε^&96$T`Aȯ݇ϔűkGjiσX)CoRS8@R*4&9 3O6vۓ4j%L{…fbgrG;\ͷA!Uo4׾&: 8uJZ@4#7aU 9BYu&X#ko4S|lZ bEWo`#W]YT=mw.yu_y+jnoIhj;` < oQ [-H~-.\Lاz6 [`뇽毰S=+`;Ŭkstۓr4hON|` -k^i g"+ۡ@\MSz{dh.ɔ^b=6*.="X$_@gỳןJ+gM g~4%/+{pj?*xz3gERz%W?|W~LOa[dcY9 4cÄzx{[}{pX)gDRX31@YL'Xn,ut\~"sEgN]Vn0rW8cud==S* l7%6h;=]m 1~?Nobv#IW{_k_ɥ&o;E)ue%LEΕZp=93Ln| m;jH־ƽBp9\^i?,<>IZ`LeFj˽ >H:$CMR9K9ކx d4l:^yQ3CkKx=̿~kd;e p>JK>2u~-;5e$pf0,$ɛ6n; s? qZGgjُK\9_!ը`~I|!g qk4ڈӾ Nd.G-N=7& Eټ_`矍Vo`yRQ|ƈNI/챍QWJ& za[^}@3%jr`H GϦK\yt9ɼU*Ykէn@ӚO{ZKG:T;MؙnXy\"c +߫I&eK0jNk[Yr㊧΍NPyN blӶ0`or:=k0L૛/HW>r,LW_c&gNNUXW WSTCLhb2v!򇷰rPD6Hyۉf$as-A8Ȯvn1ѯּ}G_r%\SҙzJɿtatYn;E0ɨ&X!3s/HWucH- hY<|-z_Ȭ ,>@hDα0O0 3)L乹W M W| %^ l?^Vpr@E5ɤ4dZwR,pw˂(ܼ#;|t p"Jjm7L\{JyF0l=z45 HttO5b1U4`@̙ˮ,3ɏ{qQaBu;FžcZ J$.F`lp dc}yԏM,Aj,\>$SGE엻}WI$VTS7`.`=8gífC0)=IoyuV-T)I[T+7P\/)gf?#uꈋi:oqeL_?o밟_Ʈ~egIFm#WqNXmq 3:z0]Ă&4؝S_] )0bDpȘ1kkQ) KAR+TEx7J#ӂdP~1aNum֭fvTut6 ByǗB鶂, G4u`cA Nz$ =Auߐ[":6%&Zv`6#v9rH-.ʂdZ򙾅%44h~ czu Qx  *z%W>d3G}Bz}~G@R8M\Y$%JmXbfNݖ%!)m$ WH%lojRK)G8B`7? 
cѪRӵ^[̚gtdH UNN3h^>7YN^ĥ/1'Y9f}ׄ˶Z8*tAT3N|pu%g]ry߫,B1P|vK=N&xo%M'K.IjL8W`%) ޚ } soW,o=X[ھ)_deM O#B[yN_뵾t=z+c /]cq̜ح*X2mn<6=쾂R0ws|cA5{>q:8# V690Ė& p\AKiJh{Kh"\ k8M~v4Y:cBS@j%xb^Pd73N} ^gt@w+% `ߥ=}/mE X+:rv a# x1V&lTehq*-DŽdݰo7d8k~ʶ+uˆhgz't3<g>e;Cv&<`'VyȖ'sy.mY7n HZv<=ko:V(x0°B2_.lsc;{~g)Hzm^sC/)קBîo%`tWY6O^<` h i7㈇,sZ76 #wUaF eLP~:RIu5k6tV|^a.g?Ð1@EJDt,TYf8SzȶPf8XU][&YHBJq!fq#/zL+wZJ$HT*H I{>c'(G~6ps0tΨ_6vW Y؆|P!t9~N셮/e.IS|DVF: L(KQhfaDFfpo,qwvbhQ ^Џ2|D3Q`yLHE?OhK+s=q&\a:AWw+lOU6{N C殥nخNyc sB4R| i#MBvZL7^~ eX'R Fo?pW缿/ĺष @^2Y' rЩ$#~@* < ɻ~//GS,ROiqM;`eOCCj.=-͟~]yQ.(=V#ٳY$y}\ECpգc}–{.BnHዷ3dAX?*.5:WLHj -;n: >=厓Mރ8JxQ` ig‘fݱXN)_Q(ųL+ eSݫϣMJ!XwUC259qK[2 0v{.L4-PF֍,aW]_x}Xd/]JސN g6/®ҝf/bHg|J (~29Gⱐ+u@H .>Jy]SU]sά)WnvRIPs|%#p̴$&[qK\Yz+ {x:Zz pB \9qĴ66N-Z}9s*W3귿M ^xL.v)+-BgN]=O}fm<$!/AǏ WЭaΠ}"k,|NIysi!!+Q<ܷY8:sJpGJC#̼g員J^ҳlGSV8}0w%^Sƚ=60R[g5A;$!N6h z؇>1 :<+!Mپ$F5/]Z&%|85SĠ3P/TfuXNnpMNzwaSgE$ӣu 026T8k }%ž{* /loZ]?u B[۱&0-azdڐ etq^^͔: eB {:ob~=e yU$[4_&2og`TZc@~:FiX.0ɵùSu8a SOs|NV!/ﱏT$( ޷L]a8^h[0zC;sX/ɶu$3Rȵ o$8 ;m{FĦQ(tZvbIߎ I9 򷜲Z֔.CىDZVWJQŪz2wنB3 , }{NR"xEMW-E=C-L8ҡ&'=bc`" e&&eىGmPfþ W:ƔiaÝjmycS٦&\l_0xEWG9oP\?v9ʛV'J}bZnԪR*}j/<]Sޱ*~kʲ/{11kjU&$CL;aik@PZI×\cy*+ Ͼ6&芭=>Urn8fsL2E25t={ 5Y9ѩ[Kk'"uڄqvW3\.~17ƞ]$ӉvW`2wIqu5dv7Hzox|$(0^zPg2x2 `E==!OM["r 'w6 5X/k$4=;PUd.Y؇n>WԖ:1YUm#QqLJLo*L2$rJ8F^ {@hywYG>xth?灶d.uD :KdFKufs5뼣+9N~ԃ nڪν;(fx$RPoA!se#',%_܋ѭV\flK>OdspQNɠqia,[f ]"ծay8TƿWNAp+aݕm pSZ% _K¹G ,I1a&/PqK7a"}#:IYҲ-_jc-d5i⇫3u_/%)ӗ⹶z;4_G]E!viVgCAO*.ʶv[na፪yP䭲$vج\:du1q{&jLޜ]!0{vD(PFK;g KIPJKz;c/,3Niކ-GpcǓh(?&_ j3tV=6~h"{o0')-.ma5lDF._r;WR_B#3]YィJgJܰGvz9UgQ* R0S9.v}X0xZB՘} Ea.Yz;F*%Jz4VfU)aàJA A \8,yJsRG_HS<+JFQڲ/2 3om ݱJ'>3%58@3f0'6jUl8iПgvkl]9VرtQX %4r )YJP$7hLb9y)02>}sES8 ;gX|uTJի_BtௌPt;a0$i \dE{Gg Ljn@iʴ&.ȝ9UҪA0^a~ {To^j*Ll \jhna̗~yEkȂmT6R%} ?$wZMܳp37ea%|ZhiIBhQ0թ6O7a8V5s`*(c9Mp{lr>I2~Cω#>]p-gA -hnʠtlƉs$: m|?c>~% pH{~ :L=vILd=Lm׎IZg~z"7=;9.L*TI$iY-ThIQTBFFHe'{{>9y|sCEc'c; ROs_ҫb⧽(^a\P+xfc(PQS -/v>^m)8]+3U>]O?.ZA%|)\Ip&g)=a:|M>U;rns:,V9lJ׃jnReK2`^߅ s0a[=_a]~L*ּQvv^E6X3s-j|x8H1Hk1KO 
Y|F8d-)Wt.^FYV  0WΘ Z`22{dxrmu vI?Yd=,xsz} ؆Cۤ#Ȯ엙'6VBw 9J8@2ލfP?68C'+TK^67)_ 0_qתo~)Ň##~[ZtX'.U$6Ș熎rґ54 Gچc)#~]^쳻/ݤya\iX9c_kI X}8hdR ]FMO8SqkDZ=Iƛi iМiץ|$Ϋ|˫ȇESM#Rl*#+ 3XTD2jIg<6}ؤ~V7sv fJ]ngpj{2[+bt8\>js'ڛ+=a`kr<RY.wl#2ܶtV'ݷH}+\5E?@ "0yL|iZC576f#ώ:Eӑ65& mް#4ﳾ= t\χmt.A7A %*P%to<(cT/3vp.{vْ[aVCFE !#Xϱ UD>Cs\Sp 0i ye5=A ֝5r l!\ >™ޮc G);DL%]@mO9ߕ*^©x.9D 18@j^qX׉+<7>$ai`ǘoPpX ƒvԳwZؼK2Fs6IIO jxK=%]-Ce3̙Լ/¥}SQ`U;@rRxיLω r+n_o3#ɺˌCêC'Ba~*Eji(0PׇSʂ޳ib",SӦZX+8w2N1$o!gP5 sL{TaAMj3+wy5}O㤸zq''ؿ }Ǘd Ʊ;vű\u/9Cg5@Q +0g+6{(v6/MO}[.JHq6<\ɲ{Dos"ijf@.y9gR/WU>QgϘS{zH" ,/p[Yt7KsƯO,\h9b9U\87ef-:dmFrܦ%gco޺AQa\xz(~ts.iL:J0dh06|G5V'u!'b]M$ js' fI2P3di1 +v7.|'`8*'R7(}:`ڻ,lO'$&dKI28K_9/S738i |ΰ\wSuIq>zG`ӳ%݃s u5vDIFa2uGLW.M$&gHU*hEP޾ˌ!R ܫu ?R} mQRQG`zltC"ȹ]=hO'⊁{B?(Sit莤F HcJ)7}}:jC^zD>X؄^ZfOaPI7bc1Zf9Ǚj`)\aKrNK k"KpDF@_[Xv>eq 3ֶ;_x b;!S٬Y=,n=9Z^bcbNl.3aƥ$ݱX[&LI{fhټԙr@ -]u"9^yhP:ш4|ŧ>1,® fqu\=ʾS0:' 5f.@r'zzR[Ϯcַ\} ?߳6bNMӒ/b.ԍ)Ò#à԰&|_#42M ]{}H+s5\:"ZV/ !aVLjڂ SfvmBڳCq|1_Z,jtEp tؒnsصSdem\-wp6Bs-[(`n=yf T%du+106zK v\8EbMY~oVٱXȒtۉ@V&Ӓl'\zP@+"Pym9Xa/Α->e_C&#dNJI1]NrShT&V>N8%E2ś=qS['Urr_$O(}'SNpX9l]K~Γ{F0f,frs F Gn)5boC8iI};f']nVrzpȎ߹׵"&..Mٍ 7m؞&Ya͏i3dKa*tJ lC{ ̇>_q 0ݖ(MFR`n"CHag_ [& vT#/IJvYIp?MW܋ COB#BRlXx'"{/VŎ _'AGKje,0)&Xqr"6^W& kv@ޅL):aw]:,w`V~ⰬU\ص;&b;Tʾ/tulno^wRWZVJ?/у{Y.JDa{GrJ! "pv^+v(l黑{7OsFRZJnϕs0%Vb[r6T7wD`$ܙr\|9S>B/a!-{cP?]x,سAOېQ/ΊA/NgG`#*QL.<(ly|3*٥;Sh}M~%[s1ۥv7Kp3ʏ3ʫUaz|Ƞ/!;N_P )YnÎVֻJg,R?yk>Vc,9}IxC?:C?\1I;o>oqzv1Y%A+>:v5snJt%X<`|BRޞ(QGhPh/cd$Z$6o$Y㘣a\bQV v֮#7`ݗ4疰Mk\)KfXԊrI5iϺ+Ay RUnY@wXwh@j\s+]v^Ş`+ZW7Dxsշ|`=peС]oѭ1M G-ACL9T,i[T0ѝ0'N0w= 2#roPPݭ{JVS 黡u硥0xbn cʏX kREՈ Uѥ#հ{Gs%{uЛgY{CR`?|kw_^{2!X^1D K†٫-H<4DVB{WڽKgz"E~K9̬zTyB#ռWOaod-48LIw3NZl:.y'7&ƻq" _ݷt9LXB1~_H O.hiNq@ᎆ {ao@~? Sq7J4N}]ǝ[f޼<uz$KƳ0^F՚]V2Xu7Ti|A'pR󔯐oRY  UWƶfyc. 
8x:boAc0߻wqfݠV;M7&ƧW?`7>7O睫SCc竸X=BEeر:;ԡ;wपZW W|Ut,ZxS|Bޡqϴ-R tD>Ϫ4ЄվLޚyϬ:&I.O!_o|kIr߲y)QGC}i8/qsV>̉=qRo:ӻ\U-h罰k赩 +VWp ⼽̹WʟC)?}^&fHLpnL?`ƾ1aÜjGy0 aՋ{?V_YfL`Mk6:2A&>wnmG8 -[ azVh)M2.,])rYs LK2.݉[|H VmDZquY!J6޸lQoԺ<^ 3>t]2V~y3 ~9QmJ|m)x Vk7*v%&/=ǎn1xgrJ\CxSl霒/M_w< )% կyj*!.<ЦR8-XZgbן仆Y3}Ǖ_޲ݖOW A7rX_ɨ,Pb0nS w X3y"W %a&v]&br\$9 ×p`>/҇hOOÇp*oė aO[k\ƞu)Vv|lkk㬕In;[6|}{4G]W(6aۭ#ҴQdzjκ ۸6v/Fӷ! &*x]B ٫LU\Ḻ@X3% 󖶽?܆JS~!8{eAuD&ɖ2qs\8Ї{C>kGMIS$#Cst*Jvi73v879OuN}䏘 .y?c6KGaxy19_`y`VzX|w9%TW/s /n\P<^aszpZo P _.2/H֓m>$/+nrasynuPzŠEj`7ϳw̖dJYyfw{^Џe3`sZ PQt'l3л,37NmI0y<&E6}5>' gE  ScT=)0BkJX ôE;$,d_c ;⠖k#HO>3Ov{cD&KṯO&qjmWsa ZΐPF <~P,td a:$#z\pU+JAeL{vF[FmAFW5]j{8F&,q:Pjp]:mXZ38ZnNw=P ZF­ػSUղq5t%M`勿 \-|7Ir|I0tk֐/78Wg>U ?jZ.lJ|0%5 {"V,_!vp/Q.sGtޞYY٦mLO.m'Ygz[QE}zwT_|e ֋g-T޳*1kVePg?iXSyx}a'd1ƴ䫋p`_`3_+ MƶdI~}9b0i̓]{&F&z^@)z,?+;] ?V]߫XE9p#.՜I_߻ahl@x>b)'t.X 'MY á4M%[p gd%b8bM{ tʈO*f5fSU?>nFCl\P{M<5U然 WsU7 gi $7|?&",iu:{/bA`=z?8WgK 尹q} ob`´W270$e_'N/<;LsW:d6&z6U⌬xwgJܠ]1t)1 W>G1?T4 N=2 ae` ٌ. J,5cKl~I66BxU:CXۅ2FXT8ZIkԾmTp珍&}O^3-Ws]t)}! /OUBv"N>%n@=!n̍}g鏟l|>nD "&.tR}U٨/|8EP-B){p?s1ڴ-{/3 `.ѕ~3ǻ߮퓷! 
U0%K](=/]8"bK K1q&TH7Q$,<$&Kէ3NSƼ g87W#)@q~ip]8Xg r`6{'G#GapA:q|iKqVˑzO5l8zM9Lߕ"}p|w'0ߑ`yRS0i*iVTͧ5Wܐl0}5/tXVx|h ]ܔqzeʥ8Zr8XxI5uĆEӭ?/a>`vꒄ+r@¬$aIXܧ\iY#ޘpP [t`9,".m 5DeI`c6EC`>"Iά3MkJ5lgi[`epB8X~J_SpXts^ Hf=qaegZ3e6٘҇7_Cp`LȿHFiۓ4MXxj Sq , $cZޛ7q|UA1[CLN_on` >`0[Ŷٮ Th^w6R&T ٶ>Kv zhFYK\nTcqw3XymkF\wvI井B5Yy r۝"]^,A@ k&)Bo| Nxg iοQ=<$'\rR7aJY} 8O<]2ST?[M9׌oƥHjgwP:JkH)(N??Z#fK%nĻ/>>[gXa͍Ԇs0HͳZ=8."y.$fKXm/OR o<|'TnHDlݷ < |@tA} 5Lwө]HWm47棗q[g5K/@{\ꨕ{2G_/7y9q_?-nx[9(%HsrL X}cYE`pѫNcHL! {Ϙk[B}B\u;!~vh#u)@ۓt8ÉIJ9VVOJgQIEab69Zi N?6MI]K2ﮱaس ?CUOoȋ] M{7Ͷm[6F[^fȿޯGdDfp$a+mW5/tQ[p͋CǤ8W+&cRzLK_J6*T ա{$wG(P^ĩ;,mK$s&a,N^fglK=w Nn¥½Mn8}.\}.:r؄N%R:8!KCg4bWh+_WB,ZJHsFV0zW oRwZK3,$6hu.ǼFKH"iE+}g+;0y潆t-ڱL[ߓIX$,;pF{<ỳT7M6=.X8-P2Z'fG!US}^شKnw<bfZQ7wGq}Ea]XP8~8z < 5Zj S&0r/?pY0A9jc,NIA=ћM {j ~Bӟ]taCQsݦSEnh7x+QͶqCzq~JpXV@ڎlnox1wrγp,4|9M4`_ Kn 4.y TqDX;FdT/ gmt_S: JGXj44޶v1v9>)kvPo{ s PST1m6xƙ.o5Z6`QE2];C:1RO0:%CܻgM==/8G[Hlݷ󧜯Ua~xm.^y {e} +ϚE+{;w k|skk:tg,&Lm,c![Ba}7ͮgH> ]sK8VV@( ﻅl 5"|w`(t5m?4 TŠ~`r+s'6k7agum9~2KU]^YWz NOuJb&ָpBdUX&&/U BǒjZad~atdtu'?ޟ! 
JZ6^JW͎j0)MZo/&/Y~sW}9TZba0.ӊ/ w ,8)pc+YeH;m%4O1@PL0)opHw]FG ݮsqؿED(/ozc(Nt &UGb97&I.69tNݙ!A- =XT+^0{u@g5"u/2^43ObG\Xv`$V,odϮwԠDM}xUҙ2ɐ(ͫ jAu<|FabdW,T¾wxv`J,6Xp~T)[)默+үᜦlsVo 913/$^DhPѸz]9i#])@-R=HV\^y}LҫT mNGFqՍ1⋕VڭANa ,f$wl 688Mgp=1#ΒE]{8N)D?,>C6v K]@Sǡ:ԦWu8WJž.1RFSn`NJ',Y]ƿ?|OAUCbS #<]ma$Hi L :F&s|Wҫ6@O흓rmaA.3 0NCz l^А~ hlx-d!ncDup,K7/̜{_ KFyB[}.![Ty"{D}[q}mmX5/ C^3fu$oO0;~S/i=`;y \]8؞@0WN$/\-&׌~ŧ:)O0@mwL[}Qbu'`, t>]YFn, H{iKܭm革309ɷT|'Y^oQ!n|+dO?Xm/32n%"g0OXڣ*=5}#HiN0i\ޝTz%kB-k9=>p `u~$T\SOf$2/FLMn<O@OH޼q\Ъz3Q;ҏR!7?~"\֎_ZmY/qY!`h'7TֆAk3,؟vi>+Yk>h~H,Է9݂gՀ.m[{t=4k0  pCNy99 T{q,;lah.Hm;.+pd ʹFYRRscb}} O ꊃA3k[{Mj_5dêB#j3YGPE2~|eg;8]\S~dy|P탳BN*@MVМ)ީI2G2MCG~uς qrùTm1(.]յiMQ]T02?0OE+7.ۡߗ Cć{@ g0{c^]%]Pq4RrYиmգOGH i;{+GHm{zV&Xpm?S(+=S t1K$ڹ^\7<_?0S<^"-YiH5 ]cM,P'Siv(K})[rd/}3K_,K<Һ^@;n[]Q+$ 6:G%/g BI~0qrJR?mR 6`weSF{'Y pM6\6bTkc%m{[Zo}Koĩw$N_0\y]D- d=O$)&pZkzԋHlZnf~ y]4[d\x,8cyhRTi޿+W,q*>S9].FX>dV7D9cOiۯ]s8S na MP$vA*]hV8+~bǻCSkgcP^d8 '˝"Uf xumra݁+Tau/]%v$;`zcFvua^nԔ%iu#иQ̊9fӈJo[oBVկB0vcޜ_qt0eg[oR S-"|`IN๒ +j 2/v45f,;w%]ݔ`q`e'SI>qgA3M4bWYڛgʪ0fgs`r}%=չ7ݕ\N },50[dR,wL;R(K2AQ02Nv9+Ʈ7#5;]_Ժ-BDTBhz /Dp߇nLEA61tQXY2vi_QB…:Ys qud==5re:GBv\ǎ#{a.jwmxÐ=(f\ XOk>h,ʛu`65Ϛ>Hi+ö)װiRQ.5:f  XST]i̒ E` ͙;u\6嚑z`8o3nmJI4,Xlr]IMtp7qU5ڣHl@ i$ã•c{K]%>,σug/æ 0~X.&qFo e|9\??qvͳ$[\5Djc1jS2f~IM*P`.IyStE&|g!O%Ac~N,IПO۷6$BH|#B2/]&WK{-`[q mԩ#|Q?s#d 7 {wHcuszӝ'p%ްmr[>@ɉNAp k%vh*%kt~"$&j輤pGPd:RFU,?!kNHcwW "M_-U5Ur,z ªಓ`-VӼ_OQ"(ȓʖHR:oc#5>ذ=v/N b5M Nn9B²!2L~zyWdz5]xy-ym3~+r{,,l$qMĦm$ɮ@ް1Aw݉8[ᕱ2^M~ηIFp±{-wmWzָs"uTջpC*6[/4I.vlPW|S<6̓>&aZM4qn. ʌ0gsB$36vBm#H7n][$O,&^IE ޫ>lIuK<{*m9bq`44fJ &J&П_iu}WjA;?>TSU®UA=F۠o98Sχ'\S*׉J:đǵ8Q!# %pri9VzDP6 x=s%qyدwBa/$9afTy%[TӪG<ZoN).8ݪ[^1b6W'7qqӹPf{_X9Sm&aȃ+R^ }%nkAM^~} \YbsF^\eG"87ӏ͂D|qy!3xg{NmlY4o% &m iO* |X5޽5/̾9mƷ=&̅XQ vV@iFSs8].F_Z'[sɄީ'˫}X^e& #? ī{< tLa},w_a"$;yZp-ަ/H5_I6wnr>N$YHC4ë.Qn5E}@'7ƱV_X7_&XjFL\(P3NIT0ftuc#&'$vM㈃^5XKoI0Z>8xd YUpץ·fP9~,९ֻ)0K})M v"Y[,\ֻ8B!;@?vC&lzhW9)Gغz,RnŞg8)fUE%X*캜F5OiKPhSYCm;D!;>??q`=NL4-b\:$#$Ý1$qH|w̠7̖ݻÛ-G0~kl=. 
5YRxCwE[;y_C+0޴#fҚ['[-\'mzwSoٝ-0MkFXbn+w7ȥl3f;G̛8e~^w^>r SVwoք-bHVv7s2yQ?7')o}vxZ=ڹAs bzܖ=Ýr„tbaBl <>&@cG=i!.ˌjw@f)z812rdXdkn6X$"(EWJݡYג}pl-]̂%J ߲ܸBзXwlj&XBŐO 2@YEkI#/C u(QKKvZ>'z h! }u=)DѰ5$͞Mw Nb3X8ԜiV6'^t({ϗKuςyvȇo}`N~usJNck7C\haTrı$ݴ XiF.[%k RY2,k`aRfop2˲׀yKx"O.H"?+컹7]8e' DnGO*D$[ Z" S3W7`eW` ]Y̵Z zpa9[C7.2pO4mWc?]^UK5?&X#T}\tC"k~W "-Gh{o{vS#ɰc>,(vK:M&lLs1V֦;]8M_s<fk͸=Iнqo?7c#fFCf+s<)gxM-.Lo g7) ☬tĻ+ohTzi)0z{JK]/DS8ag=goフ2tpojt 6ʹGxAU\"{ߎKTDz^niJ*2Iח;^!w/yR=#X4s5C 5qf}1yjԾ-8u̦fHa6_r7_qb]N|6Ɖaੵ<]F׻XsNq;n=r:k V۸<G91I;8ek}R'roUȐS?{4?$0@V赕$7`dž =Y;Λ<{KtV7;Wv> @=G0r Ъd4^DxeN|76_gT/ӏUf{'-j:+~~ # fTvEŃ=:,}b|%{:Oϟ-2N Ps;t+l͌TjC6jRzl:eMr1m* 4Ol aDg&R7^R^'>{o/<ʾ%h|eB9c,ϧW{ZIysH퀚MVUa_C`wppF 'GTqdƁطnHU>ƍ7Ԫᔙv+= efOyYf켘ּdب͔%ۀi|@2L\_ͩ;0W,9ldՈ{&MzaYA&ڳk;w ha|߇2$ `keqgH._QH2h$ &Ys0n7?fTIǂGaUJyX$AIuɢ]ߡ6QyRNy?T w,/x! 3VU>~t]U-@4*zL/2RG aB!= SG$R΃\#sl>5IF[I.,% Hc>tQ?9~ q^C)f~1(g~aGiorpMۼ]Fݏ_9qn 1k{+w̧UmTرnsF8X6f )׻ W=g{|: ƒL7 nSYaW˸CkKy~މ@54A0޹m7VS"ٖ%yK0/ 8W~1%,*}]1Y 3r˾PSux3 U%eŎ%b0١+ğuܻ8s6w`v\gW~O@mnr37ov7NJkE`¼&X:-)|@r)kR?!xW 1 &-m8mTEd'nё>BM0e3fر&9!'$Sl?U;\zLX {r=M=CX=~ԯxj>_zоmwqaM2H0;:|p{+yNlBHS /$ yjCIV${õu |ra=֭/y7gzʽ^)B`AO`@} .4O!`f /eI{>@Ρ /}e ΉV*Kg9ЛK ]>P5O=V^n VM,##`%Nr~;zolnK9qe4Ygf3:U`L>>~\jto؁-0+mˁ{s%% yffv[@ۃ}gZE:f[O efb~dEGX׋9nvʱ &JJP|Ew.D  ͡ZЧ&8MZz2^8IAےhmR5'cᣝ!Q8vHi:8mah/}Py7;`I>;Щ16:dj6Hj!`mBX~~DjS{}" {k0pfl ;bcm#s/a]?9X8W+l޽ԋ_g#L3~ZN҅0ƥJ5 _C{{}G|eo۩4Xٛ0t+)ץ| MإEWf7֑/B0׺.C2} m) N&<ySyRc[\|Cϫ`O ;|ҌYn>dܢ[ΈPUWm@X}D,=ĵDbIhW =4&o8WJ)\;8MD/s}5al7=q'HԲLEPq5N=CDnFZٿ,]0+2A彉kJb,e'j6yQ$H_vLZEaf׻j%3ݩFN!}T޽ g_}\;4{Y{xS~F[bq^OM8eibm6۪.`ey&|Ⱥti \<^ON |;kB?3r GN>}:jVܚ<ܲLk}}O 0Y>ɺo*t9);o&vHY1Ⱦ ,|`d@M̛#`40  v"ͬ{k8~9ھOi۹gzeNr 4_ J~1C0nr+X] NinhH` jXp>|Ea!tR}?LZ2~}F'?.VV]rqs{)6G=z!l-*B;J8v1:}xmWKnDdHw C[Faay' CX}mj-v>Pc}HlJjV!M0(*nv{vd)+fDy'K}Xņe/wn%Bbtd0q#u2Àa=9ws?̖(Rd 1/abe?y fbiX9cF"29>(G2:;N΂q/9.ޭ #˯:ƅb=kc=QLǪ]eYf(0ς-3$ vjo [G }SػH':z?VGx;|L*qթɪJ@Syl4g4& /pXIoj,r:K2o+y{ KܵqQW8MB񿯸hŪWKaVZ}AqYu U^Z-,>\;D"O{#O0=-.3%g^@ju*SIVn@wz7tppLT'Ъdpr㼃;(d܄ ̋b 27xC\@οogTo;oD45K)kk[/, Zn0`9 SY 
ZaW AxЍYۦ?ocofNm4F`]Wt,xzG@̿37 Nފߧ5 7gz@2۾M)c6'X o^סG}W'iH&GK{@0 K_Gݙ@Vb{p#vHY,n]-v{' a 7qKp./.>J~'}Z3[bQ1ҧ'WvX,jR#S^)c_rҘI  3ǜێj+ M`\>0 .~p, l;Z-7LcEz7fԠ3Mi l"F_qADžsES$%Iu d a #q*AXGr;HT~H* 5\FIDAo {C 'ʻ~߮)ǩۅ/>8,4.%|Âҵ03U/c3.8_sW]°Z›pn!s7LZ 7a4 j@#X:#!)lD0MD0qu}Ѕ礵 ?\Hq$#4S'.2aۺc35qD0r&А<\O34#qZ'~În8G> =㸀H`PѰyr->|/ P۫5lG'z\Z=iLOqK}&N:{g9 LG=kJ\aͥ[Q?~Qa`7wz gLq|,gC bLZrF8yqwZz:2o|"ZvɎ$e8{n7nRda?șm s.?b`TlXeȔ<;._AӠaahP_w${j9~錤X= {{"v | #J̢Oq~z\6:=_ywB"ҩzIAENuuh>#f]H9+0"׸뷚&V1k1ܲ'b[ӭ/w A{d9bdJPFl{Oq"g\IWž^{nT{dR"wtJ(t?t'kdlBԏĺ7nX}au]y_ sh+{{VyJ',O38yh,цr:XMQ΀! =w1ɤ+UύBT]2{(bPf (Ԣs./m!yX'O(Lz!gt-BŁg8 aG~ a\Tf!@6DX@}>)y5`%yl)QU5'^N2 zh/9 $cck9) XQhch/?e҆IbaL٢ã 3~wC/x |Y"x>⦆+r jfCrPĆ5~aUz|>N࿫*[ʊD`ܨ9rTT):X8pr$4'H7N/ *kI"[.YQ&Y:>nm]azz''m>b/}8ʜg@0>1aa{C>, PbD9 } ZܓBn0a%g}]8p]݄; l=zsfXSlוبkR[ 4F㞈PW м\BRpWZ tWd@]{1vPG> \<ljSpv![v_m)/X{2:őWec37U &` Dhm\7ZvJ9u1U9\֛!('@RXGN< f'8z5EUX_"GVS,||I+NgCm` yWnW9yqӖ*R{qYsX}6+ R1Tؽ:kS3ڨE,mʲdoWʶб%q# 0ؽmBN"RrI{ڒ_4[OA(7G*${7"6!,F;9qZC8|ծg6R]:lOуЫ[ˈ+\aڿ̑O3֖yqwz|J &1: >tk}+YRޮw|9r0 C}Nۚ"35HƸ~W"agmS1|L w CKABB 1g$`$(0n= FE?A3PssbEQl.8s݋-K½#|ϯ[:8͚U5\' JDJoNG#f|U/ՊO)nW&$Qj`|sdzdv5O +mgw̰Bv*~džƘ_å+'X7Ij\DrQmYq/ l/8>v}Q\29ŝF,qU?}A2e;뤇vD9˨MX#Ep׍]C09c~ nSi+$é-gA.o x^WI"}ֻ[ROOC66|0FdkۊWq`n?<+d_sΟ#YLexe+tn }|gS? Þ[.l5[e,SaF%!)(`iCO>aKµ^~tρŃ%<0C$'V7v:d+?f{~!#z89.s_"N0[HJf=lٸ0 Mn@J[Ǖ,MOɌ{Oݦq \f! 
7Z?=.ncSJXu.$Ξgp9|^%Յaxár>~ǂa}HT}{/ Mxt7cIwlq&!͉}Dέ`gw@~LjUܡCuĶIs ƦVӌƢڭ8}V{-o^$6^hj`0R"g5%3&a@x 6}$`N  ̟b5a-M0p c.h1n3#0tPT|?k3f]{my3e;ǽ_C۱,#i1!ǿBo{qIȝyQXb[z+.3\a1Q @;$xa#, um0 ]V0ev̹=aSaSF߳MTxT5dH"b~>$'j)X,.Paq(«;8&/ Kd;5OW Xks -g6=um?^:C>uRywpyʖwP}l??):y_MNM/[#*2~ Λ<Ӽi?oyNXp-Ep|~h~wK)S|o^nw (q:Ga~*V;ʼJNU.9?sZf%1[)mFen[by Y'i6O?#]WwP͌rIUk[v+ZݪWND=7C|)SfW](ݎ5?]y8Uo>֭l1g%vLSA:X<3']ӱ00l{r:_'fl)@|rf+_VN+qҖwɞM>%G#Jܦ!q:%6 G?hzpׂ߅]IuoXg {n\m# }ho†!p>x@)^7lƩq+$/ㄑx[m.S, Y9'izÚ-BʌW_ݺeTEu&DkXśx{uV&jFqA/Fvu31t3s\&!vfﻞmĚF qu9t<܋sNdTw&`LW< cm2WdX 7{_BvgO7f#`oav]8T.L q>΅PG1//~V8ŧS9dzxJKaĬKL͹M~m 7XU-""݁=fOPf %trr3?gu}Rp"Sn¹;LaaƬl[r hgr?W/x}90,X-K0|IJÔzh?7 ~NaHnT1Sjרvhm{*  1پvn!lȾa l *U8Z)GZWzĵg;}k9ϰZCM~r9hix>{^ p1V.hr?* /r.EvM eĚ[QPdhxUR?C}Eq3X0 ;X؁]+gX ȴV^h٧0KJՠVxEvO^vݖCeC^X%Tupi8}Cu{0%ELM>\cN#+Wi`IKnP䆿y$Y{6ϓ.-Yv^*',|1Nr*3@rlӒUsY($sJ/ߍg@99*/{D̆jl/`رJ)`^z"̞K{m/zLدcM^ByJbOVH9nrree*`Z>9GqRUx #)^Xq\I6S w ',pDTQ7jj.!^M:\mo{om!(7l{~z U@k`U]GaM"/Obů(9s00rIk˳0׻eHء|]$FiJ/JR(1Ɋ/ Q]qb]>ɡk5HgI(;ܴ׵Bocš #3ocW(`~MPHMs% $l$4EpHk+u5uؗcfn^5V4H vh-pt_{n{ÊÐ~܏5:0|N̽>q2gȬ /N wr7obkiwYm[u}k\ bn}[A4]OQ,b0e>RWD2J^og+K0%OR˩_gU*0_jԋ|cAO`hlun:\ If\cL8:e?.]t5R4.^Ji\`AzR7Ԯ~8(A)- Ƣg a m2 =O|d;۳g02kgvn,+qG $r^oرx `>`r'˭X _:҄?|j%-i`+ڱWő<%y&߲Ê<-Ϲm&8w?Nrj0V{2cOVݮ0$'[8?S]- pp,P~ @Vl5ge*?P jUqp2oI('9L9쌆VgK*Bhk9x`2ỵ*?~N`JKdHXa-C"8'|#mw:p=IyacitzͿheI! 
JCYPR$m" ]?2=O:Pdl3}w|.8J/㍬ڱ-EP43&t $U@*c}g[dY0n˒ieo/hVN{U]/D'Y-;^l ҖT%.AW%I(u9k/Fj"je߮A q6xZ)Me5h'+e1 5B]d8N06.vHn, h2CKU255[/S{D>"m Źnow2zX6I&^vi;8m4@s״Ԭ~V]E&h1كV~1/(z08TY:iWS;R L6,# ^lL!95EggDw+96p%WB֕Ey@0@{S-CqEVزH0q* }~>NkE`I^!5Nd9pశo/iEZ Z`D,]2No%N0>V s.3/qB@$n~9։q4a$wa%NkXFvU?wzN.bS79"k?( ~xgD9< ݀w3 K-R[qmmV@8dc[@RQt`~v@g4ۊ#vl=1GKت" 'G ʢtX6pǡC?Y%˟C1z_ ?tحΉNǹPbh (>19Xal*l7.ewgՀ2 lXnGjmߙ0Ԟ@!(hH+|w%6e4eVL}evjZSl{\ԙq0!X77Uv,}X\^ xUxPÀG5Iv970q}TzncOK) h NF{|(eks߄b3oUd%vܾװS ޚ\s>y.4Sdv#sn(OcΛJ9If<}6Gw qoH~f6[o2ڗMyaBla -K~1WT|^Ҫ>ۋKM!VɣPbOfZ 3|eu!8io_ֈS @IW) =PO@;bA2?V׭d`^9 Np۱JX;{gې̛Ao{,;a$r_{5Ŧ}4;7o<#Vy,ӫab.7r>L5Hk_HRSx)_0Lhgʮ7RAmiuӲk/h`Vq@xd-?~,`:I@B}'YmsGp$P(ւƫ+s:;зOxa(;muH5KO?UBcJ%{ z*y >? sZ^SBrg2I۬~9aoČMwwzI Aj+#Y&\yG2H-a2]Aeh6~޴u/u-f{ZI A7n/A NTv) Qh5tP-YXG5I]yIN{(=IҾ pdž3[oCgD85s|,|>[S5_T_{>i< QҨ;tk3 ,-dGMHeQgp<߉d eCL&LqF򌀾2R#/ɅZ9nyx2C`=z5}G.(W`yGVyv"S*znjE=E9\z_ /[ fUF_°ЙOҮ%X{"(6 wWۑ9W6 77 նofEN/;!|?Éa+8v4;jor_^0ah~ʱ91.aIH/}~)rxMh¹ux@l.$?'5]NCǟ`!Vg7Uh{Gn0޲`Hw@uSb'6uaʯ6H8r]p)!fd0 YB,csiv8~,`z?%9{J mN1d惾=^%?yq2s:jOàХ0Nݞ9B1&x&qn!R X[' .IWj`鿇'r,L)vzr 9r=mN4& jIKqAiIhrهB3񲍼'03 l|pZ LTKs)7~cMAW* vܭn оYnX 3 QVcK%FGԽxq,2Ph/hZa˥Kj w{$)pȦ<5VC'ReJ;1!Pv^'e` ,rǏSgFqPϮ?$ w|ն0RGpb%x Q}Vk`yHn?6"̧INítBs' )#z4~kyv sD=G1GUv]X~{Fv}ͻs5N1,W|˗ch/moޜ]ghi/>sΊ;YB-s$Ha뢱iXޗ8a[3o! 
ZaS.d(81;SN(>C-9kj fJ}THh޳=&T|ިj3j!6C4$2#(ޒEGHfVݶ8)keN2%,Ư{N y*9q_)hW8K>d"Maܛl0ۮ719S./} =~9aΧ~8{@w+]TB*#JORʄ6K @}KΰPx)%VaƩ $,q6-N!'ֺuЯ絲= x'(?zlyf0Ipd;H~, ~8]EGvߔ:kgǻqp?ϖcwdhEJ)Ls]oGDz 5zF|!%&yk?gyymYZ\qXFmf~ zMٷٙ5pNܓxt6\V/]8Xf}z\rL=xMF/3Sc@n!G(t<t0+buDɍd~ryݞT)$Cuvףϴ~'9'pƘv/R9ߟ&N):x _NNGwi!=r!4#5B%f_iҧB۴ƙSNC#7t*!bJ,飵U\%[1ǢZ+VÖḋ/L uToOȓGe6u voxكaեX,H0shJTA,TmUٷf[ޑ{\fs$gzV>\Tp5<*>43KLۣ7Z7B|ղh/LhOLU XYfSFw7K,8{r`$Vk` zG,H&X7k<[Lr+ŶέA/3I;[@ 6ԌdSoy3ӧϲz{ȅo΍;%q UiIS8{ V¼iۮ(+(ByZy}aD'8s]]H./FuŎV }wmuB67uJ#ɴj?a*rD!E ҖIC%HO̩Zt>ĹS_=_Gp l[m3 ,grʌ¤H~v,R_cVm` Jy~х^ yE1?&%6ؼzN7@$cYZ$c&F?jp뢑r{@ݍ6[k'j Ƽ5֤6sH[ ]lV144wZsKS}󺿵87og5;m6~ڷ_v0`PFmɼߋcsǡH1(RtUO=ˎPR,HOyCeqwY%*(LD[-H|.9 _*O0D ;tь/N?x;Vc+±R}Hr߹bpWrŕm->BCf i$ˆǃΜX˦_Op&p(\d/ohҏtt;q9 e0T6 绛u⤁JAV& ӌY*HWs8gLĎ~ɇ>${̵\KG |^V*e!3Ʋ?w WD]j'7GH7WZ;o\7ťqMc{W_*K$ꖻvW/k4/ DV֋  3OaO&웩2!Hx}1ǟHC9mF(;/XĶ6Aq[{Jaﭻs{BnG7L3)bg!.~'F~G0+v =?D{ '~|̷XTz"~FrRE~!1(i* 5vkw\5[j3GZ]_ĚbMklhrh$yׯ\B̩>ً Ű3/T3יP`$R(G[_W$υB!V cDqJ?-B%jsWjܷ7 $Zھ XfW(V\t}63aG fz 6! '~%kmр`~CII EÎl!ľkIP61‚NjSbJ_T)Ӣ4~Ƨ'T <2o-}6߂o=bEl:S 0 `!g:5roVvۇ+unV,j8 c5k弙ds:>P9Il(f1GRČTuGat4) oCφ]0AiBX61H2={k,nAOm~Rٲn@28S%EkӦ& ;`F+\%]8se2Bs`xia4''o=#!_aĦ2R[\pn%Hd:d]cyB/P)XTp3S/P2u ûЧ^S2od~ J@UrIĹg|8dG2_LJ}~Ag;ϯ?ָosl҉I*tJQ0^Ty'%G@6>':&A =fi4iNEԩH/[S@ ?{n/FZ5_8yz+0Y#J>3a٥v $FkI'g' }L7'D(hKϷ8DI+@yqX7 }e $˕} .=hDܒΆr{p ]fEM[Y\4wa%ɬ56jwݩ8-qȑainФ>nA= @!>KavE,l~ 7HQמxgyW613ɠ7 Ap𦞐yCRB~<ʁz;z[5^ H'*OX~gwm]>%5hH8݊z;/l:&I'gbQ)P$"L=|AODUrD"Alm9N |sJ^q}h`ȹubܱK5:lK\^f#YM Gx#b|3~±7`z{l}LB_“8"ɽXO ЋujūuJŽ,\հNni܊cNE#8sk`;l"Uzi!R0P,岋u>Z3A<`Kվ52߽Q3Tl3́u8%# 7Y<r;bvgM,e$a;]Zzf'e]Jʼn{eP }OhN3Pz_=?WC݁}{# +Um O|\B>p:{ ?\~o/j}aa Ϋi_]{I*pCi[fY҆dtmKS]v3r1 jFRvg#+IH?Ϫ0s4Gu2HǕ~0mo&)?ԘTDjHpMp8$ ! v)$ߚg5nڐh-:ĭ4 KOvv<)2Vʾ\cjsW]ީAaD {,'W"k i$z~> 4;-Nr LH>Najd> K$)ܾk";K`PwS5*ݼREo(M~VuNp>d9ؿ_Vn\.[S?0KmC q{N}іb1 ]m>K7n_seu:cynL0~MK&JqBîrSϛ0xlh$vN]sI C듛;x L 3ð^j\LI]l =t}MWQqQpd^e|5fQX0ws Bd%"6;ZW&G7L"YT*cR ~|T Gp,:&M~Dh'4No%W^ބ+Tc8;b #;="rn2C>uBUlDpݺn3.U 쓂E'̫`qEչvhn|4/#X7,n_Ͳ̫ YhʰhVQgdfT&ކc [y>` tw4@\H`3q?7Q0?H`C)W{)Iʃi  
גC_0?I6mP!/&Uv__`?6e9)X W= Cgxh&{X~)1:{,)l|k.L͏\X$>['n.P'(z=0Z˕W2P%b6V0/ujX#Ƞ)d!(D8?T!43qK+Tw2Оvr<u8;gܢ?I U1K2dUiH,-ii)翳Pט` cK~A|"Y9A0[7SC>ݼCo`'_g߁^jSc- ?~\xA )ᶇ9H.pCRaJr;dJHGpdr^W ?+PĴ@2*[uWzy5Nh4&X[NpOo($9^L06fJžՙ;'C$ױ@kz^wF3; K:QGMO-6.r$ _xڵDޑlݭcsNe8\d[m$,9۹&B'.G U;N,\L]…}I+jIJ8񁢇@12 (0](_g<۴cİ5*7?18#ɧ'!U礭-Vy_ 3>|]ma7ayj-u]G)ء0|Or>7Nr EJrwW8TՅW%4DMFMܷ T4PYt#CՋ_^0rst_:)沊1+T:yŒfg|P{V;R"h2gfK+ ]sq[Y.Fa4hr(R~Or(/gIpn:'5MD4ڎk8slx⏱¢^B Dr_AFܹ;bߑ5-dPNWvb}U돓7k޺J2s,$8$Rɋڻ#kx{uEqvy1, 3lÁRUHF"H7{qھ*wcvw'A[#gOZv ,g~GH>I0,؂ݿO<"Y4kw~I$k78$*ݯi]GTK#NG3;C_o&q~_mrd0=wi` W5/\?& 4)z(>oMĚ~I .( HƃbB `uɔ8Ro#F{s;7co88+:ZZ8YsO,}쳒Tׁ3G#\He. R+^C YZgJpi4< A%c)u\i8rBv|g54S9 X}| ~2U=gӈߘsc'#'Y:{% aqsMe/E$Sm {&J2)Bk<UWqbP5D}me,&8& ٛt_ ؏=Oĩ+}\d*d-!A!̌ws|? 4ssӃh{ pobpnH TunÅZ5}~'w8$-8g9.;a@nT:G`7N> I zoXµ/2~L|cTAN>{eөVx=TA9l <1GlCɿiCo7*# sfQ-wnDЧPGix&)_Mf}."d}{ Vm0d TT0 7/XqvNw(s˰:Gm gLz#ϋÐPYh{mg֔3kX0*4J|pkuO}IU]NWCXP C9H3dx?dj+\>r&͒Oe!0f4 G\:v}ihϿu/X{'KlTչ은iju8AYOPJ_x~5XWn/NW')1  5S6o''ϊ "J< ϙq*yB0oo5 ;A/EσNpn+}:K!G:ڷ ک> 8Ȥ=;L?F=2oaf'潎 #ǭ$ )_ = SL[Z} ]*Hoz")s-A]S}_PfR,Q*-c]ϑrk m+:[XLQfXV>{}gau8}#;)SM%A:Z7lY1Oex \tjb` dmdi=$3 0Ano˴Z}_7P<]]#[ է/6L{6`㎯cͲ8KYpx4(Hpyb EOS}٫`ns+H8h S^nY3=s+^s_d+]4='\w">€R3k=}1׽ݶvgHJ-fT(۝OrΊ$Uw߮zH +֮?OpNy$RHvAbs~ެMcR9.i Гq'dJLig-Y;dN|7HIo!Q߸]?6f9Ҳ}.N-a0{7vo!-T M7uɳ^{`>̨?K aݢʃ[UnmKN+PdvPO\{{ OT3ԥ(R7w"8S6Dsͮ8͈,;=%4s]$[ӫoa\Hҝ֞$+0~2,igŹ;c/8o Ώ?& gz`ƪMIɬfִ%1&&7tNשuцmH+3Kl~Q&).j=I8zr7WBuX7zi'^x$b-Vt0 ǪFo:|# Uq]irZ9?mc͌$nO? 
0GJ$3W⑵}VƲR$Μ~)!Mvpယ.9kyK7c~taB݈2K*wΤK1x bt,u^ֵ:CPvdyS$Ӎ5Fd!1]}` k3c_*q5r#N|c-NJq`|y)^A UqDd{ Ljr7n&/1pѮxC`e'rv TUB5GI1nRvMiW$qYxG% m?Es0W͋/SJ& #RΫdXPgXsŐ1֌+8NqU09`u=ci/bIzjKՆpN٫dy=ؽGs9Way47n47vSE)D*owJ%uW,_j|pAu5vxN-.o0K0r8 ޯ/LcsBO^%qŢmpu7A- ]1v<{~kiH[t j4`U;ǭFb;L2mfE#S0.6w| wvmPsR@keZ6NHSi煪yS]hmp=ରsmL "7[Bg94NUsHGK:e)ttS!0e+N{[at?YCJp/b*WhcRO/''A}nqiR0F2nDrR+]z]U/l&{Qk> ] T$;5ܵyZXI T*^B%}V>$% {I{ZLqd~Xft]\Y&L?J¾$v,Hu(gY+׸r?&|6^g4XHknn7zk5̗|z}zGǓ0 ={:]X!ݻIVS17& S]9~R䨋}mʞ?&9^Va'в!9Ye5;\PwE#x_ÁvCQW'I8'vgU iƐY\=eYhՖʛ]$zf տ< ;]Hskz!vl4TPO2 ^>rN=bQt9uخ^%'Bp5RPowm\ʇ HGKCĶIDŶQ*gɡCN0TUэc/":p+Vj3<|h( 21'vg32ؓJ/"9{ {XmuSZ)L?lܷ%l Ko#18?1U: ,q'u4٤i3Ώaco6Ynv;Hd]J|Xy.?xaFda2sԆT2;S^ܜ|Ru(˚N(m&vu\28g80ݩHr8r;rk~p}xB^Z^rN䱒Q#0Q*Ev9aiuv KAJ&lﱏxPtlZ1Ǟ3e) x^[WzލC@\@qץ~%P^4}=sb CQ) ÷|+Aڰٷ ,'<{gM?3uDjBwbU8 Ԉ. TUsmx A 4EUFJUlsΎ]}Ӛ ?%YD Q,Y@Mxxu T D{~)14PLf%RN1?D TP[9岵"Y.\yRǟ?P\a5{%\|;'/J_M| up>jP|mMm CM n;"~铭$N?US[) yI#x[ob+]/UMpk2|:S- I=KRB=OΩdRywìv=VG0﬇xwy揩Hv}1UA:t %7H8}?tO7'HLEk>y!Z"9&eIʞ,(ݭ6AoBÄr8)1cv["-{F/hmŮk?8 F/_n\ꡟElmpY`<U!dgnoŒj4035yٹTxҰ*LW$:ab~~ S@|ѓ>r#b?M<ZIEZ6mO̢&␇f=/=p.{KXyy^)8[ԆF՗ҽDJ~w?gQN54}'mN$Ht@Sh}OdBPgIʕ?֕ _ R>0aqI@vHӛչS'"UE=ؘSY7n&(4a;ITށM^ucp~_ qDɞD<~0,W_^n}KBG}|'<ԙrl׭>o9,k 5I`Tܫ_H78}J_㰩af3RX[2cRB~$P٪]l~P m2URyZV6͚oV"^/Ҏdе75z_IB㢍i K{cbV3dgؼw4H5Ov(.1 ?N)K%_N[B߿5*8UU(W5n]T= 88R8~3I &cQq[գW-\,(OuЦ}fnsi%aJP Ht쫋k=mj o//S$y' _c[Ew[` H`1ٱ5lX"jw HKl/TAd N0ZMWJ*L'qqKvX2ۈӉm8p &lgH%fʹbJ ig<ٸtmc$uwK#vaJY~/e)  ~r%* LZߐt:&BЄ`Ulњ/]ce^ZA|_z2ybb-Z*UVb+5%@_u5$ۛEx0嬛qx5?u δ8h KNi؏{ycۖ#3̚ |V=W`5]jŹk{;FqoY`iq~0?)vkd%^g)%\Тuऄλqy0t>SA',=8T/vٸ_O3N O>J$B^XZQV1 CGGu'1׷va;;߆ҳVEi) FVtv)ɂqR YY뿸-8HN/9zn/l؆CL|IVNKnG  I0(!}{vw?=`ݽF_\D>#& ]q7MԍOÈ.Nݔ릜ʜ$8<; {d`awX w<=~:bv"\J];O0SBRR؟SǟHhWdX¿L~ZuP ~]$w[*Lva~孃T(A}I2>.RXuںoMc}űcfjvRRj87 8٨]t(6+4虿F{BpPмmɡtM9&,c=U>qNWA!cN?6C3U{{L= Kx0ήCQ0~KSQ4 uGN 3` ˆXm'/ Cra)冬 Q80KK~ \|=h[[b{h} qnQm~n۶mR Ҋ [C/Uwʎހ jF~K7Vo"q/ԇVsY}Yd^ۢ)A2ٟ(S&@}'CKIHy%Ч+ 5fS+-H6O706epOW%@5$ /`.jB2y {\15d7 V{֯n¾R5/zo@ \V_/ŕ`wH˃AɁII0Uc=tJrQRpve-ح\ifxKIw3榲oݗ]^ 3Iǜ3aȱ/A!f߶ 
nLwG`UNm}0ͭxO.[n:?,3قC!Sl9 >;. ]3b^7²1SKJTwXuay/nB>HV4u~]jNW #[ESQLM;f'BJl\=DHG'X~sarxatĢ?{;쏒,ۯ^Q݉\g+2[\^S24L#8}LB5e'2jux8Us,Еʹ+ ]WC+tZ Hga^L*Nfzg/g/Y|cjo* ?΋aևXezX,wLx>xƎ/7AI T??c*F$c3JF/<\>KtAʦsD+zz~P`ԤW#νfc1V!X&*+:8,q0UsȆkґK* At0U2z'Xj0!jm?O$WU`i`;{3.0.U `Bd DdIҮhDճ8:!!1I>Zf͇A$/-xgarM{:. (ӵtB Æmt\<]K3.{cFBgjyRh ɹO,Ҵc$G~,Xgn pE;;%>ٛ}ph. ElC'G |oL]JQc6+ةWDDŽF{Ml쩼T5v~ʭ*Xs0e+d%rFa/Y?>h+f4j7L&|[ Y{Jpu^c) |~d #o'Lw;`%h[_(O?Z> 3όaɼc#ɻzL4A9hi-ǭGya:z؊d{+#%`Иnf5 o[$PыƹFa8|[zy;~-O>xھ!V/,B1W[]ucXE=6c6@9 Ttm28D0 I]q_{SqXETŁcv5?wO-%-p4o$ȏ 'ڬv GYt~rdc}wϏ^ح8/|}z3uD2 go";_||6{1_*Ns8C%@Z_+.Z|Г~՘[P&PLf%(f/w#t3<_s'd՘,+'I<+v ]6B04s!ץzpeZ,NXrȁ﷗euHz71Qh$CO.@{[rD` bvg+[y'˓.8D5?Ÿ{lw g$W`/˿84!٣$ 2;  )h,aTEs8׻-87e[ ;Lt }\nAYP D%prkuiäO& f6ځ /E4^roX3BՔ# !Xc wOBwذ3laa' N X۩rv8%v)}iG<" } a)up8Ōe ]u1''> 7:8HhU(I,-}z~˵@ E9)CK8c2b/7o7#XSOBB;DŽ5]‚ݔ.x%~2ܒdr[;O-\I/0cX6Tx;EyћO-Q' Нd.U~>Z1 /VSarp<̹}_( Zw:0F492+Jo8^p,g1aⅩH='j< ^q?Y9[fqRX!s՞}y_x.s$q騒zaUIm}`|"F:#[ut[Hu~:ʧh  -pvn3d Oh,N=Ue~1ϳZH#wTC1&B;/@G3P[kp:UoL Z: SQk6sY\ -gci v w n&髢:7b8H %wwYY5wK~~ݤp~F>08V Kjv}4~\=V4zoiDUn;p{{Hq$}(Yu=I1SwhΥ+j.ߑ2[zVd,ʵ곅x6i+VAnWi:qHa_ Kr1\J!:!?/dYo\LYZMթl`jr=\R`ɦXǝ[pSr[HP(gZipmMa_bskGYYg}[)oVRV*[`WJg+AxsemTools/data/exLong.rda0000644000175100001440000003351613000201061014721 0ustar hornikusers{?UqQfJd IjLIdΔY*D$D1D$!IuIf<㾟y?#,;[v,<5p8jH=;7"; w ;?YK vuDG{˿s3w\wcޱjnZ)h~ͪ-2##q^ZK7ZĊiGQ1_hõ`Ρo*. 
3UR=J3zٔ ?c[J:gJǡx@7guY!Dil} y QQ]~#5NEK1-FHÌ:/Q+:Ïv:=t>*\WʇoIȓ>,;^}D f {uwҙ j4@lLu&sh2)'sϽȏtZ] ɬ'Х5HfJ,5>?k m~wcG<0Psu:.<#JrY?)fiꧣоŢr飾>tq{zbsTnEMGצj)0/vk^0teZ))/iϨFFu!:`.:=w"6xx=rʁ0IZʶ/ݳ4Z1\2äm36,nrq&XlEL-4&,#.܋g`tQ8bBexH0|jOQ+\G\91@;$B]Gjeq7gǪS$/ -r<9'=3Eڡ -gPu1fxlӬ,kBYi@٥;{;|ǥTa Ra^-O!*ȋr>lfrrw8m.ZN5Kk.~&DVfvw0Zic:w )Q=5\0~ZLgmia( ǧ?~ iKd(04T1rLNMR\o|^:>0/1\qh4&Sm%䡥,̭hrV'2:DŽ4HeB5.4ƸQ__ hfb BU;1M u/L9${l Pg[O@3fo IXfh@#{ʪ Me)BwAQK!Q^u<`UՊ9ysspAidBa@;H1 TR6\Sa.g.T/۹~3OG.CwWV%ۏ_5 =.bhW d@Ge>^=.^EF']sݾ 7AcMgr'r 3T-@KYGǰ5N^YL=L%vͅ2\A{z/|B &]hwUQs6Xd: LgP-M:)㳾D`޵[o< ]τCZ+[_H <?ˡytchaX$ _@daA6]ߏD9q^Dm~O"F̈ &jRn0}G~*B$eɷa E+ ly_j84HĻMp ]S*Pu {8p.Ѡ-d*D~! ;ċSH yثPV'>KPE#9E6d Tc6sxUDCcyk+hRiCUP3;|P|0Z>,UmS/5Lr;_Mg$ ^4|LW4I፶Pҙ/fXfaX '"қQQo9 ,J-;~E;CBS&Ƣ&ڦq ~s8:EPcӜԣ{F$tmeH@m8ᥒҮɂq=ӨVw)ϢS<7<_>z"8r$?7U\Z!шF#f}Cɵ:( cb|иfy0Q/'y9COԫq3 6y 6w o|u| s*4L!ftȠo f;5Ѳ&#Z(D1$6ʡS f4$W/: {wѠaUʝwLݸ,L]Z{ RO&F^~QJǾ -ePG ERAY@Zv9Lyil_GCbAJyhhްڢd7A gB?Oއ|҈6[7Ԩ V0ς *I^4ϭ?8BB~r7C Bw~1EW묐uwt+~=? g=>Zv"hQYv!Qݺ)L&C]bl<<3?1 Vvh ڟW.Ynzqjʷ]n0wlԩi).*;MN)a̻ĥ$eظ+Ȭ\2\o |`*7A_rNJ`1չ 0xWc *V;+2t%m0d& \caP[zPA(䅍S2VB4uY Uǃ%1h2{ZX9baJ*Ku$b7?r\聉?.{)Un*Ætb@! 10o0~J~9g1\ -G%q{i Du Pd̆.rVV=\a0dK74`Yzz9싄rF$L}X<!heOM)7ڗH0ʞٓc`F4Ry -t|4x-˒+ݡ$? 
6 L~+я5=GXaP}``lcGIOVkVeg0o3w~~~NsPX 4Gb[ h*<ަuݏ1Wb(sҔRCΫ(gm\X<ԙl[kbcB`JCcx (O/\OA<;:huMagug2T-4k4zGr.K6H[ 0+)X&fF>1An쁜{2 6ƆvU P'5 u'{׾y=z\0{Su l58T[]yDTX`.tao':v5Hq܋.dX)}v%"&'RyBՊۘ=\a">Bƹ{U0|ؐa蹼x|dԍva|>!Ubt9X ㎬u^FSZocĚzrb,s$g`I`HCe?/В!hs U <򿈇[g`;ԹrRL/}0h^>:S/,U(SE)􈸻wk3&E[+~C'"`*`̛8?ElPmP@>+X<%S \A)4@i|U%+ zYsUZ%-,}K0{q}Fp4ucXg =DC9mÐKgS@/\l]6 *5`ⅵxl4Jӭ-9z.(| t=x -wH%K9ܨ|i8I3+ˠvPx!)drUDbZǻ`H:,,ľ,m<8~UtZe?Y\C&.JeS2\b3颪;lBԠ ^l*1<'>J*[9!Cч8R#YQJ\?[{L/H||_Nh^*yez7 T305' Qu%&UKә5aWijB}١}U >9'"Hk"]~Gxoh1aX+Kx_XĦT\9qjc: 閥~ 'cL8L:+B T^^)?Z^ O+xduםPM( 6 R0Pwc&^>#~1"̵0Č\(m~z5VaGы }gM3[}/`G?A2Pɛrxr^u& |{V}at srͷgaXcMק@ѝ?;, .HSlj8mLS;k @H :^H7Cog0 )`+eݷtCկs>,lK4*Nح;xg:L?:̞l mOЦ9z\KCB'7>i:Yww'o8A!X3LnWB!uV+t(' +Mȩ?'<1\˚24Է\ 3",G7peڻ%j~D|E/kFϢ-N+7-h V{Q C Eo1أ(ͻ!3 aKVA 6fuSDDt f˥jݿgVa%-m DaXL8RzjЏHYR܅yI;Ըj<G^8qEͽ+~_L+{`$,}T ɛzt>BԳF(} %zq ĕo݈/,JVO}52:^DNuhbR%;<[Vcϭwp0B;p6 ',ta0]LOtDj̬N]??Z%x̏ZW^_ߺfSvK=kW-{UZ$Du]0L.ļ@?9IyĿ$-jQz{P8+h8݆SJ ŜRBvt* E7#V#`r5${Ejfrja6b8H/-Y仅 +hf,v^ ـeeP̵4a4'ivXs[mAM_ IPIv:u|d0swߴٷSq3z[(:i:V$έ( 镧/HQ Zb{GZʩsa{{`I%e*t~_W ׀Ur0dVTSAE`u[05W9HGP\5&}lqatBO4,3&@1GDaI.Qp=F5kOvԗ{cKJ_ u0Gh*s%Y@ kSIGz}6*LT ]w~Y{30hjW*myFbqm-yX VD|bwIa+ettaUp$X {kVCap83bU ]kR<֕@M& blOl~hjgAY#rLtYhMw2W=b'bPV]4Y(Gd1F1WdzbڏT9_uMy=kS-͓6Yu: d`D‰}1cnNW0k }Yp}]-T6}p:bӄi%q:cgn9 EvO킖*=267rC@vM)U2'CcȘYn3I-9UbTד”B,]aoH&Jw8Y-a\ &RZTp(*!T]Z mT%za}Gqd ÖŠ$h8Ra? 
B+5VBG>W=~6ߔLMFd3 V^&PNGJL+/`]L^uT ä캜Yg|/%PW\_I*;ڲuCɁ]h"z0FoE{k`D$' @;ђu Ý/@ _0Ic}ka+vЭ6z5jD=ڎmCM/m5-mŇ_=*#Q r,^nq+ y_n!ZS2}@aT=(&$J7`%g!U\W_,Ry32).tǯz4?zJK,zoGtpvBi#8hϽci9iz}|Q;g)VU:ϲEX 7 Q^$ŅwpEn0jFk`nGX :=\e^%x%Cۭ;ѧam mآgS4^)<\/TɊ>]];pڛ y%P?m@T:#^~^} T~FvN'Sz@CdNQۢ#n_pOak䠏VkX9btW*Ƶ19na4!4B7UxchbFt?°V,'|_O3sPe|yOHX4 dݷ"92ޑՍ.hS+;hf &%irp@k%Y9,ONn}ĕWo2{jB}kxiQ Z<5D "_`AN qժXjG#th&%X~=v鱝 DB{`9Ly:(ܶTEUVRӟF}'[?eQ/ qw/ LU]A$ [b4}+;+U@B0=9d V>$%'IF<$<*!r"V_X&z8|q LfZt(B1M`2M0%U",,o BŃ)b#gOo|_GaŠl0FUS0jZj忯 k'bwiKnl=0ptþH^ˇak4Ft 5~"Ys}Ine C4g91awG [0I}Flμ ˞e':1&1&LfPHiL`~艽׌po?q na{7}&Mb<Fm\X+۟vwX5%ڽQsM .IauX4X^esLywgs]} MŦ&F-9(rկXe-א~GupDc}hxwwUyx4fʽ0cTL9>iD$m>`l⺧.0"`[(l}AoDoGs^Bt5FlWh%ߒ9WΒuYybxpcn_u*_UL*-/]]. ,g(΃o)k8HiƒVrTr<Sݓ<cۧ$49_}w Ȃq47czDghcN B}g냯8@=>G OŪa$H_?U?y9oG L?w #dJEs*7`Oy>˖L^fD PI f0_nF0ɾ8D]vV|k'elB6ƨgѼ>> ~1up.O/NLm^] SM-R#Nsȼ^f~3Q ge3V Ɖ|Ї,cOJmM ArJ=E"\SFX e5U;**s~f*sg7גoy Q;sr&Zqp?^7 }D,y @ydci_̨>,)sGP_}>g8ar*aTJ I;67&1sWFh>OX>v]-~3 \ kPC*G8,G|@W}Wg8!K)4AF ʻԤÐхJq?PW^ Ðѫ"暃Hc~sqz@ZOu>}t*Lhj+xa "(]3?alReC `15O5v`4>;/@IO O%J'̵B`l,֮x8z+ɸ+qq `z͒j&eœ\J`^CY3F/8=7Iٿ`K˭ZlW!hYYJr7ՠ 2"24aaMꎮ̏ @'.[KC~P4ߡ7pR&h/z[zuX[k~J2?ǡ?<6X/dVrgC;i;F-L L zO_WsFvًf6^ʡժ<^"͍`=螺2,_vѳÆPEƙv u91 h",4:bRC0az^yGc}"[r-8~Z7Y*5Bg1z2~p 圿. ^f,yG7ݳ%?砝3eMX 9NtAu|5(avaARpAdxF|W8Xbt(lGd@$Yd*Ia-NoU|Lw;ʺ(71cH eoќ}`uvUýj(Ѐ[f40|Ԡ65pյFc~f_NMWƢ0} s_`31ԡX w0XhLټLlΦ`B;9Ao9伏 \M^<9#|sUc[C{z wUX EPLvSI M/kHB>Kgo2]J 'cal#; 6={c%O~1h9{| +38`95\5鹲O&vc;}goXcxϐ0(kLyOV`u0Fk!p1o,`E^bJ)hXRUiJ)jlsJmJT yKš|w#k_55pcz`4qx' } υnhBꇧN8Nϰ\b&{6O%sE4P|Ž8a]YfCE>L͎F^lhph }V;alE̽e< W.Yq\4@k"aDdO@7 m(Lq~/rXҋ*TEoo?bl_"ʣA @VXL[003'@FL+bIe9#tO$ [;|15}[M$c+# 7Z }W0\ ,̊s |X'0r2Kxfl5~4(Ԃ"Ű\1#@l[K"ABg6`vZcFe@qL;ag+(uG%2rǨ_.Ӄu. MYV éLvAW3T1衏0tOkrUd)lE~L)aϪ잯}3mᶽſ$ZNǗ7'\-Fb-ŜcN=$5a7zY2v-a3Z-/=semTools/R/0000755000175100001440000000000013002112720012262 5ustar hornikuserssemTools/R/permuteMeasEq.R0000644000175100001440000011256413000250017015173 0ustar hornikusers### Terrence D. 
Jorgensen ### Last updated: 24 April 2016 ### permutation randomization test for measurement equivalence and DIF ## create s4 class for result object setClass("permuteMeasEq", slots = c(PT = "data.frame", modelType = "character", ANOVA = "vector", AFI.obs = "vector", AFI.dist = "data.frame", AFI.pval = "vector", MI.obs = "data.frame", MI.dist = "vector", extra.obs = "vector", extra.dist = "data.frame", n.Permutations = "integer", n.Converged = "integer", n.nonConverged = "vector", n.Sparse = "vector", oldSeed = "integer")) ## function to check validity of arguments to permuteMeasEq() checkPermArgs <- function(nPermute, modelType, con, uncon, null, param, freeParam, covariates, AFIs, moreAFIs, maxSparse, maxNonconv, showProgress, warn, datafun, extra, parallelType, ncpus, cl, iseed) { fixedCall <- as.list(match.call())[-1] fixedCall$nPermute <- as.integer(nPermute[1]) fixedCall$modelType <- modelType[1] if (!fixedCall$modelType %in% c("mgcfa","mimic","long")) stop('modelType must be one of c("mgcfa","mimic","long")') if (fixedCall$modelType == "long") stop('modelType "long" is not yet available.') if (fixedCall$modelType == "mgcfa" && lavaan::lavInspect(con, "ngroups") == 1L) stop('modelType = "mgcfa" applies only to multigroup models.') if (fixedCall$modelType == "mimic") { uncon <- NULL fixedCall$uncon <- NULL fixedCall <- c(fixedCall, list(uncon = NULL)) } ## strip white space if (is.list(param)) { fixedCall$param <- lapply(param, function(cc) gsub("[[:space:]]+", "", cc)) } else if (!is.null(param)) fixedCall$param <- gsub("[[:space:]]+", "", param) if (!is.null(freeParam)) fixedCall$freeParam <- gsub("[[:space:]]+", "", freeParam) if (fixedCall$modelType == "mimic") { # PT <- lavaan::lavaanify(fixedCall$param) # checkCovs <- unique(PT$rhs[PT$op == "~"]) # if (is.null(covariates)) covariates <- checkCovs # if (length(setdiff(covariates, checkCovs))) # warning('Argument "covariates" includes predictors not in argument "param"') ##### ordVars <- 
lavaan::lavNames(con, type = "ov.ord") fixedCall$covariates <- as.character(covariates) } fixedCall$maxSparse <- as.integer(maxSparse[1]) fixedCall$maxNonconv <- as.integer(maxNonconv[1]) fixedCall$showProgress <- as.logical(showProgress[1]) fixedCall$warn <- as.integer(warn[1]) fixedCall$oldSeed <- as.integer(NULL) parallelType <- as.character(parallelType[1]) if (!parallelType %in% c("none","multicore","snow")) parallelType <- "none" if (!is.null(cl)) { if (!is(cl, "cluster")) stop("Invalid cluster object. Check class(cl)") parallelType <- "snow" ncpus <- length(cl) } if (parallelType == "multicore" && .Platform$OS.type == "windows") { parallelType <- "snow" message("'multicore' option unavailable on Windows. Using 'snow' instead.") } ## parallel settings, adapted from boot::boot() if (parallelType != "none") { if (is.null(ncpus) || ncpus > parallel::detectCores()) { ncpus <- parallel::detectCores() - 1 } if (ncpus <= 1L) { parallelType <- "none" } else { fixedCall$showProgress <- FALSE fixedCall$old_RNG <- RNGkind() fixedCall$oldSeed <- .Random.seed if (fixedCall$old_RNG[1] != "L'Ecuyer-CMRG") { RNGkind("L'Ecuyer-CMRG") message("Your RNGkind() was changed from ", fixedCall$old_RNG[1], " to L'Ecuyer-CMRG, which is required for reproducibility ", " in parallel jobs. Your RNGkind() has been returned to ", fixedCall$old_RNG[1], " but the seed has not been set. ", " The state of your previous RNG is saved in the slot ", " named 'oldSeed', if you want to restore it using ", " the syntax:\n", ".Random.seed[-1] <- permuteMeasEqObject@oldSeed[-1]") } fixedCall$iseed <- as.integer(iseed[1]) if (is.na(fixedCall$iseed)) fixedCall$iseed <- 12345 } } fixedCall$parallelType <- parallelType if (is.null(ncpus)) { fixedCall$ncpus <- NULL fixedCall <- c(fixedCall, list(ncpus = NULL)) } else fixedCall$ncpus <- ncpus ## check that "param" is NULL if uncon is NULL, and check for lavaan class notLavaan <- "Non-NULL 'con', 'uncon', or 'null' must be fitted lavaan object." 
if (is.null(uncon)) { if (!is.null(fixedCall$param) && fixedCall$modelType == "mgcfa") { message(c(" When 'uncon = NULL', only configural invariance is tested.", "\n So the 'param' argument was changed to NULL.")) fixedCall$param <- NULL fixedCall <- c(fixedCall, list(param = NULL)) } if (class(con) != "lavaan") stop(notLavaan) } else { if (class(con) != "lavaan") stop(notLavaan) if (class(uncon) != "lavaan") stop(notLavaan) } if (!is.null(null)) { if (class(null) != "lavaan") stop(notLavaan) } ############ FIXME: check that lavaan::lavInspect(con, "options")$conditional.x = FALSE (find defaults for continuous/ordered indicators) if (!is.null(fixedCall$param)) { ## Temporarily warn about testing thresholds without necessary constraints. FIXME: check for binary indicators if ("thresholds" %in% fixedCall$param | any(grepl("\\|", fixedCall$param))) { warning(c("This function is not yet optimized for testing thresholds.\n", "Necessary identification contraints might not be specified.")) } ## collect parameter types for "mgcfa" if (fixedCall$modelType != "mimic") { ## save all estimates from constrained model PT <- lavaan::parTable(con)[ , c("lhs","op","rhs","group","plabel")] ## extract parameters of interest paramTypes <- c("loadings","intercepts","thresholds","residuals","means", "residual.covariances","lv.variances","lv.covariances") params <- PT[paste0(PT$lhs, PT$op, PT$rhs) %in% setdiff(fixedCall$param, paramTypes), ] ## add parameters by type, if any are specified types <- intersect(fixedCall$param, paramTypes) ov.names <- lavaan::lavNames(con, "ov") isOV <- PT$lhs %in% ov.names lv.names <- con@pta$vnames$lv[[1]] isLV <- PT$lhs %in% lv.names & PT$rhs %in% lv.names if ("loadings" %in% types) params <- rbind(params, PT[PT$op == "=~", ]) if ("intercepts" %in% types) { params <- rbind(params, PT[isOV & PT$op == "~1", ]) } if ("thresholds" %in% types) params <- rbind(params, PT[PT$op == "|", ]) if ("residuals" %in% types) { params <- rbind(params, PT[isOV & PT$lhs == 
PT$rhs & PT$op == "~~", ]) } if ("residual.covariances" %in% types) { params <- rbind(params, PT[isOV & PT$lhs != PT$rhs & PT$op == "~~", ]) } if ("means" %in% types) { params <- rbind(params, PT[PT$lhs %in% lv.names & PT$op == "~1", ]) } if ("lv.variances" %in% types) { params <- rbind(params, PT[isLV & PT$lhs == PT$rhs & PT$op == "~~", ]) } if ("lv.covariances" %in% types) { params <- rbind(params, PT[isLV & PT$lhs != PT$rhs & PT$op == "~~", ]) } ## remove parameters specified by "freeParam" argument params <- params[!paste0(params$lhs, params$op, params$rhs) %in% fixedCall$freeParam, ] fixedCall$param <- paste0(params$lhs, params$op, params$rhs) } } if (is.null(AFIs) & is.null(moreAFIs)) { message("No AFIs were selected, so only chi-squared will be permuted.\n") fixedCall$AFIs <- "chisq" AFIs <- "chisq" } if ("ecvi" %in% AFIs & lavaan::lavInspect(con, "ngroups") > 1L) stop("ECVI is not available for multigroup models.") ## check estimators leastSq <- grepl("LS", lavaan::lavInspect(con, "options")$estimator) if (!is.null(uncon)) { if (uncon@Options$estimator != lavaan::lavInspect(con, "options")$estimator) stop("Models must be fit using same estimator.") } if (!is.null(null)) { if (lavaan::lavInspect(null, "options")$estimator != lavaan::lavInspect(con, "options")$estimator) stop("Models must be fit using same estimator.") } ## check extra functions, if any restrictedArgs <- c("con","uncon","null","param","freeParam","covariates", "AFIs","moreAFIs","maxSparse","maxNonconv","iseed") if (!missing(datafun)) { if (!is.function(datafun)) stop('Argument "datafun" must be a function.') extraArgs <- formals(datafun) if (!all(names(extraArgs) %in% c(restrictedArgs, "data"))) stop('The user-supplied function "datafun" can only have any among the ', 'following arguments:\n', paste(restrictedArgs, collapse = ", ")) } if (!missing(extra)) { if (!is.function(extra)) stop('Argument "extra" must be a function.') extraArgs <- formals(extra) if (!all(names(extraArgs) %in% 
restrictedArgs)) stop('The user-supplied function "extra" can only have any among the ', 'following arguments:\n', paste(restrictedArgs, collapse = ", ")) } ## return evaluated list of other arguments lapply(fixedCall, eval) } ## function to extract fit measures getAFIs <- function(...) { dots <- list(...) AFI1 <- list() AFI0 <- list() leastSq <- grepl("LS", lavaan::lavInspect(dots$con, "options")$estimator) ## check validity of user-specified AFIs, save output if (!is.null(dots$AFIs)) { IC <- grep("ic|logl", dots$AFIs, value = TRUE) if (leastSq & length(IC)) { stop(paste("Argument 'AFIs' includes invalid options:", paste(IC, collapse = ", "), "Information criteria unavailable for least-squares estimators.", sep = "\n")) } if (!is.null(dots$uncon)) AFI1[[1]] <- lavaan::fitMeasures(dots$uncon, fit.measures = dots$AFIs, baseline.model = dots$null) AFI0[[1]] <- lavaan::fitMeasures(dots$con, fit.measures = dots$AFIs, baseline.model = dots$null) } ## check validity of user-specified moreAFIs if (!is.null(dots$moreAFIs)) { IC <- grep("ic|hqc", dots$moreAFIs, value = TRUE) if (leastSq & length(IC)) { stop(paste("Argument 'moreAFIs' includes invalid options:", paste(IC, collapse = ", "), "Information criteria unavailable for least-squares estimators.", sep = "\n")) } if (!is.null(dots$uncon)) AFI1[[2]] <- moreFitIndices(dots$uncon, fit.measures = dots$moreAFIs) AFI0[[2]] <- moreFitIndices(dots$con, fit.measures = dots$moreAFIs) } ## save observed AFIs or delta-AFIs if (is.null(dots$uncon)) { AFI.obs <- unlist(AFI0) } else { AFI.obs <- unlist(AFI0) - unlist(AFI1) } AFI.obs } ## Function to extract modification indices for equality constraints getMIs <- function(...) { dots <- list(...) 
if (dots$modelType == "mgcfa") { ## save all estimates from constrained model PT <- lavaan::parTable(dots$con)[ , c("lhs","op","rhs","group","plabel")] ## extract parameters of interest params <- PT[paste0(PT$lhs, PT$op, PT$rhs) %in% dots$param, ] ## return modification indices for specified constraints (param) MIs <- lavaan::lavTestScore(dots$con)$uni MI.obs <- MIs[MIs$lhs %in% params$plabel, ] } else if (dots$modelType == "mimic") { if (is.list(dots$param)) { MI <- lapply(dots$param, function(x) lavaan::lavTestScore(dots$con, add = x)$test) MI.obs <- do.call(rbind, MI) } else MI.obs <- lavaan::lavTestScore(dots$con, add = dots$param)$uni } else if (dots$modelType == "long") { ## coming soon } MI.obs } ## Functions to find delta-AFIs & maximum modification index in one permutation permuteOnce.mgcfa <- function(i, d, G, con, uncon, null, param, freeParam, covariates, AFIs, moreAFIs, maxSparse, maxNonconv, iseed, warn, extra = NULL, datafun = NULL) { old_warn <- options()$warn options(warn = warn) ## save arguments from call argNames <- names(formals(permuteOnce.mgcfa)) availableArgs <- lapply(argNames, function(x) eval(as.name(x))) names(availableArgs) <- argNames nSparse <- 0L nTries <- 1L while ( (nSparse <= maxSparse) & (nTries <= maxNonconv) ) { ## permute grouping variable d[ , G] <- sample(d[ , G]) ## transform data? 
if (!is.null(datafun)) { extraArgs <- formals(datafun) neededArgs <- intersect(names(extraArgs), names(availableArgs)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn])) extraArgs$data <- d originalNames <- colnames(d) d <- do.call(datafun, extraArgs) ## coerce extraOut to data.frame if (!is.data.frame(d)) stop('Argument "datafun" did not return a data.frame') if (!all(originalNames %in% colnames(d))) stop('The data.frame returned by argument "datafun" did not contain ', 'column names required by the model:\n', paste(setdiff(originalNames, colnames(d)), collapse = ", ")) } ## for ordered indicators, check that groups have same observed categories ordVars <- lavaan::lavNames(con, type = "ov.ord") if (length(ordVars) > 0) { try(onewayTables <- lavaan::lavTables(d, dimension = 1L, categorical = ordVars, group = G), silent = TRUE) if (exists("onewayTables")) { if (any(onewayTables$obs.prop == 1)) { nSparse <- nSparse + 1L next } } else { ## no "onewayTables" probably indicates empty categories in 1+ groups nSparse <- nSparse + 1L next } } ## fit null model, if it exists if (!is.null(null)) { out.null <- lavaan::update(null, data = d, group.label = lavaan::lavInspect(con, "group.label")) } ## fit constrained model, check for convergence try(out0 <- lavaan::update(con, data = d, group.label = lavaan::lavInspect(con, "group.label"))) if (!exists("out0")) { nTries <- nTries + 1L next } if (!lavaan::lavInspect(out0, "converged")) { nTries <- nTries + 1L next } ## fit unconstrained model (unless NULL), check for convergence if (!is.null(uncon)) { try(out1 <- lavaan::update(uncon, data = d, group.label = lavaan::lavInspect(con, "group.label"))) if (!exists("out1")) { nTries <- nTries + 1L next } if (!lavaan::lavInspect(out1, "converged")) { nTries <- nTries + 1L next } } ## If you get this far, everything converged, so break WHILE loop break } ## if WHILE loop ended before getting results, return NA if ( (nSparse == maxSparse) | (nTries == maxNonconv) 
) { allAFIs <- c(AFIs, moreAFIs) AFI <- rep(NA, sum(!is.na(allAFIs))) names(AFI) <- allAFIs[!is.na(allAFIs)] MI <- if (is.null(param)) NULL else NA extra.obs <- NA nTries <- nTries + 1L } else { availableArgs$con <- out0 if (exists("out1")) availableArgs$uncon <- out1 if (exists("out.null")) availableArgs$null <- out.null AFI <- do.call(getAFIs, availableArgs) ## save max(MI) if !is.null(param) if (is.null(param)) { MI <- NULL } else { MI <- max(do.call(getMIs, c(availableArgs, modelType = "mgcfa"))$X2) } ## anything extra? if (!is.null(extra)) { extraArgs <- formals(extra) neededArgs <- intersect(names(extraArgs), names(availableArgs)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn])) extraOut <- do.call(extra, extraArgs) ## coerce extraOut to data.frame if (!is.list(extraOut)) extraOut <- as.list(extraOut) extra.obs <- data.frame(extraOut) } else extra.obs <- data.frame(NULL) } options(warn = old_warn) list(AFI = AFI, MI = MI, extra = extra.obs, n.nonConverged = nTries - 1L, n.Sparse = nSparse) } permuteOnce.mimic <- function(i, d, G, con, uncon, null, param, freeParam, covariates, AFIs, moreAFIs, maxSparse, maxNonconv, iseed, warn, extra = NULL, datafun = NULL) { old_warn <- options()$warn options(warn = warn) ## save arguments from call argNames <- names(formals(permuteOnce.mimic)) availableArgs <- lapply(argNames, function(x) eval(as.name(x))) names(availableArgs) <- argNames nTries <- 1L while (nTries <= maxNonconv) { ## permute covariate(s) within each group if (length(G)) { for (gg in lavaan::lavInspect(con, "group.label")) { dG <- d[ d[[G]] == gg, ] N <- nrow(dG) newd <- dG[sample(1:N, N), covariates, drop = FALSE] for (COV in covariates) d[d[[G]] == gg, COV] <- newd[ , COV] } } else { N <- nrow(d) newd <- d[sample(1:N, N), covariates, drop = FALSE] for (COV in covariates) d[ , COV] <- newd[ , COV] } ## transform data? 
if (!is.null(datafun)) { extraArgs <- formals(datafun) neededArgs <- intersect(names(extraArgs), names(availableArgs)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn])) extraArgs$data <- d originalNames <- colnames(d) d <- do.call(datafun, extraArgs) ## coerce extraOut to data.frame if (!is.data.frame(d)) stop('Argument "datafun" did not return a data.frame') if (!all(originalNames %in% colnames(d))) stop('The data.frame returned by argument "datafun" did not contain ', 'column names required by the model:\n', paste(setdiff(originalNames, colnames(d)), collapse = ", ")) } ## fit null model, if it exists if (!is.null(null)) { out.null <- lavaan::update(null, data = d, group.label = lavaan::lavInspect(con, "group.label")) } ## fit constrained model try(out0 <- lavaan::update(con, data = d, group.label = lavaan::lavInspect(con, "group.label"))) ## check for convergence if (!exists("out0")) { nTries <- nTries + 1L next } if (!lavaan::lavInspect(out0, "converged")) { nTries <- nTries + 1L next } ## If you get this far, everything converged, so break WHILE loop break } ## if WHILE loop ended before getting results, return NA if (nTries == maxNonconv) { allAFIs <- c(AFIs, moreAFIs) AFI <- rep(NA, sum(!is.na(allAFIs))) names(AFI) <- allAFIs[!is.na(allAFIs)] MI <- if (is.null(param)) NULL else NA extra.obs <- NA nTries <- nTries + 1L } else { availableArgs$con <- out0 if (exists("out.null")) availableArgs$null <- out.null AFI <- do.call(getAFIs, availableArgs) if (is.null(param)) { MI <- NULL } else { MI <- max(do.call(getMIs, c(availableArgs, modelType = "mimic"))$X2) } ## anything extra? 
if (!is.null(extra)) { extraArgs <- formals(extra) neededArgs <- intersect(names(extraArgs), names(availableArgs)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn])) extraOut <- do.call(extra, extraArgs) ## coerce extraOut to data.frame if (!is.list(extraOut)) extraOut <- as.list(extraOut) extra.obs <- data.frame(extraOut) } else extra.obs <- data.frame(NULL) } options(warn = old_warn) list(AFI = AFI, MI = MI, extra = extra.obs, n.nonConverged = nTries - 1L, n.Sparse = integer(length = 0)) } ## Function to permute difference in fits permuteMeasEq <- function(nPermute, modelType = c("mgcfa","mimic"), con, uncon = NULL, null = NULL, param = NULL, freeParam = NULL, covariates = NULL, AFIs = NULL, moreAFIs = NULL, maxSparse = 10, maxNonconv = 10, showProgress = TRUE, warn = -1, datafun, extra, parallelType = c("none","multicore","snow"), ncpus = NULL, cl = NULL, iseed = 12345) { ## save arguments from call availableArgs <- as.list(formals(permuteMeasEq)) argNames <- names(availableArgs) if (missing(datafun)) argNames <- setdiff(argNames, "datafun") if (missing(extra)) argNames <- setdiff(argNames, "extra") for (aa in argNames) { if (!is.null(eval(as.name(aa)))) suppressWarnings(availableArgs[[aa]] <- eval(as.name(aa))) } ## check and return them fullCall <- do.call(checkPermArgs, availableArgs) ## assign them to workspace (also adds old_RNG & oldSeed to workspace) for (aa in names(fullCall)) assign(aa, fullCall[[aa]]) ###################### SAVE OBSERVED RESULTS ########################## AFI.obs <- do.call(getAFIs, fullCall) ## save modification indices if !is.null(param) if (is.null(param)) { MI.obs <- data.frame(NULL) } else MI.obs <- do.call(getMIs, fullCall) ## anything extra? 
if (!missing(extra)) { extraArgs <- formals(extra) neededArgs <- intersect(names(extraArgs), names(fullCall)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) fullCall[nn])) extraOut <- do.call(extra, extraArgs) ## check that extra() returns a named list of scalars if (!is.list(extraOut)) extraOut <- as.list(extraOut) wrongFormat <- paste('Function "extra" must return a numeric vector or a', 'list of scalars, with each element named.') if (!all(sapply(extraOut, is.numeric))) stop(wrongFormat) if (!all(sapply(extraOut, length) == 1L)) stop(wrongFormat) if (is.null(names(extraOut)) | any(names(extraOut) == "")) stop(wrongFormat) extra.obs <- do.call(c, extraOut) } else extra.obs <- numeric(length = 0L) ######################### PREP DATA ############################## argList <- fullCall[c("con","uncon","null","param","freeParam","covariates", "AFIs","moreAFIs","maxSparse","maxNonconv","warn","iseed")] argList$G <- lavaan::lavInspect(con, "group") ## check for categorical variables # catVars <- lavaan::lavNames(con, type = "ov.ord") # numVars <- lavaan::lavNames(con, type = "ov.num") # latentVars <- lavaan::lavNames(con, type = "lv.regular") ## assemble data to which the models were fit if (length(argList$G)) { dataList <- mapply(FUN = function(x, g, n) { y <- data.frame(as.data.frame(x), g, stringsAsFactors = FALSE) names(y) <- c(n, argList$G) y }, SIMPLIFY = FALSE, x = lavaan::lavInspect(con, "data"), g = lavaan::lavInspect(con, "group.label"), n = lavaan::lavNames(con, type = "ov", group = seq_along(lavaan::lavInspect(con, "group.label")))) argList$d <- do.call(rbind, dataList) } else { argList$d <- as.data.frame(lavaan::lavInspect(con, "data")) names(argList$d) <- lavaan::lavNames(con, type = "ov") } ## check that covariates are actual variables if (modelType == "mimic") { if (length(covariates) && !all(covariates %in% names(argList$d))) stop('These specified covariates are not columns in the data.frame:\n', paste(setdiff(covariates, names(argList$d)), 
collapse = ", ")) } ## anything extra? if (!missing(extra)) argList$extra <- extra if (!missing(datafun)) argList$datafun <- datafun ###################### PERMUTED RESULTS ########################### ## permute and return distributions of (delta)AFIs, largest MI, and extras if (showProgress) { mypb <- txtProgressBar(min = 1, max = nPermute, initial = 1, char = "=", width = 50, style = 3, file = "") permuDist <- list() for (j in 1:nPermute) { permuDist[[j]] <- do.call(paste("permuteOnce", modelType, sep = "."), args = c(argList, i = j)) setTxtProgressBar(mypb, j) } close(mypb) } else if (parallelType == "multicore") { if (length(iseed)) set.seed(iseed) argList$FUN <- paste("permuteOnce", modelType, sep = ".") argList$X <- 1:nPermute argList$mc.cores <- ncpus argList$mc.set.seed <- TRUE pmcl <- function(...) { parallel::mclapply(...) } permuDist <- do.call(pmcl, args = argList) ## restore old RNG type if (fullCall$old_RNG[1] != "L'Ecuyer-CMRG") RNGkind(fullCall$old_RNG[1]) } else if (parallelType == "snow") { stopTheCluster <- FALSE if (is.null(cl)) { stopTheCluster <- TRUE cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) } parallel::clusterSetRNGStream(cl, iseed = iseed) # clusterExport(cl, c("getAFIs","getMIs","permuteOnce.mgcfa","permuteOnce.mimic")) argList$cl <- cl argList$X <- 1:nPermute argList$fun <- paste("permuteOnce", modelType, sep = ".") tempppl <- function(...) { parallel::parLapply(...) 
} permuDist <- do.call(tempppl, args = argList) if (stopTheCluster) parallel::stopCluster(cl) ## restore old RNG type if (fullCall$old_RNG[1] != "L'Ecuyer-CMRG") RNGkind(fullCall$old_RNG[1]) } else { argList$X <- 1:nPermute argList$FUN <- paste("permuteOnce", modelType, sep = ".") permuDist <- do.call(lapply, args = argList) } ## extract AFI distribution if (length(AFI.obs) > 1) { AFI.dist <- as.data.frame(t(sapply(permuDist, function(x) x$AFI))) } if (length(AFI.obs) == 1L) { AFI.dist <- data.frame(sapply(permuDist, function(x) x$AFI)) colnames(AFI.dist) <- names(AFI.obs) } ## identify badness-of-fit measures badness <- grepl(pattern = "fmin|chi|aic|bic|rmr|rmsea|cn|sic|hqc", x = names(AFI.obs), ignore.case = TRUE) ## calculate all one-directional p-values AFI.pval <- mapply(FUN = function(x, y, b) { if (b) return(mean(x >= y, na.rm = TRUE)) mean(x <= y, na.rm = TRUE) }, x = unclass(AFI.dist), y = AFI.obs, b = badness) ## extract distribution of maximum modification indices MI.dist <- as.numeric(unlist(lapply(permuDist, function(x) x$MI))) ## calculate Tukey-adjusted p values for modification indices if (!is.null(param)) { MI.obs$tukey.p.value <- sapply(MI.obs$X2, function(i) mean(i <= MI.dist, na.rm = TRUE)) MI.obs <- as.data.frame(unclass(MI.obs)) rownames(MI.obs) <- names(param) } ## anything extra? 
if (!missing(extra)) { extra.dist <- do.call(rbind, lapply(permuDist, function(x) x$extra)) } else extra.dist <- data.frame(NULL) ## save parameter table for show/summary methods PT <- as.data.frame(lavaan::parTable(con)) PT$par <- paste0(PT$lhs, PT$op, PT$rhs) if (length(lavaan::lavInspect(con, "group"))) PT$group.label[PT$group > 0] <- lavaan::lavInspect(con, "group.label")[PT$group[PT$group > 0] ] ## return observed results, permutation p values, and ANOVA results if (is.null(uncon)) { delta <- lavaan::anova(con) } else { delta <- lavaan::anova(uncon, con) } ANOVA <- sapply(delta[,c("Chisq diff","Df diff","Pr(>Chisq)")], function(x) x[2]) out <- new("permuteMeasEq", PT = PT, modelType = modelType, ANOVA = ANOVA, AFI.obs = AFI.obs, AFI.dist = AFI.dist, AFI.pval = AFI.pval, MI.obs = MI.obs, MI.dist = MI.dist, extra.obs = extra.obs, extra.dist = extra.dist, n.Permutations = nPermute, n.Converged = sum(!is.na(AFI.dist[,1])), n.nonConverged = sapply(permuDist, function(x) x$n.nonConverged), n.Sparse = sapply(permuDist, function(x) x$n.Sparse), oldSeed = fullCall$oldSeed) out } ## methods setMethod("show", "permuteMeasEq", function(object) { ## print warning if there are nonConverged permutations if (object@n.Permutations != object@n.Converged) { warning(paste("Only", object@n.Converged, "out of", object@n.Permutations, "models converged within", max(object@n.nonConverged), "attempts per permutation.\n\n")) } ## print ANOVA cat("Omnibus p value based on parametric chi-squared difference test:\n\n") print(round(object@ANOVA, digits = 3)) ## print permutation results cat("\n\nOmnibus p values based on nonparametric permutation method: \n\n") AFI <- data.frame(AFI.Difference = object@AFI.obs, p.value = object@AFI.pval) class(AFI) <- c("lavaan.data.frame","data.frame") print(AFI, nd = 3) invisible(object) }) setMethod("summary", "permuteMeasEq", function(object, alpha = .05, nd = 3, extra = FALSE) { ## print warning if there are nonConverged permutations if 
(object@n.Permutations != object@n.Converged) { warning(paste("Only", object@n.Converged, "out of", object@n.Permutations, "models converged within", max(object@n.nonConverged), "attempts per permutation.\n\n")) } ## print ANOVA cat("Omnibus p value based on parametric chi-squared difference test:\n\n") print(round(object@ANOVA, digits = nd)) ## print permutation results cat("\n\nOmnibus p values based on nonparametric permutation method: \n\n") AFI <- data.frame(AFI.Difference = object@AFI.obs, p.value = object@AFI.pval) class(AFI) <- c("lavaan.data.frame","data.frame") print(AFI, nd = nd) ## print extras or DIF test results, if any were requested if (extra && length(object@extra.obs)) { cat("\n\nUnadjusted p values of extra statistics,\n", "based on permutation distribution of each statistic: \n\n") MI <- data.frame(Statistic = object@extra.obs) class(MI) <- c("lavaan.data.frame","data.frame") MI$p.value <- sapply(names(object@extra.dist), function(nn) { mean(abs(object@extra.dist[,nn]) >= abs(object@extra.obs[nn]), na.rm = TRUE) }) MI$flag <- ifelse(MI$p.value < alpha, "* ", "") print(MI, nd = nd) } else if (length(object@MI.dist)) { cat("\n\n Modification indices for equality constrained parameter estimates,\n", "with unadjusted 'p.value' based on chi-squared distribution and\n", "adjusted 'tukey.p.value' based on permutation distribution of the\n", "maximum modification index per iteration: \n\n") MI <- do.call(paste("summ", object@modelType, sep = "."), args = list(object = object, alpha = alpha)) print(MI, nd = nd) ## print messages about potential DIF if (all(MI$tukey.p.value > alpha)) { cat("\n\n No equality constraints were flagged as significant.\n\n") return(invisible(MI)) } if (object@modelType == "mgcfa") { cat("\n\nThe following equality constraints were flagged as significant:\n\n") for (i in which(MI$tukey.p.value < alpha)) { cat("Parameter '", MI$parameter[i], "' may differ between Groups '", MI$group.lhs[i], "' and '", MI$group.rhs[i], "'.\n", 
sep = "") } cat("\nUse lavTestScore(..., epc = TRUE) on your constrained model to", "display expected parameter changes for these equality constraints\n\n") } } else return(invisible(object)) invisible(MI) }) summ.mgcfa <- function(object, alpha) { MI <- object@MI.obs class(MI) <- c("lavaan.data.frame","data.frame") PT <- object@PT eqPar <- rbind(PT[PT$plabel %in% MI$lhs, ], PT[PT$plabel %in% MI$rhs, ]) MI$flag <- "" MI$parameter <- "" MI$group.lhs <- "" MI$group.rhs <- "" for (i in 1:nrow(MI)) { par1 <- eqPar$par[ eqPar$plabel == MI$lhs[i] ] par2 <- eqPar$par[ eqPar$plabel == MI$rhs[i] ] MI$parameter[i] <- par1 MI$group.lhs[i] <- eqPar$group.label[ eqPar$plabel == MI$lhs[i] ] MI$group.rhs[i] <- eqPar$group.label[ eqPar$plabel == MI$rhs[i] ] if (par1 != par2) { myMessage <- paste0("Constraint '", MI$lhs[i], "==", MI$rhs[i], "' refers to different parameters: \n'", MI$lhs[i], "' is '", par1, "' in group '", MI$group.lhs[i], "'\n'", MI$rhs[i], "' is '", par2, "' in group '", MI$group.rhs[i], "'\n") warning(myMessage) } if (MI$tukey.p.value[i] < alpha) MI$flag[i] <- "* -->" } MI } summ.mimic <- function(object, alpha) { MI <- object@MI.obs class(MI) <- c("lavaan.data.frame","data.frame") MI$flag <- ifelse(MI$tukey.p.value < alpha, "* ", "") MI } setMethod("hist", "permuteMeasEq", function(x, ..., AFI, alpha = .05, nd = 3, printLegend = TRUE, legendArgs = list(x = "topleft")) { histArgs <- list(...) 
histArgs$x <- x@AFI.dist[[AFI]] if (is.null(histArgs$col)) histArgs$col <- "grey69" histArgs$freq <- !grepl("chi", AFI) histArgs$ylab <- if (histArgs$freq) "Frequency" else "Probability Density" if (printLegend) { if (is.null(legendArgs$box.lty)) legendArgs$box.lty <- 0 if (nd < length(strsplit(as.character(1 / alpha), "")[[1]]) - 1) { warning(paste0("The number of digits argument (nd = ", nd , ") is too low to display your p value at the", " same precision as your requested alpha level (alpha = ", alpha, ")")) } if (x@AFI.pval[[AFI]] < (1 / 10^nd)) { pVal <- paste(c("< .", rep(0, nd - 1),"1"), collapse = "") } else { pVal <- paste("=", round(x@AFI.pval[[AFI]], nd)) } } delta <- length(x@MI.dist) > 0L && x@modelType == "mgcfa" if (grepl("chi", AFI)) { ####################################### Chi-squared ChiSq <- x@AFI.obs[AFI] DF <- x@ANOVA[2] histArgs$xlim <- range(c(ChiSq, x@AFI.dist[[AFI]], qchisq(c(.01, .99), DF))) xVals <- seq(histArgs$xlim[1], histArgs$xlim[2], by = .1) theoDist <- dchisq(xVals, df = DF) TheoCrit <- round(qchisq(p = alpha, df = DF, lower.tail = FALSE), 2) Crit <- quantile(histArgs$x, probs = 1 - alpha) if (ChiSq > histArgs$xlim[2]) histArgs$xlim[2] <- ChiSq if (delta) { histArgs$main <- expression(Permutation~Distribution~of~Delta*chi^2) histArgs$xlab <- expression(Delta*chi^2) if (printLegend) { legendArgs$legend <- c(bquote(Theoretical~Delta*chi[Delta*.(paste("df =", DF))]^2 ~ Distribution), bquote(Critical~chi[alpha~.(paste(" =", alpha))]^2 == .(round(TheoCrit, nd))), bquote(.(paste("Permuted Critical Value =", round(Crit, nd)))), bquote(Observed~Delta*chi^2 == .(round(ChiSq, nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } else { histArgs$main <- expression(Permutation~Distribution~of~chi^2) histArgs$xlab <- expression(chi^2) if (printLegend) { legendArgs$legend <- c(bquote(Theoretical~chi[.(paste("df =", DF))]^2 ~ Distribution), bquote(Critical~chi[alpha~.(paste(" =", alpha))]^2 == .(round(TheoCrit, nd))), 
bquote(.(paste("Permuted Critical Value =", round(Crit, nd)))), bquote(Observed~chi^2 == .(round(ChiSq, nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } H <- do.call(hist, c(histArgs["x"], plot = FALSE)) histArgs$ylim <- c(0, max(H$density, theoDist)) if (printLegend) { legendArgs <- c(legendArgs, list(lty = c(2, 2, 1, 1, 0, 0), lwd = c(2, 2, 2, 3, 0, 0), col = c("black","black","black","red","",""))) } } else { ################################################### other AFIs badness <- grepl(pattern = "fmin|aic|bic|rmr|rmsea|cn|sic|hqc", x = AFI, ignore.case = TRUE) if (badness) { Crit <- quantile(histArgs$x, probs = 1 - alpha) } else { Crit <- quantile(histArgs$x, probs = alpha) } histArgs$xlim <- range(histArgs$x, x@AFI.obs[AFI]) if (delta) { histArgs$main <- bquote(~Permutation~Distribution~of~Delta*.(toupper(AFI))) histArgs$xlab <- bquote(~Delta*.(toupper(AFI))) if (printLegend) { legendArgs$legend <- c(bquote(Critical~Delta*.(toupper(AFI))[alpha~.(paste(" =", alpha))] == .(round(Crit, nd))), bquote(Observed~Delta*.(toupper(AFI)) == .(round(x@AFI.obs[AFI], nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } else { histArgs$main <- paste("Permutation Distribution of", toupper(AFI)) histArgs$xlab <- toupper(AFI) if (printLegend) { legendArgs$legend <- c(bquote(Critical~.(toupper(AFI))[alpha~.(paste(" =", alpha))] == .(round(Crit, nd))), bquote(Observed~.(toupper(AFI)) == .(round(x@AFI.obs[AFI], nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } if (printLegend) { legendArgs <- c(legendArgs, list(lty = c(1, 1, 0, 0), lwd = c(2, 3, 0, 0), col = c("black","red","",""))) } } ## print histogram (and optionally, print legend) suppressWarnings({ do.call(hist, histArgs) if (grepl("chi", AFI)) { lines(x = xVals, y = theoDist, lwd = 2, lty = 2) abline(v = TheoCrit, col = "black", lwd = 2, lty = 2) } abline(v = Crit, col = "black", lwd = 2) abline(v = x@AFI.obs[AFI], col = "red", lwd = 3) if (printLegend) 
do.call(legend, legendArgs) }) ## return arguments to create histogram (and optionally, legend) invisible(list(hist = histArgs, legend = legendArgs)) }) semTools/R/poolMAlloc.R0000644000175100001440000011542413000201061014450 0ustar hornikusers##PoolMAlloc poolMAlloc <- function(nPerPar, facPlc, nAllocStart, nAllocAdd=0, parceloutput=0, syntax, dataset, stopProp, stopValue, selectParam = NULL, double = FALSE, checkConv=FALSE, names='default', leaveout=0, useTotalAlloc=FALSE, ...) { StartTimeFull <- proc.time() #### start clock for calculating loop runtime if(is.character(dataset)){ dataset <- read.csv(dataset) } { nloop <- 0 ### start loop counter nAllocStarttemp <- nAllocStart ### save initial nAllocStart for final calculation options(max.print=1000000) ### allow many tables to be outputted BreakCounter <- NA ### start break counter for double and checkConv options repeat { StartTime <- proc.time() #### start clock for calculating loop runtime nloop <- nloop + 1 ## add to loop counter if (double==TRUE & is.na(BreakCounter)==FALSE) BreakCounter <- BreakCounter + 1 ### add to break counter after stopping criteria reached if (checkConv==TRUE & is.na(BreakCounter)==FALSE) BreakCounter <- BreakCounter + 1 ### add to break counter after stopping criteria reached if (nloop > 1) { ##Final output## if (is.na(BreakCounter)==TRUE){ Parmn_revFinal <- Parmn_rev[[nloop-1]] ## save parameter estimates and pooled se table from previous loop for final output nConvergedOutput <- nConverged ## save # allocations converged from previous loop for final output nConvergedProperOutput <- nConvergedProper ## save # allocations converged and proper from previous loop for final output PooledSEwithinvarFinal <- PooledSEwithinvar ## save pooled se within variance for final output PooledSEbetweenvarFinal <- PooledSEbetweenvar ## save pooled se between variance for final output PooledSEFinal <- PooledSE ## save pooled se between variance for final output FitsumOutput <- Fitsum ## save Fit table 
from previous loop for final output nAllocOutput <- nAllocStart - nAllocAdd #### save nAlloc for output AllocationsOutput <- Allocations ## save datasets from previous loop for final output ParamFinal <- Param } ParamPooledSE_temp <- ParamPooledSE ### make current "pre-loop" parameter estimates a temporary vector for comparison with "post-loop" estimates ParamTest_temp <- ParamTest #### make current "pre-loop" parameter estimates a temporary vector for comparison with "post-loop" estimates (parameter estimates only) PooledSE_temp <- PooledSE #### make current "pre-loop" parameter estimates a temporary vector for comparison with "post-loop" estimates (pooled SE only) ParamPoolSEdiffmin <- abs(ParamPooledSE_temp*stopProp) ### create vector of minimum differences to continue looping ParamPoolSEdiffmin[ParamPoolSEdiffmini & facPlc 0){ ##Bug was here. With 1 factor Maxv=0. Skip this with a single factor for (i in 1:Maxv){ Mat <- match(i+1, Locate) if(Npp[Mat] == Npp[Mat-1]){ stop('** WARNING! ** Parcels incorrectly specified. Check input!')} } } ## warning message if parcel crosses into multiple factors ## vector, parcel to which each variable belongs ## vector, factor to which each variables belongs ## if variables are in the same parcel, but different factors ## error message given in output Onevec <- facPlc - round(facPlc) NleaveA <- length(Onevec) - sum(Onevec==0) NleaveP <- sum(nPerPar==1) if(NleaveA < NleaveP){ print('** WARNING! ** Single-variable parcels have been requested. Check input!')} if(NleaveA > NleaveP) print('** WARNING! ** More non-parceled variables have been requested than provided for in parcel vector. Check input!') if(length(names)>1){ if(length(names) != length(nPerPar)){ print('** WARNING! ** Number of parcel names provided not equal to number of parcels requested. Check input!')}} Data <- c(1:ncol(dataset)) ## creates a vector of the number of indicators ## e.g. 
for three indicators, c(1, 2, 3) Nfactors <- max(facPlc) ## scalar, number of factors Nindicators <- length(Data) ## scalar, number of indicators Npar <- length(nPerPar) ## scalar, number of parcels Rmize <- runif(Nindicators, 1, Nindicators) ## create vector of randomly ordered numbers, ## length of number of indicators Data <- rbind(facPlc, Rmize, Data) ## "Data" becomes object of three rows, consisting of ## 1) factor to which each indicator belongs ## (in order to preserve indicator/factor ## assignment during randomization) ## 2) randomly order numbers ## 3) indicator number Results <- matrix(numeric(0), nAllocStart, Nindicators) ##create empty matrix for parcel allocation matrix Pin <- nPerPar[1] for (i in 2:length(nPerPar)){ Pin <- c(Pin, nPerPar[i]+Pin[i-1]) ## creates vector which indicates the range ## of columns (endpoints) in each parcel } for (i in 1:nAllocStart) { Data[2,]<-runif(Nindicators, 1, Nindicators) ## Replace second row with newly randomly ordered numbers Data <- Data[, order(Data[2,])] ## Order the columns according ## to the values of the second row Data <- Data[, order(Data[1,])] ## Order the columns according ## to the values of the first row ## in order to preserve factor assignment Results[i,] <- Data[3,] ## assign result to allocation matrix } Alpha <- rbind(Results[1,], dataset) ## bind first random allocation to dataset "Alpha" Allocations <- list() ## create empty list for allocation data matrices for (i in 1:nAllocStart){ Ineff <- rep(NA, ncol(Results)) Ineff2 <- c(1:ncol(Results)) for (inefficient in 1:ncol(Results)){ Ineff[Results[i,inefficient]] <- Ineff2[inefficient] } Alpha[1,] <- Ineff ## replace first row of dataset matrix ## with row "i" from allocation matrix Beta <- Alpha[, order(Alpha[1,])] ## arrangle dataset columns by values of first row ## assign to temporary matrix "Beta" Temp <- matrix(NA, nrow(dataset), Npar) ## create empty matrix for averaged parcel variables TempAA <- if(length(1:Pin[1])>1) Beta[2:nrow(Beta) , 
1:Pin[1]] else cbind(Beta[2:nrow(Beta) , 1:Pin[1]],Beta[2:nrow(Beta) , 1:Pin[1]]) Temp[, 1] <- rowMeans(TempAA,na.rm = TRUE) ## fill first column with averages from assigned indicators for (al in 2:Npar) { Plc <- Pin[al-1]+1 ## placeholder variable for determining parcel width TempBB <- if(length(Plc:Pin[al])>1) Beta[2:nrow(Beta) , Plc:Pin[al]] else cbind(Beta[2:nrow(Beta) , Plc:Pin[al]],Beta[2:nrow(Beta) , Plc:Pin[al]]) Temp[, al] <- rowMeans(TempBB,na.rm = TRUE) ## fill remaining columns with averages from assigned indicators } if(length(names)>1){ colnames(Temp) <- names } Allocations[[i]] <- Temp ## assign result to list of parcel datasets } Param <- list() ## list for parameter estimated for each imputation Fitind <- list() ## list for fit indices estimated for each imputation Converged <- list() ## list for whether or not each allocation converged ProperSolution <- list() ## list for whether or not each allocation has proper solutions ConvergedProper <- list() ## list for whether or not each allocation is converged and proper for (i in 1:(nAllocStart)){ data_parcel <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) ## convert allocation matrix to dataframe for model estimation fit <- lavaan::sem(syntax, control=list(iter.max=100), data=data_parcel, ...) 
## estimate model in lavaan if (lavaan::lavInspect(fit, "converged")==TRUE){ Converged[[i]] <- 1 } else Converged[[i]] <- 0 ## determine whether or not each allocation converged Param[[i]] <- lavaan::parameterEstimates(fit)[,c("lhs","op","rhs","est","se","z","pvalue","ci.lower","ci.upper")] ## assign allocation parameter estimates to list if (lavaan::lavInspect(fit, "post.check") & Converged[[i]] == 1) { ProperSolution[[i]] <- 1 } else ProperSolution[[i]] <- 0 ## determine whether or not each allocation has proper solutions if (any(is.na(Param[[i]][,5]==TRUE))) ProperSolution[[i]] <- 0 ## make sure each allocation has existing SE if (Converged[[i]]==1 & ProperSolution[[i]]==1) { ConvergedProper[[i]] <- 1 } else ConvergedProper[[i]] <- 0 ## determine whether or not each allocation converged and has proper solutions if (ConvergedProper[[i]]==0) Param[[i]][,4:9] <- matrix(data=NA,nrow(Param[[i]]),6) ## make parameter estimates null for nonconverged, improper solutions if (ConvergedProper[[i]]==1) { Fitind[[i]] <- lavaan::fitMeasures(fit, c("chisq", "df", "cfi", "tli", "rmsea")) } else Fitind[[i]] <- c(NA,NA,NA,NA,NA) ### assign allocation parameter estimates to list } nConverged <- Reduce("+",Converged) ## count number of converged allocations nProperSolution <- Reduce("+",ProperSolution) ## count number of allocations with proper solutions nConvergedProper <- Reduce("+",ConvergedProper) ## count number of allocations with proper solutions if (nConvergedProper==0) stop("All allocations failed to converge and/or yielded improper solutions for a given loop.") ## stop program if no allocations converge Parmn <- Param[[1]] ## assign first parameter estimates to mean dataframe if(is.null(selectParam)) selectParam <- 1:nrow(Parmn) ParSE <- matrix(NA, nrow(Parmn), nAllocStart) ParSEmn <- Parmn[,5] Parsd <- matrix(NA, nrow(Parmn), nAllocStart) ## assign parameter estimates for S.D. 
calculation Fitmn <- Fitind[[1]] ## assign first fit indices to mean dataframe Fitsd <- matrix(NA, length(Fitmn), nAllocStart) ## assign fit indices for S.D. calculation Sigp <- matrix(NA, nrow(Parmn), nAllocStart) ## assign p-values to calculate percentage significant Fitind <- data.frame(Fitind) ## convert fit index table to dataframe ParamSEsquared <- list() #### create empty list for squared SE for (i in 1:nAllocStart){ ParamSEsquared[[i]] <- cbind(Param[[i]][,5],Param[[i]][,5]) if (any(is.na(ParamSEsquared[[i]])==TRUE)) ParamSEsquared[[i]] <- 0 ParamSEsquared[[i]] <- apply(as.data.frame(ParamSEsquared[[i]]),1,prod) ### square SE for each allocation Parsd[,i] <- Param[[i]][,4] ## assign parameter estimates for S.D. estimation ParSE[,i] <- Param[[i]][,5] Sigp[,ncol(Sigp)-i+1] <- Param[[i]][,7] ## assign p-values to calculate percentage significant Fitsd[,i] <- Fitind[[i]] ## assign fit indices for S.D. estimation } Sigp <- Sigp + .45 Sigp <- apply(Sigp, c(1,2), round) Sigp <- 1 - as.vector(rowMeans(Sigp, na.rm = TRUE)) ## calculate percentage significant parameters Parsum <- cbind(apply(Parsd,1,mean,na.rm=TRUE),apply(Parsd,1,sd,na.rm=TRUE),apply(Parsd,1,max,na.rm=TRUE),apply(Parsd,1,min,na.rm=TRUE),apply(Parsd,1,max,na.rm=TRUE)-apply(Parsd,1,min,na.rm=TRUE), Sigp) colnames(Parsum) <- c("Avg Est.","S.D.","MAX","MIN","Range", "% Sig") ## calculate parameter S.D., minimum, maximum, range, bind to percentage significant ParSEmn <- Parmn[,1:3] ParSEfn <- cbind(ParSEmn,apply(ParSE,1,mean,na.rm=TRUE),apply(ParSE,1,sd,na.rm=TRUE),apply(ParSE,1,max,na.rm=TRUE),apply(ParSE,1,min,na.rm=TRUE),apply(ParSE,1,max,na.rm=TRUE)-apply(ParSE,1,min,na.rm=TRUE)) colnames(ParSEfn) <- c("lhs", "op", "rhs", "Avg SE","S.D.","MAX","MIN","Range") Fitsum <- cbind(apply(Fitsd,1,mean,na.rm=TRUE),apply(Fitsd,1,sd,na.rm=TRUE),apply(Fitsd,1,max,na.rm=TRUE),apply(Fitsd,1,min,na.rm=TRUE),apply(Fitsd,1,max,na.rm=TRUE)-apply(Fitsd,1,min,na.rm=TRUE)) rownames(Fitsum) <- c("chisq", "df", "cfi", "tli", 
"rmsea") ## calculate fit S.D., minimum, maximum, range Parmn[,4:ncol(Parmn)] <- Parmn[,4:ncol(Parmn)] / nConvergedProper ## divide totalled parameter estimates by number converged allocations Parmn <- Parmn[,1:3] ## remove confidence intervals from output Parmn <- cbind(Parmn, Parsum) ## bind parameter average estimates to cross-allocation information Fitmn <- Fitmn / nConvergedProper ## divide totalled fit indices by number converged allocations pChisq <- list() ## create empty list for Chi-square p-values sigChisq <- list() ## create empty list for Chi-square significance for (i in 1:nAllocStart){ pChisq[[i]] <- (1-pchisq(Fitsd[1,i],Fitsd[2,i])) ## calculate p-value for each Chi-square if (is.na(pChisq[[i]])==FALSE & pChisq[[i]]<.05) { sigChisq[[i]] <- 1 } else sigChisq[[i]] <- 0 } ## count number of allocations with significant chi-square PerSigChisq <- (Reduce("+",sigChisq))/nConvergedProper*100 PerSigChisq <- round(PerSigChisq,4) ## calculate percent of allocations with significant chi-square PerSigChisqCol <- c(PerSigChisq,"n/a","n/a","n/a","n/a") ## create list of Chi-square Percent Significant and "n/a" options(stringsAsFactors=FALSE) ## set default option to allow strings into dataframe without converting to factors Fitsum <- data.frame(Fitsum,PerSigChisqCol) colnames(Fitsum) <- c("Avg Ind","S.D.","MAX","MIN","Range","% Sig") ### bind to fit averages (changed to dataframe) options(stringsAsFactors=TRUE) ## unset option to allow strings into dataframe without converting to factors; PooledSEwithinvar <- Reduce("+",ParamSEsquared)/nConvergedProper #### calculate within variance for pooled SE PooledSEbetweenvar <- Parmn[,5]^2 ## calculate between variance for pooled SE PooledSE <- sqrt(PooledSEwithinvar + PooledSEbetweenvar + PooledSEbetweenvar/nConvergedProper) ### calculate pooled SE ParamPooledSE <- c(Parmn[,4],PooledSE) ### create vector of "post-loop" paramater estimates and pooled SE ParamTest <- Parmn[,4] #### create vector of parameter estimates if 
(nloop>1){ ParamPoolSEdiff <- abs(ParamPooledSE_temp - ParamPooledSE) ### create vector of absolute differences between "pre-loop" and "post-loop" vectors Paramdiff <- abs(ParamTest_temp - ParamTest) #### create vector of absolute differences between "pre-loop" and "post-loop" vectors (parameter estimates only) PooledSEdiff <- abs(PooledSE - PooledSE_temp) #### create vector of absolute differences between "pre-loop" and "post-loop" vectors (pooled SE only) ParamPoolSEdifftest <- ParamPoolSEdiff - ParamPoolSEdiffmin ParamPoolSEdifftest[ParamPoolSEdifftest<=0] <- 0 ParamPoolSEdifftest[ParamPoolSEdifftest>0] <- 1 ##create vector of difference between (absolute differences between "pre-loop" and "post-loop" vectors) ##and (minimum differences required to continue looping) and set all negative values to 0 Paramdifftest <- Paramdiff - ParamDiffMin Paramdifftest[Paramdifftest<=0] <- 0 Paramdifftest[Paramdifftest>0] <- 1 PooledSEdifftest <- PooledSEdiff - PooledSEmin PooledSEdifftest[PooledSEdifftest<=0] <- 0 PooledSEdifftest[PooledSEdifftest>0] <- 1 ##create vector of difference between (absolute differences between "pre-loop" and "post-loop" vectors) ##and (minimum differences required to continue looping) and set all negative values to 0 (parameter estimates and pooled SE separately) if (nloop==2){ ParamPoolSEdifftesttable <- cbind(ParamPoolSEdifftest) Paramdifftesttable <- cbind(Paramdifftest) PooledSEdifftesttable <- cbind(PooledSEdifftest) ### create table of whether or not parameter estimates/ pooled se met stopping criteria for each parameter } if (nloop>2){ ParamPoolSEdifftesttable <- cbind(ParamPoolSEdifftesttable,ParamPoolSEdifftest) Paramdifftesttable <- cbind(Paramdifftesttable,Paramdifftest) PooledSEdifftesttable <- cbind(PooledSEdifftesttable,PooledSEdifftest) ##create table indicating whether or not parameter estimates/ pooled se met stopping criteria for each parameter } PropStopParam <- 
1-(Reduce("+",Paramdifftesttable[selectParam,nloop-1])/length(selectParam)) PropStopPooled <- 1-(Reduce("+",PooledSEdifftesttable[selectParam,nloop-1])/length(selectParam)) PropStopParamPooled <- 1-(Reduce("+",ParamPoolSEdifftesttable[c(selectParam,selectParam+nrow(Parmn)),nloop-1])/(2*length(selectParam))) ##calculate proportion of values meeting stopping criteria if (checkConv==TRUE & is.na(BreakCounter)==TRUE) { print(nAllocStart) print("Proportion of pooled estimates meeting stop criteria:") print(PropStopParam) print("Proportion of pooled SE meeting stop criteria:") print(PropStopPooled) #### print number of allocations, proportion of parameters meeting stop criteria, and proportion of pooled SE meeting stop criteria } if (checkConv==FALSE){ print(nAllocStart) print("Proportion of pooled estimates meeting stop criteria:") print(PropStopParam) print("Proportion of pooled SE meeting stop criteria:") print(PropStopPooled) #### print number of allocations, proportion of parameters meeting stop criteria, and proportion of pooled SE meeting stop criteria } } nAllocStart <- nAllocStart + nAllocAdd ### update # allocations for potential next loop StopTime <- proc.time() - StartTime #### calculate time taken to run loop print("Runtime:") print(StopTime) #### print time needed for loop Parmn_rev <- list() Parmn_rev[[nloop]] <- cbind(Parmn[,1:4],PooledSE) Parmn_rev[[nloop]][,4:5] <- sapply(Parmn_rev[[nloop]][,4:5],as.numeric) colnames(Parmn_rev[[nloop]]) <- c("lhs","op","rhs","Estimate","Pooled SE") #### calc estimates + pooled SE table if (nloop==1){ Param_revTemp <- cbind(Parmn[,1:3],Parmn_rev[[nloop]][,4]) Param_revTemp[,4] <- as.numeric(Param_revTemp[,4]) Param_revTotal <- cbind(Param_revTemp) PooledSE_revTemp <- cbind(Parmn[,1:3],Parmn_rev[[nloop]][,5]) PooledSE_revTemp[,4] <- as.numeric(PooledSE_revTemp[,4]) PooledSE_revTotal <- cbind(PooledSE_revTemp) } if (nloop>1){ Param_revTemp <- cbind(Parmn_rev[[nloop]][,4]) Param_revTemp <- as.numeric(Param_revTemp) 
Param_revTotal <- cbind(Param_revTotal,Param_revTemp) PooledSE_revTemp <- cbind(Parmn_rev[[nloop]][,5]) PooledSE_revTemp <- as.numeric(PooledSE_revTemp) PooledSE_revTotal <- cbind(PooledSE_revTotal,PooledSE_revTemp) } ## create table of parameter estimates and pooled se for each loop if (nloop==1){ ParamTotal <- Param FitindTotal <- Fitind AllocationsTotal <- Allocations nAllocTotal <- nAllocStart - nAllocAdd nConvergedTotal <- nConverged nProperSolutionTotal <- nProperSolution nConvergedProperTotal <- nConvergedProper } if (nloop>1){ ParamTotal <- c(ParamTotal, Param) FitindTotal <- c(FitindTotal, Fitind) AllocationsTotal <- c(AllocationsTotal, Allocations) nAllocTotal <- nAllocTotal + nAllocStart - nAllocAdd nConvergedTotal <- nConverged + nConvergedTotal nProperSolution <- nProperSolution + nProperSolutionTotal nConvergedProperTotal <- nConvergedProper + nConvergedProperTotal } #print(Parmn_rev[[nloop]]) #print(ParSEfn) #### print all relevant tables if (nloop>1 & double==TRUE & is.na(BreakCounter)==FALSE & BreakCounter==2){ if (Reduce("+",ParamPoolSEdifftesttable[c(selectParam,selectParam+nrow(Parmn_rev[[nloop]])),nloop-1])==0) break; ### with double option selected, break loop after two consecutive hits } if (nloop>1 & double==TRUE){ if (Reduce("+",ParamPoolSEdifftesttable[c(selectParam,selectParam+nrow(Parmn_rev[[nloop]])),nloop-1])==0){ BreakCounter <- 1 } else BreakCounter <- NA ### with double option selected, start break counter if stopping criteria are met, otherwise reset BreakCounter to NA } if (nloop>1 & checkConv==TRUE & is.na(BreakCounter)==TRUE){ if (Reduce("+",ParamPoolSEdifftesttable[c(selectParam,selectParam+nrow(Parmn_rev[[nloop]])),nloop-1])==0) BreakCounter <- 0 ### with checkConv option, start break counter if stopping criteria are met } if (nloop>1 & double==FALSE & checkConv==FALSE){ if (Reduce("+",ParamPoolSEdifftesttable[c(selectParam,selectParam+nrow(Parmn_rev[[nloop]])),nloop-1])==0) break; } ### break loop if differences between 
"pre-loop" and "post-loop" estimates are sufficiently small if (nAllocAdd==0) break; ### break loop if nAllocAdd is 0 if (checkConv==TRUE & is.na(BreakCounter)==FALSE & BreakCounter==9) break; ### for checkConv option, break loop after 9 loops after stopping criteria met } ##Write objects for Output when nAllocAdd is set to 0 if (nAllocAdd==0){ Parmn_revFinal <- Parmn_rev[[nloop]] ## save parameter estimates and pooled se table from previous loop for final output nConvergedOutput <- nConverged ## save # allocations converged from previous loop for final output nConvergedProperOutput <- nConvergedProper ## save # allocations converged and proper from previous loop for final output PooledSEwithinvarFinal <- PooledSEwithinvar ## save pooled se within variance for final output PooledSEbetweenvarFinal <- PooledSEbetweenvar ## save pooled se between variance for final output PooledSEFinal <- PooledSE ## save pooled se between variance for final output FitsumOutput <- Fitsum ## save Fit table from previous loop for final output nAllocOutput <- nAllocStart - nAllocAdd #### save nAlloc for output AllocationsOutput <- Allocations ## save datasets from previous loop for final output } ##Write parceled datasets if(as.vector(regexpr("/",parceloutput))!=-1){ replist<-matrix(NA,nAllocOutput,1) for (i in 1:(nAllocOutput)){ colnames(AllocationsOutput[[i]])<-names write.table(AllocationsOutput[[i]],paste(parceloutput,'/parcelruns',i,'.dat',sep=''),row.names=FALSE,col.names=TRUE) replist[i,1]<-paste('parcelruns',i,'.dat',sep='') } write.table(replist,paste(parceloutput,"/parcelrunsreplist.dat",sep=''),quote=FALSE,row.names=FALSE,col.names=FALSE) } ##Results for using all Allocations if (useTotalAlloc==TRUE) { ParmnTotal <- ParamTotal[[1]] ## assign first parameter estimates to mean dataframe ParSETotal <- matrix(NA, nrow(ParmnTotal), nAllocTotal) ParSEmnTotal <- ParmnTotal[,5] ParsdTotal <- matrix(NA, nrow(ParmnTotal), nAllocTotal) ## assign parameter estimates for S.D. 
calculation FitmnTotal <- FitindTotal[[1]] ## assign first fit indices to mean dataframe FitsdTotal <- matrix(NA, length(FitmnTotal), nAllocTotal) ## assign fit indices for S.D. calculation SigpTotal <- matrix(NA, nrow(ParmnTotal), nAllocTotal) ## assign p-values to calculate percentage significant FitindTotal <- data.frame(FitindTotal) ## convert fit index table to dataframe ParamSEsquaredTotal <- list() #### create empty list for squared SE for (i in 1:nAllocTotal){ ParamSEsquaredTotal[[i]] <- cbind(ParamTotal[[i]][,5],ParamTotal[[i]][,5]) if (any(is.na(ParamSEsquaredTotal[[i]])==TRUE)) ParamSEsquaredTotal[[i]] <- 0 ParamSEsquaredTotal[[i]] <- apply(as.data.frame(ParamSEsquaredTotal[[i]]),1,prod) ### square SE for each allocation ParsdTotal[,i] <- ParamTotal[[i]][,4] ## assign parameter estimates for S.D. estimation ParSETotal[,i] <- ParamTotal[[i]][,5] SigpTotal[,ncol(Sigp)-i+1] <- ParamTotal[[i]][,7] ## assign p-values to calculate percentage significant FitsdTotal[,i] <- FitindTotal[[i]] ## assign fit indices for S.D. 
estimation } SigpTotal <- SigpTotal + .45 SigpTotal <- apply(SigpTotal, c(1,2), round) SigpTotal <- 1 - as.vector(rowMeans(SigpTotal, na.rm = TRUE)) ## calculate percentage significant parameters ParsumTotal <- cbind(apply(ParsdTotal,1,mean,na.rm=TRUE),apply(ParsdTotal,1,sd,na.rm=TRUE),apply(ParsdTotal,1,max,na.rm=TRUE),apply(ParsdTotal,1,min,na.rm=TRUE),apply(ParsdTotal,1,max,na.rm=TRUE)-apply(ParsdTotal,1,min,na.rm=TRUE), SigpTotal) colnames(ParsumTotal) <- c("Avg Est.","S.D.","MAX","MIN","Range", "% Sig") ## calculate parameter S.D., minimum, maximum, range, bind to percentage significant ParSEmnTotal <- ParmnTotal[,1:3] ParSEfnTotal <- cbind(ParSEmnTotal,apply(ParSETotal,1,mean,na.rm=TRUE),apply(ParSETotal,1,sd,na.rm=TRUE),apply(ParSETotal,1,max,na.rm=TRUE),apply(ParSETotal,1,min,na.rm=TRUE),apply(ParSETotal,1,max,na.rm=TRUE)-apply(ParSETotal,1,min,na.rm=TRUE)) colnames(ParSEfnTotal) <- c("lhs", "op", "rhs", "Avg SE","S.D.","MAX","MIN","Range") FitsumTotal <- cbind(apply(FitsdTotal,1,mean,na.rm=TRUE),apply(FitsdTotal,1,sd,na.rm=TRUE),apply(FitsdTotal,1,max,na.rm=TRUE),apply(FitsdTotal,1,min,na.rm=TRUE),apply(FitsdTotal,1,max,na.rm=TRUE)-apply(FitsdTotal,1,min,na.rm=TRUE)) rownames(FitsumTotal) <- c("chisq", "df", "cfi", "tli", "rmsea") ## calculate fit S.D., minimum, maximum, range ParmnTotal[,4:ncol(ParmnTotal)] <- ParmnTotal[,4:ncol(Parmn)] / nConvergedProperTotal ## divide totalled parameter estimates by number converged allocations ParmnTotal <- ParmnTotal[,1:3] ## remove confidence intervals from output ParmnTotal <- cbind(ParmnTotal, ParsumTotal) ## bind parameter average estimates to cross-allocation information FitmnTotal <- FitmnTotal / nConvergedProperTotal ## divide totalled fit indices by number converged allocations pChisqTotal <- list() ## create empty list for Chi-square p-values sigChisqTotal <- list() ## create empty list for Chi-square significance for (i in 1:nAllocTotal){ pChisqTotal[[i]] <- (1-pchisq(FitsdTotal[1,i],FitsdTotal[2,i])) ## 
calculate p-value for each Chi-square if (is.na(pChisqTotal[[i]])==FALSE & pChisqTotal[[i]]<.05) { sigChisqTotal[[i]] <- 1 } else sigChisqTotal[[i]] <- 0 } ## count number of allocations with significant chi-square PerSigChisqTotal <- (Reduce("+",sigChisqTotal))/nConvergedProperTotal*100 PerSigChisqTotal <- round(PerSigChisqTotal,4) ## calculate percent of allocations with significant chi-square PerSigChisqColTotal <- c(PerSigChisqTotal,"n/a","n/a","n/a","n/a") ## create list of Chi-square Percent Significant and "n/a" (used for fit summary table) options(stringsAsFactors=FALSE) ## set default option to allow strings into dataframe without converting to factors FitsumTotal <- data.frame(FitsumTotal,PerSigChisqColTotal) colnames(FitsumTotal) <- c("Avg Ind","S.D.","MAX","MIN","Range","% Sig") ### bind to fit averages (changed to dataframe) options(stringsAsFactors=TRUE) ## unset option to allow strings into dataframe without converting to factors; PooledSEwithinvarTotal <- Reduce("+",ParamSEsquaredTotal)/nConvergedProperTotal #### calculate within variance for pooled SE PooledSEbetweenvarTotal <- ParmnTotal[,5]^2 ## calculate between variance for pooled SE PooledSETotal <- sqrt(PooledSEwithinvarTotal + PooledSEbetweenvarTotal + PooledSEbetweenvarTotal/nConvergedProperTotal) ### calculate pooled SE ParamPooledSETotal <- c(ParmnTotal[,4],PooledSETotal) ### create vector of "post-loop" paramater estimates and pooled SE ParamTestTotal <- ParmnTotal[,4] #### create vector of parameter estimates #Parmn_revTotal <- list() Parmn_revTotal <- cbind(ParmnTotal[,1:4],PooledSETotal) Parmn_revTotal[,4:5] <- sapply(Parmn_revTotal[,4:5],as.numeric) colnames(Parmn_revTotal) <- c("lhs","op","rhs","Estimate","Pooled SE") #### calc estimates + pooled SE table df_tTotal <- (nConvergedProperTotal-1)*(1 + (nConvergedProperTotal*PooledSEwithinvarTotal)/(nConvergedProperTotal*PooledSEbetweenvarTotal + PooledSEbetweenvarTotal))^2 crit_tTotal <- abs(qt(0.05/2, df_tTotal)) ### compute degrees 
of freedom and critical value for t pval_zTotal <- 2*(1-pnorm(abs(Parmn_revTotal[,4]/PooledSETotal))) pval_tTotal <- 2*(1-pt(abs(Parmn_revTotal[,4]/PooledSETotal),df=df_tTotal)) ### calc p-value for z and t distribution CI95_Lower_zTotal <- Parmn_revTotal[,4]-1.959963985*PooledSETotal CI95_Upper_zTotal <- Parmn_revTotal[,4]+1.959963985*PooledSETotal ## compute confidence interval for z-tests CI95_Lower_tTotal <- Parmn_revTotal[,4]-crit_tTotal*PooledSETotal CI95_Upper_tTotal <- Parmn_revTotal[,4]+crit_tTotal*PooledSETotal ## compute confidence interval for t-tests Parmn_revTotal <- cbind(Parmn_revTotal,pval_zTotal,CI95_Lower_zTotal,CI95_Upper_zTotal,pval_tTotal,CI95_Lower_tTotal,CI95_Upper_tTotal) colnames(Parmn_revTotal) <- c("lhs","op","rhs","Pooled Est","Pooled SE","pval_z","CI95_LB_z","CI95_UB_z","pval_t","CI95_LB_t","CI95_UB_t") ## add confidence intervals to final output table for (i in 1:nrow(Parmn_revTotal)){ if (Parmn_revTotal[i,5]==0) Parmn_revTotal[i,6:11] <- NA } ## make all z/t p-values and CI's NA for fixed parameters (or when pooled se = 0) RPAVTotal <- (PooledSEbetweenvarTotal+(PooledSEbetweenvarTotal/(nConvergedProperTotal)))/PooledSEwithinvarTotal PPAVTotal <- (((nConvergedProperTotal+1)/(nConvergedProperTotal))*PooledSEbetweenvarTotal)/(PooledSEwithinvarTotal+(((nConvergedProperTotal+1)/(nConvergedProperTotal))*PooledSEbetweenvarTotal)) PAVtableTotal <- cbind(ParmnTotal[1:3],RPAVTotal,PPAVTotal) ### create table for RPAV and PPAV Parmn_revTotal[,4:11] <- apply(Parmn_revTotal[,4:11], 2, round, digits = 4) FitsumTotal[,1:5] <- apply(FitsumTotal[,1:5], 2, round, digits = 4) PAVtableTotal[,4:5] <- apply(PAVtableTotal[,4:5], 2, round, digits = 4) ### round output to three digits FitsumTotal[2,2:5] <- c("n/a","n/a","n/a","n/a") ## Change df row to "n/a" for sd, max, min, and range ConvergedProperSumTotal <- rbind((nConvergedTotal)/(nAllocTotal),(nConvergedProperTotal)/(nAllocTotal)) rownames(ConvergedProperSumTotal) <- c("Converged","Converged and 
Proper") colnames(ConvergedProperSumTotal) <- "Proportion of Allocations" ### create table summarizing proportions of converged allocations and allocations with proper solutions } ##Output results if (nAllocAdd!=0){ if (nloop==2) PropParamMet <- matrix(data=1,nrow(Parmn),1) if (nloop==2) PropPooledSEMet <- matrix(data=1,nrow(Parmn),1) if (nloop !=2) PropParamMet <- (1-apply(Paramdifftesttable[,1:nloop-1],1,mean))*100 if (nloop !=2) PropPooledSEMet <- (1-apply(PooledSEdifftesttable[,1:nloop-1],1,mean))*100 #### calc percent of loops where stopping criteria were met for parameters and pooledse FirstParamMet <- apply(Paramdifftesttable==0,1,which.max) FirstPooledSEMet <- apply(PooledSEdifftesttable==0,1,which.max) #### determine first loop in which stopping criteria were met for parameters and pooledse } if (nAllocAdd==0){ PropParamMet <- matrix(data=NA,nrow(Parmn),1) PropPooledSEMet <- matrix(data=NA,nrow(Parmn),1) FirstParamMet <- matrix(data=NA,nrow(Parmn),1) FirstPooledSEMet <- matrix(data=NA,nrow(Parmn),1) } ### if only running one loop, change columns regarding stopping criteria to NA PerLoops <- cbind(Parmn[,1:3],PropParamMet,PropPooledSEMet) colnames(PerLoops) <- c("lhs","op","rhs","Param Criteria Met","PooledSE Criteria Met") FirstLoops <- cbind(Parmn[,1:3],FirstParamMet,FirstPooledSEMet) colnames(FirstLoops) <- c("lhs","op","rhs","Param Criteria Met","PooledSE Criteria Met") NumbAllocations <- cbind(Parmn[,1:3],(FirstParamMet-1)*nAllocAdd+nAllocStarttemp,(FirstPooledSEMet-1)*nAllocAdd+nAllocStarttemp) colnames(NumbAllocations) <- c("lhs","op","rhs","Param Criteria Met","PooledSE Criteria Met") ### create tables with parameter estimates, pooled SE, and critical value if (nAllocAdd!=0){ for (i in 1:nrow(Parmn)){ if ((i %in% selectParam)==FALSE) PerLoops[i,4:5] <- NA if ((i %in% selectParam)==FALSE) FirstLoops[i,4:5] <- NA if ((i %in% selectParam)==FALSE) NumbAllocations[i,4:5] <- NA ### if parameter is not used for stopping criteria, change "percent of loops 
when met" and "loop when first met" to NA } } df_t <- (nConvergedProperOutput-1)*(1 + (nConvergedProperOutput*PooledSEwithinvarFinal)/(nConvergedProperOutput*PooledSEbetweenvarFinal + PooledSEbetweenvarFinal))^2 crit_t <- abs(qt(0.05/2, df_t)) ### compute degrees of freedom and critical value for t pval_z <- 2*(1-pnorm(abs(Parmn_revFinal[,4]/PooledSEFinal))) pval_t <- 2*(1-pt(abs(Parmn_revFinal[,4]/PooledSEFinal),df=df_t)) ### calc p-value for z and t distribution CI95_Lower_z <- Parmn_revFinal[,4]-1.959963985*PooledSEFinal CI95_Upper_z <- Parmn_revFinal[,4]+1.959963985*PooledSEFinal ## compute confidence interval for z-tests CI95_Lower_t <- Parmn_revFinal[,4]-crit_t*PooledSEFinal CI95_Upper_t <- Parmn_revFinal[,4]+crit_t*PooledSEFinal ## compute confidence interval for t-tests Parmn_revFinal <- cbind(Parmn_revFinal,pval_z,CI95_Lower_z,CI95_Upper_z,pval_t,CI95_Lower_t,CI95_Upper_t) colnames(Parmn_revFinal) <- c("lhs","op","rhs","Pooled Est","Pooled SE","pval_z","CI95_LB_z","CI95_UB_z","pval_t","CI95_LB_t","CI95_UB_t") ## add confidence intervals to final output table for (i in 1:nrow(Parmn_revFinal)){ if (Parmn_revFinal[i,5]==0) Parmn_revFinal[i,6:11] <- NA } ## make all z/t p-values and CI's NA for fixed parameters (or when pooled se = 0) RPAV <- (PooledSEbetweenvarFinal+(PooledSEbetweenvarFinal/(nConvergedProperOutput)))/PooledSEwithinvarFinal PPAV <- (((nConvergedProperOutput+1)/(nConvergedProperOutput))*PooledSEbetweenvarFinal)/(PooledSEwithinvarFinal+(((nConvergedProperOutput+1)/(nConvergedProperOutput))*PooledSEbetweenvarFinal)) PAVtable <- cbind(Parmn[1:3],RPAV,PPAV) ### create table for RPAV and PPAV colnames(Param_revTotal) <- c("lhs","op","rhs",c(1:nloop)) colnames(PooledSE_revTotal) <- c("lhs","op","rhs",c(1:nloop)) ### create column names for tables with parameters estimates and pooled se for each loop Param_revTotal[,4:(nloop+3)] <- sapply(Param_revTotal[,4:(nloop+3)], as.numeric) PooledSE_revTotal[,4:(nloop+3)] <- 
sapply(PooledSE_revTotal[,4:(nloop+3)], as.numeric) Parmn_revFinal[,4:11] <- apply(Parmn_revFinal[,4:11], 2, round, digits = 4) FitsumOutput[,1:5] <- apply(FitsumOutput[,1:5], 2, round, digits = 4) if (nAllocAdd!=0) Param_revTotal[,4:(nloop+3)] <- apply(Param_revTotal[,4:(nloop+3)], 2, round, digits = 8) if (nAllocAdd==0) Param_revTotal[,4] <- round(Param_revTotal[,4],8) if (nAllocAdd!=0) PooledSE_revTotal[,4:(nloop+3)] <- apply(PooledSE_revTotal[,4:(nloop+3)], 2, round, digits = 8) if (nAllocAdd==0) PooledSE_revTotal[,4] <- round(PooledSE_revTotal[,4],8) PAVtable[,4:5] <- apply(PAVtable[,4:5], 2, round, digits = 4) ### round output to three digits FitsumOutput[2,2:5] <- c("n/a","n/a","n/a","n/a") ## Change df row to "n/a" for sd, max, min, and range ConvergedProperSum <- rbind((nConvergedOutput)/(nAllocOutput),(nConvergedProperOutput)/(nAllocOutput)) rownames(ConvergedProperSum) <- c("Converged","Converged and Proper") colnames(ConvergedProperSum) <- "Proportion of Allocations" ### create table summarizing proportions of converged allocations and allocations with proper solutions #Output_mod <- list(Parmn_revFinal,FitsumOutput,ConvergedProperSum,nAllocOutput,PAVtable,Param_revTotal,PooledSE_revTotal) #names(Output_mod) <- c("Estimates","Fit","Proportion of Converged and Proper Allocations", "Allocations needed for stability (M)", #"Indices to quantify uncertainty in estimates due to sampling vs. allocation variability","Pooled Estimates by Loop","Pooled SE by Loop") ### output summary for model estimation when checkConv is true (includes results by loop) StopTimeFull <- proc.time() - StartTimeFull #### calculate time taken to run loop if (useTotalAlloc==FALSE){ Output_mod <- list(Parmn_revFinal,FitsumOutput,ConvergedProperSum,nAllocOutput,PAVtable,StopTimeFull[[3]]/60) names(Output_mod) <- c("Estimates","Fit","Proportion of Converged and Proper Allocations", "Allocations needed for stability (M)","Indices to quantify uncertainty in estimates due to sampling vs. 
allocation variability","Total runtime (minutes)") ### output summary for model estimation } if (useTotalAlloc==TRUE){ Output_mod <- list(Parmn_revFinal,FitsumOutput,ConvergedProperSum,nAllocOutput,PAVtable,Parmn_revTotal,FitsumTotal,ConvergedProperSumTotal,nAllocTotal,PAVtableTotal,StopTimeFull[[3]]/60) names(Output_mod) <- c("Estimates (using M allocations)","Fit (using M allocations)","Proportion of Converged and Proper Allocations (using M allocations)", "Allocations needed for stability (M)","Indices to quantify uncertainty in estimates due to sampling vs. allocation variability (using M allocations)", "Estimates (using all allocations)","Fit (using all allocations)","Proportion of Converged and Proper Allocations (using all allocations)", "Total Allocations used by algorithm","Indices to quantify uncertainty in estimates due to sampling vs. allocation variability (using all allocations)","Total runtime (minutes)") ### output summary for model estimation } } return(Output_mod) ### returns output for model } semTools/R/missingBootstrap.R0000644000175100001440000005761713000201061015767 0ustar hornikusers### Terrence D. 
## Jorgensen
## Last updated: 26 February 2016
## Savalei & Yuan's (2009) model-based bootstrap for missing data

## S4 result class returned by bsBootMiss():
##   time      - list of proc.time() deltas (transformation step, bootstrap fitting step)
##   transData - the transformed data set (chi-squared = 0 under H0 transformation)
##   bootDist  - vector of chi-squared statistics from the bootstrap samples
##   origChi   - chi-squared statistic from the original (untransformed) data
##   df        - model degrees of freedom
##   bootP     - bootstrap p value: proportion of bootDist >= origChi
setClass("BootMiss", representation(time = "list", transData = "data.frame",
                                    bootDist = "vector", origChi = "numeric",
                                    df = "numeric", bootP = "numeric"))

######################################### ## Define methods for class "BootMiss" ## #########################################

## Print the observed chi-squared, its theoretical p value, and the
## bootstrapped p value. Returns the object invisibly (standard show contract).
setMethod("show", "BootMiss", function(object) {
  cat("Chi-Squared = ", object@origChi,
      "\nDegrees of Freedom = ", object@df,
      "\nTheoretical p value = ", pchisq(object@origChi, object@df,
                                         lower.tail = FALSE),
      "\n i.e., pchisq(", object@origChi, ", df = ", object@df,
      ", lower.tail = FALSE)\n",
      "\nBootstrapped p value = ", object@bootP, "\n\n", sep = "")
  invisible(object)
})

## Print timing information and compare the first two moments of the
## theoretical chi-squared distribution (mean = df, var = 2*df) with the
## empirical moments of the bootstrap distribution, then delegate to show().
setMethod("summary", "BootMiss", function(object) {
  cat("Time elapsed to transform the data:\n")
  print(object@time$transform)
  cat("\nTime elapsed to fit the model to", length(object@bootDist),
      "bootstrapped samples:\n")
  print(object@time$fit)
  cat("\nMean of Theoretical Distribution = DF =", object@df,
      "\nVariance of Theoretical Distribution = 2*DF =", 2*object@df,
      "\n\nMean of Bootstrap Distribution =", mean(object@bootDist),
      "\nVariance of Bootstrap Distribution =", var(object@bootDist),
      "\n\n")
  show(object)
  invisible(object)
})

## Plot the bootstrap distribution of chi-squared as a density histogram,
## overlay the theoretical chi-squared(df) density, and mark three reference
## lines: the theoretical critical value, the bootstrap critical value
## (empirical 1-alpha quantile), and the observed statistic.
##   alpha       - significance level used for both critical values
##   nd          - number of digits for rounded values in the legend
##   printLegend - whether to draw the legend
##   legendArgs  - extra arguments forwarded to legend(); position defaults
##                 to "topleft"
## Invisibly returns the argument lists used for hist() and legend() so the
## plot can be reproduced or customized by the caller.
setMethod("hist", "BootMiss", function(x, ..., alpha = .05, nd = 2,
                                       printLegend = TRUE,
                                       legendArgs = list(x = "topleft")) {
  ChiSq <- x@origChi
  DF <- x@df
  bootDist <- x@bootDist
  bCrit <- round(quantile(bootDist, probs = 1 - alpha), nd)
  theoDist <- dchisq(seq(.1, max(c(ChiSq, bootDist)), by = .1), df = DF)
  Crit <- round(qchisq(p = alpha, df = DF, lower.tail = FALSE), nd)
  ## x-axis range must include the observed statistic and both critical values
  Lim <- c(0, max(c(ChiSq, bootDist, Crit)))
  if (ChiSq > Lim[2]) Lim[2] <- ChiSq
  ## user-supplied graphical arguments win; fill in defaults only when absent
  histArgs <- list(...)
  histArgs$x <- bootDist
  histArgs$freq <- FALSE  # density scale, so the theoretical curve overlays correctly
  if (is.null(histArgs$col)) histArgs$col <- "grey69"
  if (is.null(histArgs$xlim)) histArgs$xlim <- Lim
  if (is.null(histArgs$main)) histArgs$main <- expression("Model-Based Bootstrap Distribution of" ~ chi^2)
  if (is.null(histArgs$ylab)) histArgs$ylab <- "Probability Density"
  if (is.null(histArgs$xlab)) histArgs$xlab <- expression(chi^2)
  if (printLegend) {
    ## warn when nd cannot display p at the precision implied by alpha
    if (nd < length(strsplit(as.character(1 / alpha), "")[[1]]) - 1) {
      warning(paste0("The number of digits argument (nd = ", nd ,
                     ") is too low to display your p value at the",
                     " same precision as your requested alpha level (alpha = ",
                     alpha, ")"))
    }
    ## format the bootstrap p value: "< .01"-style when below display precision,
    ## otherwise "= <rounded value>"
    pVal <- round(x@bootP, nd)
    if (x@bootP < (1 / 10^nd)) {
      pVal <- paste(c("< .", rep(0, nd - 1), "1"), collapse = "")
    } else {
      ## BUG FIX: the original computed paste("=", ...) but discarded the
      ## result, so the legend showed a bare number instead of "= <p>"
      pVal <- paste("=", round(x@bootP, nd))
    }
    if (is.null(legendArgs$box.lty)) legendArgs$box.lty <- 0
    if (is.null(legendArgs$lty)) legendArgs$lty <- c(1, 2, 2, 1, 0, 0)
    if (is.null(legendArgs$lwd)) legendArgs$lwd <- c(2, 2, 2, 3, 0, 0)
    #if (is.null(legendArgs$cex)) legendArgs$cex <- c(1.1, 1, 1, 1, 1, 1)
    if (is.null(legendArgs$col)) legendArgs$col <- c("black","black","grey69","red","", "")
    legendArgs$legend <- c(bquote(chi[.(paste("df =", DF))]^2),
                           bquote(Critical ~ chi[alpha ~ .(paste(" =", alpha))]^2 == .(Crit)),
                           bquote(Bootstrap~Critical~chi[alpha ~ .(paste(" =", alpha))]^2 == .(bCrit)),
                           expression(Observed ~ chi^2),
                           bquote(.("")),  # spacer row in the legend
                           bquote(Bootstrap ~ italic(p) ~~ .(pVal)))
  }
  ## pre-compute the histogram (no plotting) so ylim can cover both the
  ## empirical density and the theoretical curve
  H <- do.call(hist, c(histArgs["x"], plot = FALSE))
  histArgs$ylim <- c(0, max(H$density, theoDist))
  suppressWarnings({
    do.call(hist, histArgs)
    lines(x = seq(.1, max(c(ChiSq, bootDist)), by = .1), y = theoDist, lwd = 2)
    abline(v = Crit, col = "black", lwd = 2, lty = 2)
    abline(v = bCrit, col = "grey69", lwd = 2, lty = 2)
    abline(v = ChiSq, col = "red", lwd = 3)
    if (printLegend) do.call(legend, legendArgs)
  })
  ## return arguments to create histogram (and optionally, legend)
  invisible(list(hist = histArgs, legend = legendArgs))
})
## Function to
execute Transformation 1 on a single missing-data pattern trans1 <- function(MDpattern, rowMissPatt, dat, Sigma, Mu) { myRows <- which(rowMissPatt == MDpattern) X <- apply(dat[myRows, ], 2, scale, scale = FALSE) observed <- !is.na(X[1, ]) Xreduced <- X[ , observed] Mreduced <- as.numeric(Mu[observed]) SigmaChol <- chol(Sigma[observed, observed]) S <- t(Xreduced) %*% Xreduced / nrow(X) Areduced <- t(SigmaChol) %*% t(solve(chol(S))) Yreduced <- t(Areduced %*% t(Xreduced) + Mreduced) Y <- replace(X, !is.na(X), Yreduced) Y } ## Function to execute Transformation 2 on a single group trans2 <- function(dat, Sigma, Mu, EMcov) { ## Computing Function of A (eq. 12), whose root is desired eq12 <- function(A) { ga <- rep(0, pStar) for (j in 1:J) { Tj <- Mjs[[j]] %*% A %*% Hjs[[j]] %*% A %*% Mjs[[j]] - Mjs[[j]] ga <- ga + Njs[j] * Dupinv %*% c(Tj) # same as vech(Tj) } ga } ## Computing Derivative of Function of A (eq. 13) eq13 <- function(A) { deriv12 <- matrix(0, nrow = pStar, ncol = pStar) for (j in 1:J) { Tj1 <- Mjs[[j]] %*% A %*% Hjs[[j]] deriv12 <- deriv12 + 2*Njs[j]*Dupinv %*% kronecker(Tj1, Mjs[[j]]) %*% Dup } deriv12 } ## get missing data patterns R <- ifelse(is.na(dat), 1, 0) rowMissPatt <- apply(R, 1, function(x) paste(x, collapse = "")) MDpattern <- unique(rowMissPatt) ## sample size within each MD pattern Njs <- sapply(MDpattern, function(patt) sum(rowMissPatt == patt)) J <- length(MDpattern) # number of MD patterns p <- ncol(dat) # number of variables in model pStar <- p*(p + 1) / 2 # number of nonredundant covariance elements ## create empty lists for each MD pattern Xjs <- vector("list", J) Hjs <- vector("list", J) Mjs <- vector("list", J) ## Create Duplication Matrix and its inverse (Magnus & Neudecker, 1999) Dup <- lavaan::duplicationMatrix(p) Dupinv <- solve(t(Dup) %*% Dup) %*% t(Dup) ## step through each MD pattern, populate Hjs and Mjs for (j in 1:J) { Xjs[[j]] <- apply(dat[rowMissPatt == MDpattern[j], ], 2, scale, scale = FALSE) if (!is.matrix(Xjs[[j]])) 
Xjs[[j]] <- t(Xjs[[j]]) observed <- !is.na(Xjs[[j]][1, ]) Sj <- t(Xjs[[j]]) %*% Xjs[[j]] / Njs[j] Hjs[[j]] <- replace(Sj, is.na(Sj), 0) Mjs[[j]] <- replace(Sj, !is.na(Sj), solve(Sigma[observed, observed])) Mjs[[j]] <- replace(Mjs[[j]], is.na(Mjs[[j]]), 0) } ## Compute starting Values for A if (is.null(EMcov)) { A <- diag(p) } else { EMeig <- eigen(EMcov) EMrti <- EMeig$vectors %*% diag(1 / sqrt(EMeig$values)) %*% t(EMeig$vectors) Sigeig <- eigen(Sigma) Sigrt <- Sigeig$vectors %*% diag(sqrt(Sigeig$values)) %*% t(Sigeig$vectors) B <- Sigrt %*% EMrti A <- .5*(B + t(B)) } ## Newton Algorithm for finding root (eq. 14) crit <- .1 a <- c(A) fA <- eq12(A) while (crit > 1e-11) { dvecF <- eq13(A) a <- a - Dup %*% solve(dvecF) %*% fA A <- matrix(a, ncol = p) fA <- eq12(A) crit <- max(abs(fA)) } ## Transform dataset X to dataset Y Yjs <- Xjs for (j in 1:J) { observed <- !is.na(Xjs[[j]][1, ]) XjReduced <- Xjs[[j]][ , observed, drop = FALSE] Aj <- A[observed, observed, drop = FALSE] Mj <- as.numeric(Mu[observed]) Yj <- t(Aj %*% t(XjReduced) + Mj) Yjs[[j]] <- replace(Yjs[[j]], !is.na(Yjs[[j]]), Yj) } Y <- as.data.frame(do.call("rbind", Yjs)) colnames(Y) <- colnames(dat) Y } ## Function to execute Transformation 3 on a single group -- TRANSFORMATION DOES NOT RETURN CH-SQ = 0 trans3 <- function(dat, Sigma, Mu, EMcov) { # Computing Saturated Means as a Function of A (eq. B1 in Appendix B) mut <- function(A) { M <- matrix(0, ncol = 1, nrow = p) for (j in 1:J) { M <- M + Njs[[j]] * Mjs[[j]] %*% A %*% Ybarjs[[j]] } Mjtoti %*% M } # Computing Function of A (eq. 18) whose root is desired eq18 <- function(A) { ga <- rep(0, pStar) mutilda <- mut(A) for (j in 1:J) { Tj <- Mjs[[j]] %*% A %*% Hjs[[j]] %*% A %*% Mjs[[j]] - Mjs[[j]] dif <- A %*% Ybarjs[[j]] - mutilda middle <- dif %*% t(dif) Tjnew <- Tj + Mjs[[j]] %*% middle %*% Mjs[[j]] ga <- ga + Njs[j] * Dupinv %*% c(Tjnew) } ga } # Computing Derivative of Function eq. 
18 deriv18 <- function(A) { d18 <- matrix(0, nrow = pStar, ncol = pStar) for (j in 1:J) { Tj1 <- Mjs[[j]] %*% A %*% Hjs[[j]] mutilda <- mut(A) dif <- A %*% Ybarjs[[j]] - mutilda Tj2 <- Mjs[[j]] %*% dif %*% t(Ybarjs[[j]]) Tj3 <- kronecker(Mjs[[j]] %*% dif, Mjs[[j]]) %*% Mjtoti %*% Tj3add d18 <- d18 + 2*Njs[j]*Dupinv %*% ((kronecker((Tj1 + Tj2), Mjs[[j]])) - Tj3) %*% Dup } d18 } ## get missing data patterns R <- ifelse(is.na(dat), 1, 0) rowMissPatt <- apply(R, 1, function(x) paste(x, collapse = "")) MDpattern <- unique(rowMissPatt) ## sample size within each MD pattern Njs <- sapply(MDpattern, function(patt) sum(rowMissPatt == patt)) J <- length(MDpattern) # number of MD patterns p <- ncol(dat) # number of variables in model pStar <- p*(p + 1) / 2 # number of nonredundant covariance elements ## create empty lists for each MD pattern Xjs <- vector("list", J) Ybarjs <- vector("list", J) Hjs <- vector("list", J) Mjs <- vector("list", J) Mjtot <- matrix(0, ncol = p, nrow = p) Tj3add <- matrix(0, nrow = p, ncol = p * p) ## Create Duplication Matrix and its inverse (Magnus & Neudecker, 1999) Dup <- lavaan::duplicationMatrix(p) Dupinv <- solve(t(Dup) %*% Dup) %*% t(Dup) ## step through each MD pattern, populate Hjs and Mjs for (j in 1:J) { Xjs[[j]] <- apply(dat[rowMissPatt == MDpattern[j], ], 2, scale, scale = FALSE) if (!is.matrix(Xjs[[j]])) Xjs[[j]] <- t(Xjs[[j]]) observed <- !is.na(Xjs[[j]][1, ]) pj <- p - sum(observed) means <- colMeans(dat[rowMissPatt == MDpattern[j], ]) Ybarjs[[j]] <- replace(means, is.na(means), 0) Sj <- t(Xjs[[j]]) %*% Xjs[[j]] / Njs[j] Hjs[[j]] <- replace(Sj, is.na(Sj), 0) Mjs[[j]] <- replace(Sj, !is.na(Sj), solve(Sigma[observed, observed])) Mjs[[j]] <- replace(Mjs[[j]], is.na(Mjs[[j]]), 0) Mjtot <- Mjtot + Njs[[j]] * Mjs[[j]] Tj3add <- Tj3add + Njs[[j]] * kronecker(t(Ybarjs[[j]]), Mjs[[j]]) } Mjtoti <- solve(Mjtot) ## Compute starting Values for A if (is.null(EMcov)) { A <- diag(p) } else { EMeig <- eigen(EMcov) EMrti <- EMeig$vectors %*% diag(1 / 
## Draw a single bootstrap sample from the (transformed) data. Rows are
## resampled with replacement independently within each group, and the
## groups are stacked into one data frame. A group-membership column
## (named by 'group', filled with 'group.label') is added; it is simply
## ignored in a single-group analysis.
getBootSample <- function(groupDat, group, group.label) {
  resampled <- lapply(seq_along(groupDat), function(g) {
    gDat <- groupDat[[g]]
    gDat[ , group] <- group.label[g]
    ## within-group resampling with replacement (same n as the group)
    rows <- sample(nrow(gDat), nrow(gDat), replace = TRUE)
    gDat[rows, ]
  })
  do.call("rbind", resampled)
}
## Fit the lavaan model to a single bootstrapped sample and return its
## chi-squared statistic (the scaled statistic when available, else the
## standard one). Returns c(chisq = NA) if estimation fails or does not
## converge, so one bad sample cannot abort the whole bootstrap loop.
fitBootSample <- function(dat, args, suppress) {
  args$data <- dat
  lavaanlavaan <- function(...) {
    lavaan::lavaan(...)
  }
  ## BUG FIX: the original called do.call() unguarded, so an estimation
  ## error propagated and the exists("fit") fallback below was
  ## unreachable. Wrapping the call in try() makes the guard effective.
  try(if (suppress) {
    fit <- suppressWarnings(do.call(lavaanlavaan, args))
  } else {
    fit <- do.call(lavaanlavaan, args)
  }, silent = TRUE)
  ## inherits = FALSE: only accept a 'fit' created above, never one
  ## found in an enclosing environment
  if (!exists("fit", inherits = FALSE)) return(c(chisq = NA))
  if (lavaan::lavInspect(fit, "converged")) {
    chisq <- lavaan::lavInspect(fit, "fit")[c("chisq", "chisq.scaled")]
  } else {
    chisq <- NA
  }
  ## prefer the scaled statistic when it exists
  if (is.na(chisq[2])) return(chisq[1]) else return(chisq[2])
}
if (hasArg(x)) { rawData <- lapply(lavaan::lavInspect(x, "data"), as.data.frame) for (g in seq_along(rawData)) colnames(rawData[[g]]) <- lavaan::lavNames(x) ChiSquared <- lavaan::lavInspect(x, "fit")[c("chisq", "chisq.scaled")] ChiSquared <- ifelse(is.na(ChiSquared[2]), ChiSquared[1], ChiSquared[2]) group <- lavaan::lavInspect(x, "group") if (length(group) == 0) group <- "group" group.label <- lavaan::lavInspect(x, "group.label") if (length(group.label) == 0) group.label <- 1 Sigma <- lavaan::lavInspect(x, "cov.ov") Mu <- lavaan::lavInspect(x, "mean.ov") EMcov <- lavaan::lavInspect(x, "sampstat")$cov } else { ## If no lavaan object is supplied, check that required arguments are. suppliedData <- c(hasArg(rawData), hasArg(Sigma), hasArg(Mu)) if (!all(suppliedData)) { stop("Without a lavaan fitted object, user must supply raw data and model-implied moments.") } if (!hasArg(model) & !(transDataOnly | bootSamplesOnly)) { stop("Without model syntax or fitted lavaan object, user can only call this function to save transformed data or bootstrapped samples.") } if (!hasArg(ChiSquared) & !(transDataOnly | bootSamplesOnly)) { stop("Without a fitted lavaan object or ChiSquared argument, user can only call this function to save transformed data, bootstrapped samples, or bootstrapped chi-squared values.") } if (!any(c(transDataOnly, bootSamplesOnly))) { if (!is.numeric(ChiSquared)) stop("The \"ChiSquared\" argument must be numeric.") } ## If user supplies one-group data & moments, convert to lists. 
if (class(rawData) == "data.frame") { rawData <- list(rawData) } if (class(rawData) != "list") { stop("The \"rawData\" argument must be a data.frame or list of data frames.") } else { if (!all(sapply(rawData, is.data.frame))) stop("Every element of \"rawData\" must be a data.frame") } if (class(Sigma) == "matrix") Sigma <- list(Sigma) if (is.numeric(Mu)) Mu <- list(Mu) ## check whether EMcov was supplied for starting values in Trans2/Trans3 if (!hasArg(EMcov)) { EMcov <- vector("list", length(Sigma)) } else { if (class(EMcov) == "matrix") EMcov <- list(EMcov) ## check EMcov is symmetric and dimensions match Sigma for (g in seq_along(EMcov)) { if (!isSymmetric(EMcov[[g]])) stop("EMcov in group ", g, " not symmetric.") unequalDim <- !all(dim(EMcov[[g]]) == dim(Sigma[[g]])) if (unequalDim) stop("Unequal dimensions in Sigma and EMcov.") } } ## Check the number of groups by the size of the lists. unequalGroups <- !all(length(rawData) == c(length(Sigma), length(Mu))) if (unequalGroups) stop("Unequal number of groups in rawData, Sigma, Mu. For multiple-group models, rawData must be a list of data frames, NOT a single data frame with a \"group\" column.") ## In each group, check Sigma is symmetric and dimensions match rawData and Mu. for (g in seq_along(rawData)) { if (!isSymmetric(Sigma[[g]])) stop("Sigma in group ", g, " not symmetric.") unequalDim <- !all(ncol(rawData[[g]]) == c(nrow(Sigma[[g]]), length(Mu[[g]]))) if (unequalDim) stop("Unequal dimensions in rawData, Sigma, Mu.") } ## Check for names of group levels. If NULL, assign arbitrary ones. if (!hasArg(group)) group <- "group" if (!is.character(group)) stop("The \"group\" argument must be a character string.") if (is.null(names(rawData))) { group.label <- paste0("g", seq_along(rawData)) } else { group.label <- names(rawData) } } ## save a copy as myTransDat, whose elements will be replaced iteratively by ## group and by missing data pattern within group. 
myTransDat <- rawData names(myTransDat) <- group.label output <- list() ######################### ## Data Transformation ## ######################### for (g in seq_along(group.label)) { if (transformation == 1) { ## get missing data patterns R <- ifelse(is.na(rawData[[g]]), 1, 0) rowMissPatt <- apply(R, 1, function(x) paste(x, collapse = "")) patt <- unique(rowMissPatt) myRows <- lapply(patt, function(x) which(rowMissPatt == x)) ## for each pattern, apply transformation tStart <- Sys.time() transDatList <- lapply(patt, trans1, rowMissPatt = rowMissPatt, dat = rawData[[g]], Sigma = Sigma[[g]], Mu = Mu[[g]]) output$timeTrans <- Sys.time() - tStart for (i in seq_along(patt)) myTransDat[[g]][myRows[[i]], ] <- transDatList[[i]] } else { tStart <- Sys.time() myTransDat[[g]] <- SavaleiYuan(dat = rawData[[g]],vSigma = Sigma[[g]], Mu = Mu[[g]], EMcov = EMcov[[g]]) output$timeTrans <- Sys.time() - tStart } } ## option to end function here if (transDataOnly) { for (g in seq_along(myTransDat)) myTransDat[[g]][ , group] <- group.label[g] ## option to write transformed data to a file if (writeTransData) { ## Set a few options, if the user didn't. 
if (!hasArg(writeArgs)) writeArgs <- list(file = "transformedData.dat", row.names = FALSE, na = "-999") if (!exists("file", where = writeArgs)) writeTransArgs$file <- "transformedData.dat" if (!exists("row.names", where = writeArgs)) writeArgs$row.names <- FALSE if (!exists("na", where = writeArgs)) writeArgs$na <- "-999" ## add grouping variable and bind together into one data frame for (g in seq_along(myTransDat)) myTransDat[[g]][ , group] <- group.label[g] writeArgs$x <- do.call("rbind", myTransDat) ## write to file, print details to screen do.call("write.table", writeArgs) cat("Transformed data was written to file \"", writeArgs$file, "\" in:\n\n", getwd(), "\n\nunless path specified by user in 'file' argument.\n", sep = "") return(invisible(writeArgs$x)) } return(do.call("rbind", myTransDat)) } ############################################# ## Bootstrap distribution of fit statistic ## ############################################# ## draw bootstrap samples if (!is.null(seed)) set.seed(seed) bootSamples <- lapply(1:nBoot, function(x) getBootSample(myTransDat, group, group.label)) ## option to write bootstrapped samples to file(s) if (writeBootData) { ## Set a few options, if the user didn't. 
if (!hasArg(writeArgs)) writeArgs <- list(file = "bootstrappedSamples.dat", row.names = FALSE, na = "-999") if (!exists("file", where = writeArgs)) writeTransArgs$file <- "bootstrappedSamples.dat" if (!exists("row.names", where = writeArgs)) writeArgs$row.names <- FALSE if (!exists("na", where = writeArgs)) writeArgs$na <- "-999" ## add indicator for bootstrapped sample, bind together into one data frame for (b in seq_along(bootSamples)) bootSamples[[b]]$bootSample <- b writeArgs$x <- do.call("rbind", bootSamples) ## write to file, print details to screen do.call("write.table", writeArgs) cat("Bootstrapped samples written to file \"", writeArgs$file, "\" in:\n\n", getwd(), "\n\nunless path specified by user in 'file' argument.\n", sep = "") return(invisible(bootSamples)) } ## option to end function here if (bootSamplesOnly) return(bootSamples) ## check for lavaan arguments in (...) lavaanArgs <- list(...) lavaanArgs$group <- group ## fit model to bootstrap samples, save distribution of chi-squared test stat if (hasArg(x)) { ## grab defaults from lavaan object "x" lavaanArgs$slotParTable <- lavaan::parTable(x) lavaanArgs$slotModel <- x@Model lavaanArgs$slotOptions <- lavaan::lavInspect(x, "options") } else { lavaanArgs$model <- model lavaanArgs$missing <- "fiml" ## set defaults that will be necessary for many models to run, that will ## probably not be specified explictly or included in lavaan syntax lavaanArgs$meanstructure <- TRUE if (!exists("auto.var", where = lavaanArgs)) lavaanArgs$auto.var <- TRUE if (!exists("auto.cov.y", where = lavaanArgs)) lavaanArgs$auto.cov.y <- TRUE if (!exists("auto.cov.lv.x", where = lavaanArgs)) lavaanArgs$auto.cov.lv.x <- TRUE } ## run bootstrap fits if (showProgress) { mypb <- txtProgressBar(min = 1, max = nBoot, initial = 1, char = "=", width = 50, style = 3, file = "") bootFits <- numeric() tStart <- Sys.time() for (j in 1:nBoot) { bootFits[j] <- fitBootSample(bootSamples[[j]], args = lavaanArgs, suppress = suppressWarn) 
setTxtProgressBar(mypb, j) } close(mypb) output$timeFit <- Sys.time() - tStart } else { tStart <- Sys.time() bootFits <- sapply(bootSamples, fitBootSample, args = lavaanArgs, suppress = suppressWarn) output$timeFit <- Sys.time() - tStart } ## stack groups, save transformed data and distribution in output object for (g in seq_along(myTransDat)) myTransDat[[g]][ , group] <- group.label[g] output$Transformed.Data <- do.call("rbind", myTransDat) output$Bootstrapped.Distribution <- bootFits output$Original.ChiSquared <- ChiSquared if (hasArg(x)) { output$Degrees.Freedom <- lavaan::lavInspect(x, "fit")["df"] } else { convSamp <- which(!is.na(bootFits))[1] lavaanArgs$data <- bootSamples[[convSamp]] lavaanlavaan <- function(...) { lavaan::lavaan(...) } output$Degrees.Freedom <- lavaan::lavInspect(do.call(lavaanlavaan, lavaanArgs), "fit")["df"] } ## calculate bootstrapped p-value output$Bootstrapped.p.Value <- mean(bootFits >= ChiSquared, na.rm = TRUE) ## print warning if any models didn't converge if (any(is.na(bootFits))) { nonConvMessage <- paste("Model did not converge for the following bootstrapped samples", paste(which(is.na(bootFits)), collapse = "\t"), sep = ":\n") warning(nonConvMessage) } finalResult <- new("BootMiss", time = list(transform = output$timeTrans, fit = output$timeFit), transData = output$Transformed.Data, bootDist = output$Bootstrapped.Distribution, origChi = output$Original.ChiSquared, df = output$Degrees.Freedom, bootP = output$Bootstrapped.p.Value) finalResult } semTools/R/efa.R0000644000175100001440000003314713000250017013150 0ustar hornikusers### Sunthud Pornprasertmanit ### Last updated: 14 October 2016 ### run EFA model in lavaan setClass("EFA", representation(loading = "matrix", rotate="matrix", gradRotate="matrix", convergence="logical", phi="matrix", se = "matrix", method = "character", call="call")) printLoadings <- function(object, suppress = 0.1, sort=TRUE) { loading <- object@loading nf <- ncol(loading) loadingText <- sprintf("%.3f", 
## Wald tests and delta-method confidence intervals for the standardized
## rotated loadings of an "EFA" object.
##
## Arguments:
##   object -- an "EFA" object carrying @loading (standardized rotated
##             loadings) and @se (their standard errors)
##   level  -- two-sided confidence level for the intervals (default .95)
## Returns a "lavaan.data.frame" with one row per loading: lhs/op/rhs,
## std.loading, se, z, p, and the CI bounds.
testLoadings <- function(object, level=0.95) {
	se <- object@se
	loading <- object@loading
	lv.names <- colnames(loading)
	z <- loading/se
	p <- 2 * (1 - pnorm( abs(z) ))
	crit <- qnorm(1 - (1 - level)/2)
	est <- as.vector(loading)
	se <- as.vector(se)
	## BUG FIX: was warnings(...), which only prints previously deferred
	## warnings and silently ignores its argument; warning() actually
	## signals the caveat to the user.
	warning("The standard error is currently invalid because it does not account for the variance of rotation function. It is simply based on delta method.")
	## col()/row() expand the matrix into long format, pairing each
	## loading with its factor (lhs) and indicator (rhs)
	out <- data.frame(lhs=lv.names[col(loading)], op="=~", rhs=rownames(loading)[row(loading)], std.loading=est, se=se, z=as.vector(z), p=as.vector(p), ci.lower=(est - crit*se), ci.upper=(est + crit*se))
	class(out) <- c("lavaan.data.frame", "data.frame")
	out
}
## Fit an unrotated exploratory factor analysis model in lavaan.
##
## Builds CFA syntax in which every variable in 'varList' loads on each of
## 'nf' factors, all factor variances are fixed to 1, factor covariances are
## fixed to 0, and (for nf > 1) the columns of the loading matrix are
## constrained so their elementwise cross-products sum to zero -- the
## identification constraints for an unrotated solution.
##
## Arguments:
##   data    -- data frame of indicators
##   nf      -- number of factors to extract
##   varList -- indicator names (defaults to all columns of 'data')
##   start   -- if TRUE, take starting values from factanal()
##   aux     -- names of auxiliary variables (missing-data correlates)
##   ...     -- passed on to lavaan::cfa()
## Returns the fitted lavaan object (a "lavaanStar" object when 'aux' is
## used, with the matching null model's fit measures attached).
efaUnrotate <- function(data, nf, varList=NULL, start=TRUE, aux=NULL, ...)
{
	if(is.null(varList)) varList <- colnames(data)
	lavaancfa <- function(...) { lavaan::cfa(...)}
	nvar <- length(varList)
	facnames <- paste0("factor", 1:nf)
	## parameter labels "load<i>_<j>": loading of variable i on factor j
	loading <- outer(1:nvar, 1:nf, function(x, y) paste0("load", x, "_", y))
	syntax <- ""
	for(i in 1:nf) {
		variablesyntax <- paste(paste0(loading[,i], "*", varList), collapse=" + ")
		## NA* frees the first indicator's loading (no marker variable)
		factorsyntax <- paste0(facnames[i], " =~ NA*", varList[1], " + ", variablesyntax, "\n")
		syntax <- paste(syntax, factorsyntax)
	}
	## standardize the factors: fix every factor variance to 1
	syntax <- paste(syntax, paste(paste0(facnames, " ~~ 1*", facnames), collapse="\n"), "\n")
	## checkOrdered() is defined elsewhere in the package; presumably it
	## flags whether any indicator is ordered categorical -- TODO confirm
	isOrdered <- checkOrdered(data, varList, ...)
	if(!isOrdered) {
		## intercepts (mean structure) only for continuous indicators
		syntax <- paste(syntax, paste(paste0(varList, " ~ 1"), collapse="\n"), "\n")
	}
	if(nf > 1) {
		## orthogonal factors: fix all factor covariances to 0
		covsyntax <- outer(facnames, facnames, function(x, y) paste0(x, " ~~ 0*", y, "\n"))[lower.tri(diag(nf), diag=FALSE)]
		syntax <- paste(syntax, paste(covsyntax, collapse = " "))
		for(i in 2:nf) {
			for(j in 1:(i - 1)) {
				## identification constraint: sum of elementwise products of
				## loading columns i and j must equal zero
				loadconstraint <- paste(paste0(loading[,i], "*", loading[,j]), collapse=" + ")
				syntax <- paste(syntax, paste0("0 == ", loadconstraint), "\n")
			}
		}
	}
	if(start) {
		## Starting values: fit the model with do.fit = FALSE just to get the
		## sample moments and parameter table, run factanal() on the sample
		## covariance matrix, and rescale its loadings by the observed SDs.
		List <- c(list(model=syntax, data=data), list(...))
		List$do.fit <- FALSE
		outtemp <- do.call(lavaancfa, List)
		covtemp <- lavaan::lavInspect(outtemp, "sampstat")$cov
		partemp <- lavaan::parTable(outtemp)
		err <- try(startload <- factanal(factors=nf, covmat=covtemp)$loadings[], silent = TRUE)
		if(is(err, "try-error")) stop("The starting values from the factanal function cannot be calculated. Please use start=FALSE instead.")
		startval <- sqrt(diag(diag(covtemp))) %*% startload
		## place the starting values by matching the "load<i>_<j>" labels
		partemp$ustart[match(as.vector(loading), partemp$label)] <- as.vector(startval)
		partemp$est <- partemp$se <- partemp$start <- NULL
		## hand lavaan the parameter table instead of text syntax
		syntax <- partemp
	}
	args <- list(...)
	args$model <- syntax
	args$data <- data
	if(!is.null(aux)) {
		if(isOrdered) {
			stop("The analysis model or the analysis data have ordered categorical variables. 
The auxiliary variable feature is not available for the models for categorical variables with the weighted least square approach.")
		}
		## craftAuxParTable()/nullAuxiliary() are package-internal helpers
		## that add the auxiliary variables to the model and build the
		## corresponding null model -- see their definitions elsewhere
		auxResult <- craftAuxParTable(syntax, aux = aux)
		args$model <- auxResult$model
		args$fixed.x <- FALSE
		args$missing <- "fiml"
		result <- do.call(lavaancfa, args)
		codeNull <- nullAuxiliary(aux, auxResult$indName, NULL, any(syntax$op == "~1"), max(syntax$group))
		resultNull <- lavaan::lavaan(codeNull, data=data, ...)
		## store the aux-adjusted null model's fit measures on the result so
		## incremental fit indices are computed against the right baseline
		result <- as(result, "lavaanStar")
		fit <- lavaan::fitMeasures(resultNull)
		name <- names(fit)
		fit <- as.vector(fit)
		names(fit) <- name
		result@nullfit <- fit
		result@auxNames <- aux
		return(result)
	} else {
		return(do.call(lavaancfa, args))
	}
}
## Extract the factor loading matrix from a fitted lavaan EFA model.
##
## Arguments:
##   object -- fitted lavaan object (as produced by efaUnrotate)
##   std    -- if TRUE, standardize loadings by the model-implied SDs
## Returns a matrix of class "loadings" (so it prints via the stats
## print method for loadings), with auxiliary-variable rows removed for
## "lavaanStar" objects.
getLoad <- function(object, std = TRUE) {
	out <- lavaan::inspect(object, "coef")$lambda
	if(std) {
		## rescale rows by the inverse model-implied indicator SDs
		impcov <- lavaan::fitted.values(object)$cov
		impsd <- sqrt(diag(diag(impcov)))
		out <- solve(impsd) %*% out
	}
	rownames(out) <- lavaan::lavNames(object@ParTable, "ov", group = 1)
	if(is(object, "lavaanStar")) {
		out <- out[!(rownames(out) %in% object@auxNames),]
	}
	## BUG FIX: was class(out) <- c("loadings", out), which concatenated
	## the entire numeric matrix into the class attribute. Only the class
	## name "loadings" is intended here.
	class(out) <- "loadings"
	out
}
## Multiply X and Y after zero-padding each to a larger size, then return
## the top-left nrow(X) x ncol(Y) corner of the product. The fill*
## arguments give the number of extra zero rows/columns appended to X and
## Y respectively (all default to 0, i.e. a plain X %*% Y).
fillMult <- function(X, Y, fillrowx = 0, fillrowy = 0, fillcolx = 0, fillcoly = 0) {
	padX <- matrix(0, nrow = nrow(X) + fillrowx, ncol = ncol(X) + fillcolx)
	padY <- matrix(0, nrow = nrow(Y) + fillrowy, ncol = ncol(Y) + fillcoly)
	padX[1:nrow(X), 1:ncol(X)] <- X
	padY[1:nrow(Y), 1:ncol(Y)] <- Y
	prod <- padX %*% padY
	## keep only the block corresponding to the original dimensions
	prod[1:nrow(X), 1:ncol(Y)]
}
MoreArgs = NULL) { ov.names <- lavaan::lavNames(object@ParTable, "ov", group = 1) lv.names <- lavaan::lavNames(object@ParTable, "lv", group = 1) ind.names <- setdiff(ov.names, aux) # Compute model-implied covariance matrix partable <- object@ParTable # LY load.idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names)) loading <- matrix(est[load.idx], ncol=length(lv.names)) loading <- rbind(loading, matrix(0, length(aux), ncol(loading))) # Nu int.idx <- which(partable$op == "~1" & (partable$rhs == "") & (partable$lhs %in% ov.names)) intcept <- matrix(est[int.idx], ncol = 1) # Theta th.idx <- which(partable$op == "~~" & (partable$rhs %in% ov.names) & (partable$lhs %in% ov.names)) theta <- matrix(0, length(ov.names), length(ov.names), dimnames = list(ov.names, ov.names)) for(i in th.idx) { theta[partable$lhs[i], partable$rhs[i]] <- theta[partable$rhs[i], partable$lhs[i]] <- est[i] } OV <- loading %*% t(loading) + theta invsd <- solve(sqrt(diag(diag(OV)))) requireNamespace("GPArotation") if(!("package:GPArotation" %in% search())) attachNamespace("GPArotation") # Compute standardized results loading <- invsd %*% loading obj <- do.call(fun, c(list(loading), MoreArgs)) # GPArotation::GPFoblq(loading, method="geomin") loading <- obj$loadings rotMat <- t(solve(obj$Th)) # %*% rotate est[load.idx] <- as.vector(loading[seq_along(ind.names),]) intcept <- invsd %*% intcept est[int.idx] <- as.vector(intcept) theta <- invsd %*% theta %*% invsd rownames(theta) <- colnames(theta) <- ov.names for(i in th.idx) { est[i] <- theta[partable$lhs[i], partable$rhs[i]] } # Put phi rv.idx <- which(partable$op == "~~" & partable$rhs %in% lv.names) templhs <- match(partable$lhs[rv.idx], lv.names) temprhs <- match(partable$rhs[rv.idx], lv.names) # rotate2 <- t(solve(rotate)) # phi <- t(rotate2) %*% rotate2 phi <- obj$Phi if(!is.null(phi)) { for(i in seq_along(templhs)) { est[rv.idx[i]] <- phi[templhs[i], temprhs[i]] } } est } seStdLoadings <- function(rotate, object, fun, MoreArgs) { # 
object <- efaUnrotate(HolzingerSwineford1939, nf=3, varList=paste0("x", 1:9), estimator="mlr") # initL <- getLoad(object) # rotate <- GPArotation::GPFoblq(initL, method="oblimin") rotMat <- t(solve(rotate$Th)) gradient <- rotate$Gq loading <- rotate$loadings phi <- rotate$Phi if(is.null(phi)) phi <- diag(ncol(loading)) est <- lavaan::parameterEstimates(object)$est aux <- NULL if(is(object, "lavaanStar")) { aux <- object@auxNames } # Standardized results JAC1 <- lavaan::lav_func_jacobian_simple(func=stdRotatedLoadings, x=object@Fit@est, object=object, aux=aux, rotate = rotMat, fun = fun, MoreArgs = MoreArgs) LIST <- lavaan::lavInspect(object, "list") free.idx <- which(LIST$free > 0L) m <- ncol(phi) phi.idx <- which(LIST$op == "~~" & LIST$lhs != LIST$rhs & (LIST$lhs %in% paste0("factor", 1:m))) JAC1 <- JAC1[c(free.idx, phi.idx), free.idx] VCOV <- as.matrix(lavaan::vcov(object, labels=FALSE)) if(object@Model@eq.constraints) { JAC1 <- JAC1 %*% object@Model@eq.constraints.K } COV1 <- JAC1 %*% VCOV %*% t(JAC1) # I1 <- MASS::ginv(COV1) # I1p <- matrix(0, nrow(I1) + length(phi.idx), ncol(I1) + length(phi.idx)) # I1p[1:nrow(I1), 1:ncol(I1)] <- I1 # phi.idx2 <- nrow(I1) + 1:length(phi.idx) # p <- nrow(loading) # dconlambda <- matrix(0, m^2 - m, p*m) # gradphi <- gradient %*% solve(phi) # lambgradphi <- t(loading) %*% gradphi # lambphi <- loading %*% solve(phi) # lamblamb <- t(loading) %*% loading # runrow <- 1 # descript <- NULL # for(u in 1:m) { # for(v in setdiff(1:m, u)) { # runcol <- 1 # for(r in 1:m) { # for(i in 1:p) { # mir <- (1 - 1/p) * sum(loading[i,]^2) + sum(loading[,r]^2)/p - loading[i,r]^2 # dur <- 0 # if(u == r) dur <- 1 # dconlambda[runrow, runcol] <- dur * gradphi[i, v] + 4 * mir * loading[i, u] * phi[r, v] + (8 - 8/p)*loading[i,r]*loading[i,u]*lambphi[i,v] + 8*loading[i,r]*lamblamb[u,r]*phi[r,v]/p - 8*loading[i,r]^2*loading[i,u]*phi[r,v] # descript <- rbind(descript, c(runrow, runcol, u, v, i, r)) # runcol <- runcol + 1 # } # } # runrow <- runrow + 1 # } # 
} # dconphi <- matrix(0, m^2 - m, m*(m-1)/2) # runrow <- 1 # descript2 <- NULL # for(u in 1:m) { # for(v in setdiff(1:m, u)) { # runcol <- 1 # for(x in 2:m) { # for(y in 1:(x - 1)) { # dux <- 0 # if(u == x) dux <- 1 # duy <- 0 # if(u == y) duy <- 1 # dconphi[runrow, runcol] <- -(dux * phi[y, v] + duy * phi[x, v]) * lambgradphi[u, u] # descript2 <- rbind(descript2, c(runrow, runcol, u, v, x, y)) # runcol <- runcol + 1 # } # } # runrow <- runrow + 1 # } # } # I2 <- matrix(0, nrow(I1p) + m^2 - m, ncol(I1p) + m^2 - m) # I2[1:nrow(I1p), 1:ncol(I1p)] <- I1p # I2[lamb.idx, 1:(m^2 - m) + nrow(I1p)] <- t(dconlambda) # I2[1:(m^2 - m) + nrow(I1p), lamb.idx] <- dconlambda # I2[phi.idx2, 1:(m^2 - m) + nrow(I1p)] <- t(dconphi) # I2[1:(m^2 - m) + nrow(I1p), phi.idx2] <- dconphi # COV2 <- MASS::ginv(I2)[1:nrow(I1p), 1:ncol(I1p)] COV2 <- COV1 LIST <- LIST[,c("lhs", "op", "rhs", "group")] LIST$se <- rep(NA, length(LIST$lhs)) LIST$se[c(free.idx, phi.idx)] <- sqrt(diag(COV2)) tmp.se <- ifelse( LIST$se == 0.0, NA, LIST$se) lv.names <- lavaan::lavNames(object@ParTable, "lv", group = 1) partable <- lavaan::parTable(object) idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names)) matrix(LIST$se[idx], ncol=length(lv.names)) } semTools/R/partialInvarianceCat.R0000644000175100001440000011053113000201061016465 0ustar hornikusers# Wald stat did not show up partialInvarianceCat <- function(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", return.fit = FALSE, method = "satorra.bentler.2001") { # model <- ' f1 =~ u1 + u2 + u3 + u4 # f2 =~ u5 + u6 + u7 + u8' # modelsCat2 <- measurementInvarianceCat(model, data = datCat, group = "g", parameterization="theta", # estimator="wlsmv", strict = TRUE) # fit <- modelsCat2 # type <- "weak" # free <- NULL # fix <- NULL # refgroup <- 1 # poolvar <- TRUE # p.adjust <- "none" # return.fit <- FALSE # method = "satorra.bentler.2001" type <- tolower(type) numType <- 1 fit1 <- fit0 <- NULL # fit0 = Nested model, fit1 = 
Parent model if(type %in% c("metric", "weak", "loading", "loadings")) { numType <- 1 if(all(c("fit.configural", "fit.loadings") %in% names(fit))) { fit1 <- fit$fit.configural fit0 <- fit$fit.loadings } else { stop("The elements named 'fit.configural' and 'fit.loadings' are needed in the 'fit' argument") } } else if (type %in% c("scalar", "strong", "intercept", "intercepts", "threshold", "thresholds")) { numType <- 2 if(all(c("fit.loadings", "fit.thresholds") %in% names(fit))) { fit1 <- fit$fit.loadings fit0 <- fit$fit.thresholds } else { stop("The elements named 'fit.loadings' and 'fit.thresholds' are needed in the 'fit' argument") } } else if (type %in% c("strict", "residual", "residuals", "error", "errors")) { numType <- 3 if("fit.residuals" %in% names(fit)) { fit0 <- fit$fit.residuals if("fit.thresholds" %in% names(fit)) { fit1 <- fit$fit.thresholds } else if ("fit.loadings" %in% names(fit)) { fit1 <- fit$fit.loadings } else { stop("The element named either 'fit.thresholds' or 'fit.loadings' is needed in the 'fit' argument") } } else { stop("The element named 'fit.residuals' is needed in the 'fit' argument") } } else if (type %in% c("means", "mean")) { numType <- 4 if("fit.means" %in% names(fit)) { fit0 <- fit$fit.means if("fit.residuals" %in% names(fit)) { fit1 <- fit$fit.residuals } else if ("fit.thresholds" %in% names(fit)) { fit1 <- fit$fit.thresholds } else if ("fit.loadings" %in% names(fit)) { fit1 <- fit$fit.loadings } else { stop("The element named either 'fit.residuals', 'fit.thresholds', or 'fit.loadings' is needed in the 'fit' argument") } } else { stop("The element named 'fit.means' is needed in the 'fit' argument") } } else { stop("Please specify the correct type of measurement invariance. See the help page.") } pt1 <- lavaan::partable(fit1) pt0 <- lavaan::partable(fit0) pt0$start <- pt0$est <- pt0$se <- NULL pt1$start <- pt1$est <- pt1$se <- NULL pt1$label[substr(pt1$label, 1, 1) == "." 
& substr(pt1$label, nchar(pt1$label), nchar(pt1$label)) == "."] <- "" pt0$label[substr(pt0$label, 1, 1) == "." & substr(pt0$label, nchar(pt0$label), nchar(pt0$label)) == "."] <- "" namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) if(length(table(table(pt0$rhs[pt0$op == "=~"]))) != 1) stop("The model is not congeneric. This function does not support non-congeneric model.") varfree <- varnames <- unique(pt0$rhs[pt0$op == "=~"]) facnames <- unique(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) facrepresent <- table(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)], pt0$rhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) if(any(apply(facrepresent, 2, function(x) sum(x != 0)) > 1)) stop("The model is not congeneric. This function does not support non-congeneric model.") facList <- list() for(i in 1:nrow(facrepresent)) { facList[[i]] <- colnames(facrepresent)[facrepresent[i,] > 0] } names(facList) <- rownames(facrepresent) facList <- facList[match(names(facList), facnames)] fixLoadingFac <- list() for(i in seq_along(facList)) { select <- pt1$lhs == names(facList)[i] & pt1$op == "=~" & pt1$rhs %in% facList[[i]] & pt1$group == 1 & pt1$free == 0 & (!is.na(pt1$ustart) & pt1$ustart > 0) fixLoadingFac[[i]] <- pt1$rhs[select] } names(fixLoadingFac) <- names(facList) # Find the number of thresholds # Check whether the factor configuration is the same across gorups conParTable <- lapply(pt1, "[", pt1$op == "==") group1pt <- lapply(pt1, "[", pt1$group != 1) numThreshold <- table(sapply(group1pt, "[", group1pt$op == "|")[,"lhs"]) plabelthres <- split(group1pt$plabel[group1pt$op == "|"], group1pt$lhs[group1pt$op == "|"]) numFixedThreshold <- sapply(lapply(plabelthres, function(vec) !is.na(match(vec, conParTable$lhs)) | !is.na(match(vec, conParTable$rhs))), sum)[names(numThreshold)] #numFixedThreshold <- table(sapply(group1pt, "[", group1pt$op == "|" & group1pt$eq.id != 0)[,"lhs"]) fixIntceptFac <- list() for(i in seq_along(facList)) { tmp <- 
numFixedThreshold[facList[[i]]] if(all(tmp > 1)) { fixIntceptFac[[i]] <- integer(0) } else { fixIntceptFac[[i]] <- names(which.max(tmp))[1] } } names(fixIntceptFac) <- names(facList) ngroups <- max(pt0$group) neach <- lavaan::lavInspect(fit0, "nobs") groupvar <- lavaan::lavInspect(fit0, "group") grouplab <- lavaan::lavInspect(fit0, "group.label") if(!is.numeric(refgroup)) refgroup <- which(refgroup == grouplab) grouporder <- 1:ngroups grouporder <- c(refgroup, setdiff(grouporder, refgroup)) grouplaborder <- grouplab[grouporder] complab <- paste(grouplaborder[2:ngroups], "vs.", grouplaborder[1]) if(ngroups <= 1) stop("Well, the number of groups is 1. Measurement invariance across 'groups' cannot be done.") if(numType == 4) { if(!all(c(free, fix) %in% facnames)) stop("'free' and 'fix' arguments should consist of factor names because mean invariance is tested.") } else { if(!all(c(free, fix) %in% varnames)) stop("'free' and 'fix' arguments should consist of variable names.") } result <- fixCon <- freeCon <- NULL estimates <- NULL listFreeCon <- listFixCon <- list() beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) if(numType == 1) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for(i in seq_along(fix)) { if(dup[i]) { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } else { oldmarker <- fixLoadingFac[[facinfix[i]]] if(length(oldmarker) > 0) { oldmarkerval <- pt1$ustart[pt1$lhs == facinfix[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] if(oldmarker == fix[i]) { pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) } else { pt0 <- freeParTable(pt0, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- constrainParTable(pt0, facinfix[i], "=~", oldmarker, 
1:ngroups) pt1 <- freeParTable(pt1, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) fixLoadingFac[[facinfix[i]]] <- fix[i] } } else { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } } } } if(!is.null(free)) { facinfree <- findFactor(free, facList) for(i in seq_along(free)) { # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinfree[i]]] if(length(oldmarker) > 0 && oldmarker == free[i]) { oldmarkerval <- pt1$ustart[pt1$lhs == facinfix[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] candidatemarker <- setdiff(facList[[facinfree[i]]], free[i])[1] pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) pt0 <- fixParTable(pt0, facinfix[i], "=~", candidatemarker, 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", candidatemarker, 1:ngroups, oldmarkerval) fixLoadingFac[[facinfix[i]]] <- candidatemarker } else { pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("load:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("q:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon 
<- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$rhs %in% varfree) & (pt1$op == "=~") & (pt1$group == 1)) facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) indexfixvar <- which((pt1$rhs %in% varinfixvar) & (pt1$op == "=~") & (pt1$group == 1)) varnonfixvar <- setdiff(varfree, varinfixvar) indexnonfixvar <- setdiff(index, indexfixvar) pos <- 1 for(i in seq_along(indexfixvar)) { runnum <- indexfixvar[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- 
grouplab ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if(any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. The standardized loadings used in Fisher z transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) pos <- pos + 1 } facinvarfree <- findFactor(varnonfixvar, facList) for(i in seq_along(indexnonfixvar)) { runnum <- indexnonfixvar[i] # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinvarfree[i]]] if(length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varnonfixvar[i])[1] temp <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) temp <- constrainParTable(temp, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) temp <- fixParTable(temp, facinvarfree[i], "=~", candidatemarker, 1:ngroups) newparent <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) newparent <- fixParTable(newparent, facinvarfree[i], "=~", candidatemarker, 1:ngroups) newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if(!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = 
method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } waldCon[pos,] <- waldConstraint(newparentfit, newparent, waldMat, cbind(facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups)) } } else { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } listFixCon <- c(listFixCon, tryresult) if(length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- grouplab ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- 
poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if(any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. The standardized loadings used in Fisher z transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[c(indexfixvar, indexnonfixvar)] estimates <- cbind(estimates, stdestimates, esstd, esz) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 2) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for(i in seq_along(fix)) { numfixthres <- numThreshold[fix[i]] if(numfixthres > 1) { if(dup[i]) { for(s in 2:numfixthres) { pt0 <- constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } } else { oldmarker <- fixIntceptFac[[facinfix[i]]] numoldthres <- numThreshold[oldmarker] if(length(oldmarker) > 0) { if(oldmarker == fix[i]) { for(s in 2:numfixthres) { pt0 <- constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } } else { for(r in 2:numoldthres) { pt1 <- freeParTable(pt1, oldmarker, "|", paste0("t", r), 1:ngroups) } for(s in 2:numfixthres) { pt0 <- 
constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } fixIntceptFac[[facinfix[i]]] <- fix[i] } } else { for(s in 2:numfixthres) { pt0 <- constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } } } } } } if(!is.null(free)) { facinfree <- findFactor(free, facList) for(i in seq_along(free)) { numfreethres <- numThreshold[free[i]] # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinfree[i]]] numoldthres <- numThreshold[oldmarker] if(length(oldmarker) > 0 && oldmarker == free[i]) { candidatemarker <- setdiff(facList[[facinfree[i]]], free[i]) candidatemarker <- candidatemarker[numThreshold[candidatemarker] > 1][1] numcandidatethres <- numThreshold[candidatemarker] pt0 <- constrainParTable(pt0, candidatemarker, "|", "t2", 1:ngroups) pt1 <- constrainParTable(pt1, candidatemarker, "|", "t2", 1:ngroups) for(s in 2:numfixthres) { pt0 <- freeParTable(pt0, free[i], "|", paste0("t", s), 1:ngroups) pt1 <- freeParTable(pt1, free[i], "|", paste0("t", s), 1:ngroups) } fixIntceptFac[[facinfix[i]]] <- candidatemarker } else { for(s in 2:numfixthres) { pt0 <- freeParTable(pt0, free[i], "|", paste0("t", s), 1:ngroups) pt1 <- freeParTable(pt1, free[i], "|", paste0("t", s), 1:ngroups) } } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } maxcolumns <- max(numThreshold[varfree]) - 1 tname <- paste0("t", 2:(maxcolumns + 1)) estimates <- matrix(NA, length(varfree), (ngroups * length(tname)) + length(tname)) stdestimates <- matrix(NA, length(varfree), ngroups * length(tname)) tnameandlab <- expand.grid(tname, grouplab) colnames(estimates) <- c(paste0("pool:", tname), paste0(tnameandlab[,1], ":", 
tnameandlab[,2])) colnames(stdestimates) <- paste0("std:", tnameandlab[,1], ":", tnameandlab[,2]) esstd <- matrix(NA, length(varfree), (ngroups - 1)* length(tname)) tnameandcomplab <- expand.grid(tname, complab) colnames(esstd) <- paste0("diff_std:", tnameandcomplab[,1], ":", tnameandcomplab[,2]) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) varnonfixvar <- setdiff(varfree, varinfixvar) pos <- 1 for(i in seq_along(varinfixvar)) { temp <- pt1 for(s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) temp <- constrainParTable(temp, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- pt0 for(s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt0$lhs == varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", s)) & (pt0$group == 1)) temp0 <- freeParTable(temp0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, s - 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) } tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) 
freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) for(s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt0$lhs == varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", s)) & (pt0$group == 1)) thresVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, maxcolumns*(1:ngroups) + (s - 1)] <- thresVal totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- thresVal / sqrt(refTotalVal) stdestimates[pos, maxcolumns*(1:ngroups - 1) + (s - 1)] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos, maxcolumns*(1:length(complab) - 1) + (s - 1)] <- stdIntVal[2:ngroups] - stdIntVal[1] } } listFreeCon <- c(listFreeCon, tryresult0) args <- list(fit1, pt1, waldMat) for(s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) args <- c(args, list(cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups))) } waldCon[pos,] <- do.call(waldConstraint, args) pos <- pos + 1 } facinvarfree <- findFactor(varnonfixvar, facList) for(i in seq_along(varnonfixvar)) { # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinvarfree[i]]] if(length(oldmarker) > 0 && oldmarker == varfree[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varnonfixvar[i]) candidatemarker <- candidatemarker[numThreshold[candidatemarker] > 1][1] numcandidatethres <- numThreshold[candidatemarker] newparent <- constrainParTable(pt1, candidatemarker, "|", "t2", 1:ngroups) for(s in 2:numcandidatethres) { newparent <- freeParTable(newparent, varnonfixvar[i], "|", paste0("t", s), 1:ngroups) } temp <- newparent for(s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((newparent$lhs == varnonfixvar[i]) & (newparent$op == "|") & (newparent$rhs == 
paste0("t", s)) & (newparent$group == 1)) temp <- constrainParTable(temp, newparent$lhs[runnum], newparent$op[runnum], newparent$rhs[runnum], 1:ngroups) } newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if(!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } args <- list(newparentfit, newparent, waldMat) for(s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((newparent$lhs == varnonfixvar[i]) & (newparent$op == "|") & (newparent$rhs == paste0("t", s)) & (newparent$group == 1)) args <- c(args, list(cbind(newparent$lhs[runnum], newparent$op[runnum], newparent$rhs[runnum], 1:ngroups))) } waldCon[pos,] <- do.call(waldConstraint, args) } } else { temp <- pt1 for(s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) temp <- constrainParTable(temp, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } args <- list(fit1, pt1, waldMat) for(s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) args <- c(args, list(cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups))) } waldCon[pos,] <- do.call(waldConstraint, args) } listFixCon <- c(listFixCon, tryresult) temp0 <- pt0 for(s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt0$lhs == 
varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", s)) & (pt0$group == 1)) temp0 <- freeParTable(temp0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, s - 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) } tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) for(s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt0$lhs == varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", s)) & (pt0$group == 1)) thresVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, maxcolumns*(1:ngroups) + (s - 1)] <- thresVal totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- thresVal / sqrt(refTotalVal) stdestimates[pos, maxcolumns*(1:ngroups - 1) + (s - 1)] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos, maxcolumns*(1:length(complab) - 1) + (s - 1)] <- stdIntVal[2:ngroups] - stdIntVal[1] } } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- paste0(c(varinfixvar, varnonfixvar), "|") estimates <- cbind(estimates, stdestimates, esstd) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 3) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { for(i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~~", fix[i], 1:ngroups) 
pt1 <- constrainParTable(pt1, fix[i], "~~", fix[i], 1:ngroups) } } if(!is.null(free)) { for(i in seq_along(free)) { pt0 <- freeParTable(pt0, free[i], "~~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~~", free[i], 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("errvar:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("h:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~~") & (pt1$lhs == pt1$rhs) & (pt1$group == 1)) for(i in seq_along(index)) { runnum <- index[i] ustart <- getValue(pt1, beta, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1) temp <- fixParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 2:ngroups, ustart) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) 
tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[i,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) errVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- errVal totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$rhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdErrVal <- errVal / sqrt(refTotalVal) stdestimates[i,] <- stdErrVal stdErrVal <- stdErrVal[grouporder] esstd[i,] <- stdErrVal[2:ngroups] - stdErrVal[1] if(any(abs(stdErrVal) > 0.9999)) warning(paste("The uniqueness of", pt0$rhs[runnum], "in some groups are over 1. The uniqueness used in arctan transformation are changed to 0.9999.")) stdErrVal[stdErrVal > 0.9999] <- 0.9999 zErrVal <- asin(sqrt(stdErrVal)) esz[i,] <- zErrVal[2:ngroups] - zErrVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd, esz) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 4) { varfree <- facnames if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { for(i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } } if(!is.null(free)) { for(i in seq_along(free)) { pt0 <- freeParTable(pt0, 
free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("mean:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~1") & (pt1$group == 1)) for(i in seq_along(index)) { runnum <- index[i] isfree <- pt1$free[runnum] != 0 if(isfree) { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } else { temp <- fixParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 2:ngroups, ustart = pt1$ustart[runnum]) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) isfree0 <- pt0$free[runnum] != 0 if(isfree0) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) 
tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[i,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) meanVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- meanVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) stdMeanVal <- meanVal / sqrt(refFacVal) stdestimates[i,] <- stdMeanVal stdMeanVal <- stdMeanVal[grouporder] esstd[i,] <- stdMeanVal[2:ngroups] - stdMeanVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd) result <- cbind(freeCon, fixCon, waldCon) } if(return.fit) { return(invisible(list(estimates = estimates, results = result, models = list(free = listFreeCon, fix = listFixCon, nested = fit0, parent = fit1)))) } else { return(list(estimates = estimates, results = result)) } } thetaImpliedTotalVar <- function(object) { param <- lavaan::lavInspect(object, "coef") ngroup <- lavaan::lavInspect(object, "ngroups") name <- names(param) if(ngroup == 1) { ly <- param[name == "lambda"] } else { ly <- lapply(param, "[[", "lambda") } ps <- lavaan::lavInspect(object, "cov.lv") if(ngroup == 1) ps <- list(ps) if(ngroup == 1) { te <- param[name == "theta"] } else { te <- 
lapply(param, "[[", "theta") } result <- list() for(i in 1:ngroup) { result[[i]] <- ly[[i]]%*%ps[[i]]%*%t(ly[[i]]) + te[[i]] } result } semTools/R/powerAnalysisSS.R0000644000175100001440000000547713000201061015523 0ustar hornikusers###Function to do power analysis for parameters with Satorra & Sarris method ###Alexander M. Schoemann ###11/4/2014 ##Steps: ##1. Specify model (use lavaan syntax based on simulateData) ##2. get model implied covariance matrix ##3. Fit model with parameter constrained to 0 (or take a model specification for multiparameter tests?) ##4. Use chi square from step 3 as non-centrality parameter to get power. ##Function to return power for a given model parameter #inputs: popModel = lavaan syntax specifying data generating model (be sure to provide a value for all parameters), n =sample size (either scalar or vector), powerModel=Model to be fit, with parameter of interest fixed to 0, fun = lavaan function to use, nparam = number of parameters fixed in the power Model, ... additional arguments to pass to lavaan SSpower <- function(popModel, n, powerModel, fun = "cfa", nparam = 1, alpha = .05, ...) 
{ ##Two item list, first item is covariance matrix, second item is mean vector popCov <- lavaan::fitted(do.call(fun, list(model=popModel))) ##Fit model with parameter(s) fixed to 0 out <- list(model=powerModel, sample.cov=popCov[[1]], sample.mean = popCov[[2]], sample.nobs=n) out <- c(out, list(...)) mod <- do.call(fun, out) ##get NCP from chi square ncp <- lavaan::fitmeasures(mod)["chisq"] critVal <- qchisq(1-alpha, nparam) 1-pchisq(critVal, nparam, ncp) } #Test the function # model <- ' # f1 =~ .7?V1 + .7?V2 + .7?V3 + .7?V4 # f2 =~ .7?V5 + .7?V6 + .7?V7 + .7?V8 # f1 ~~ .3?f2 # f1 ~~ 1*f1 # f2 ~~ 1*f2 # V1 ~~ .51?V1 # V2 ~~ .51?V2 # V3 ~~ .51?V3 # V4 ~~ .51?V4 # V5 ~~ .51?V5 # V6 ~~ .51?V6 # V7 ~~ .51?V7 # V8 ~~ .51?V8 # ' # model2 <- ' # f1 =~ .7?V1 + .7?V2 + .7?V3 + .7?V4 # f2 =~ .7?V5 + .7?V6 + .7?V7 + .7?V8 # f1 ~~ 0*f2 # f1 ~~ 1*f1 # f2 ~~ 1*f2 # V1 ~~ .51?V1 # V2 ~~ .51?V2 # V3 ~~ .51?V3 # V4 ~~ .51?V4 # V5 ~~ .51?V5 # V6 ~~ .51?V6 # V7 ~~ .51?V7 # V8 ~~ .51?V8 # ' # SSpower(model, 150, model2) #Get power for a range of values # powVals <- NULL # Ns <- seq(120, 500, 10) # for(i in Ns){ # powVals <- c(powVals, SSpower(model, i, model2)) # } # plot(Ns, powVals, type = 'l') #Test with multiple params # model3 <- ' # f1 =~ 1*V1 + 1*V2 + 1*V3 + 1*?V4 # f2 =~ .7?V5 + .7?V6 + .7?V7 + .7?V8 # f1 ~~ f2 # f1 ~~ 1*f1 # f2 ~~ 1*f2 # V1 ~~ .51?V1 # V2 ~~ .51?V2 # V3 ~~ .51?V3 # V4 ~~ .51?V4 # V5 ~~ .51?V5 # V6 ~~ .51?V6 # V7 ~~ .51?V7 # V8 ~~ .51?V8 # ' # SSpower(model, 150, model3, nparam=4) semTools/R/indProd.R0000644000175100001440000002112513000201061014000 0ustar hornikusers## Title: Orthogonalize data for 2-way and 3-way interaction in SEM ## Author: Sunthud Pornprasertmanit and Alexander M. 
Schoemann ## Description: Orthogonalize data for 2-way and 3-way interaction in SEM ##----------------------------------------------------------------------------## # indProd: Make a product of indicators using mean centering, double-mean centering, or residual centering indProd <- function(data, var1, var2, var3=NULL, match = TRUE, meanC = TRUE, residualC = FALSE, doubleMC = TRUE, namesProd = NULL) { # Get all variable names if (all(is.numeric(var1))) var1 <- colnames(data)[var1] if (all(is.numeric(var2))) var2 <- colnames(data)[var2] if (!is.null(var3) && all(is.numeric(var3))) var3 <- colnames(data)[var3] dat1 <- data[, var1] dat2 <- data[, var2] dat3 <- NULL if (!is.null(var3)) dat3 <- data[, var3] # Mean centering on the original indicators if (meanC) { dat1 <- scale(dat1, scale = FALSE) dat2 <- scale(dat2, scale = FALSE) if (!is.null(dat3)) dat3 <- scale(dat3, scale = FALSE) } if (match) { # Check whether the number of variables are equal across variable sets if (length(var1) != length(var2)) stop("If the match-paired approach is used, the number of variables in all sets must be equal.") if (!is.null(var3) && (length(var1) != length(var3))) stop("If the match-paired approach is used, the number of variables in all three sets must be equal.") datProd <- NULL if (is.null(var3)) { # Two-way interaction datProd <- dat1 * dat2 if (residualC) { notmissing <- which(!apply(datProd, 1, function(x) any(is.na(x)))) colnames(datProd) <- paste("interactionProduct", 1:ncol(datProd), sep = "") # Write the expression for linear model and residualize the products temp <- data.frame(datProd, dat1, dat2) express <- paste("cbind(", paste(colnames(datProd), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2)), collapse = " + "), sep = "") datProd[notmissing,] <- lm(express, data = temp)$residuals } } else { # Three-way interaction datProd2way <- cbind(dat1 * dat2, dat1 * dat3, dat2 * dat3) datProd3way <- dat1 * dat2 * dat3 if (residualC) { notmissing2way <- 
which(!apply(datProd2way, 1, function(x) any(is.na(x)))) colnames(datProd2way) <- paste("interaction2Product", 1:ncol(datProd2way), sep = "") # Write the expression for linear model and residualize the two-way products temp2 <- data.frame(datProd2way, dat1, dat2, dat3) express2 <- paste("cbind(", paste(colnames(datProd2way), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3)), collapse = " + "), sep = "") datProd2way[notmissing2way,] <- lm(express2, data = temp2)$residuals # Making all possible products to residualize the 3-way interaction datProd2wayFull <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) datProd2wayFull <- data.frame(datProd2wayFull, matrix(rep(dat1[, i], length(var2)), ncol = length(var2)) * dat2) for (i in 1:length(var1)) datProd2wayFull <- data.frame(datProd2wayFull, matrix(rep(dat1[, i], length(var3)), ncol = length(var3)) * dat3) for (i in 1:length(var2)) datProd2wayFull <- data.frame(datProd2wayFull, matrix(rep(dat2[, i], length(var3)), ncol = length(var3)) * dat3) datProd2wayFull <- datProd2wayFull[, -1] colnames(datProd2wayFull) <- paste("interaction2Product", 1:ncol(datProd2wayFull), sep = "") notmissing3way <- which(!apply(datProd3way, 1, function(x) any(is.na(x)))) colnames(datProd3way) <- paste("interaction3Product", 1:ncol(datProd3way), sep = "") # Write the expression for linear model and residualize the three-way products temp3 <- data.frame(datProd3way, dat1, dat2, dat3, datProd2wayFull) express3 <- paste("cbind(", paste(colnames(datProd3way), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3), colnames(datProd2wayFull)), collapse = " + "), sep = "") datProd3way[notmissing3way,] <- lm(express3, data = temp3)$residuals } datProd <- cbind(datProd2way, datProd3way) } # Mean-centering the final product if (doubleMC) datProd <- scale(datProd, scale = FALSE) # Rename the obtained product terms if (is.null(namesProd)) { if (is.null(var3)) { colnames(datProd) <- paste(var1, 
var2, sep = ".") } else { colnames(datProd) <- c(paste(var1, var2, sep = "."), paste(var1, var3, sep = "."), paste(var2, var3, sep = "."), paste(var1, var2, var3, sep = ".")) } } else { colnames(datProd) <- namesProd } } else { datProd <- NULL if (is.null(var3)) { # Create all possible combinations of the products of indicators datProd <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) datProd <- data.frame(datProd, matrix(rep(dat1[, i], length(var2)), ncol = length(var2)) * dat2) datProd <- datProd[, -1] if (residualC) { notmissing <- which(!apply(datProd, 1, function(x) any(is.na(x)))) colnames(datProd) <- paste("interactionProduct", 1:ncol(datProd), sep = "") # Write the expression for linear model and residualize the two-way products temp <- data.frame(datProd, dat1, dat2) express <- paste("cbind(", paste(colnames(datProd), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2)), collapse = " + "), sep = "") datProd[notmissing,] <- lm(express, data = temp)$residuals } } else { # Create all possible combinations of the products of indicators datProd2way <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) datProd2way <- data.frame(datProd2way, matrix(rep(dat1[, i], length(var2)), ncol = length(var2)) * dat2) for (i in 1:length(var1)) datProd2way <- data.frame(datProd2way, matrix(rep(dat1[, i], length(var3)), ncol = length(var3)) * dat3) for (i in 1:length(var2)) datProd2way <- data.frame(datProd2way, matrix(rep(dat2[, i], length(var3)), ncol = length(var3)) * dat3) datProd3way <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) { for(j in 1:length(var2)) { datProd3way <- data.frame(datProd3way, matrix(rep(dat1[, i], length(var3)), ncol = length(var3)) * matrix(rep(dat2[, j], length(var3)), ncol = length(var3)) * dat3) } } datProd2way <- datProd2way[, -1] datProd3way <- datProd3way[, -1] if (residualC) { notmissing2way <- which(!apply(datProd2way, 1, function(x) any(is.na(x)))) colnames(datProd2way) <- paste("interaction2Product", 
1:ncol(datProd2way), sep = "") # Write the expression for linear model and residualize the two-way products temp2 <- data.frame(datProd2way, dat1, dat2, dat3) express2 <- paste("cbind(", paste(colnames(datProd2way), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3)), collapse = " + "), sep = "") datProd2way[notmissing2way,] <- lm(express2, data = temp2)$residuals notmissing3way <- which(!apply(datProd3way, 1, function(x) any(is.na(x)))) colnames(datProd3way) <- paste("interaction3Product", 1:ncol(datProd3way), sep = "") # Write the expression for linear model and residualize the three-way products temp3 <- data.frame(datProd3way, dat1, dat2, dat3, datProd2way) express3 <- paste("cbind(", paste(colnames(datProd3way), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3), colnames(datProd2way)), collapse = " + "), sep = "") datProd3way[notmissing3way,] <- lm(express3, data = temp3)$residuals } datProd <- cbind(datProd2way, datProd3way) } # Double-mean centering if (doubleMC) datProd <- scale(datProd, scale = FALSE) # Name the resulting product terms if (is.null(namesProd)) { temp <- NULL if (is.null(var3)) { for (i in 1:length(var1)) temp <- c(temp, paste(var1[i], var2, sep = ".")) } else { for (i in 1:length(var1)) temp <- c(temp, paste(var1[i], var2, sep = ".")) for (i in 1:length(var1)) temp <- c(temp, paste(var1[i], var3, sep = ".")) for (i in 1:length(var2)) temp <- c(temp, paste(var2[i], var3, sep = ".")) for (i in 1:length(var1)) { for(j in 1:length(var2)) { temp <- c(temp, paste(var1[i], var2[j], var3, sep = ".")) } } } colnames(datProd) <- temp } else { colnames(datProd) <- namesProd } } # Bind the products back to the original data data <- data.frame(data, datProd) return(data) } # orthogonalize: the shortcut for residual centering orthogonalize <- function(data, var1, var2, var3=NULL, match=TRUE, namesProd=NULL) { indProd(data=data, var1=var1, var2=var2, var3=var3, match=match, meanC=FALSE, 
residualC=TRUE, doubleMC=FALSE, namesProd=namesProd) } semTools/R/quark.R0000644000175100001440000003144613000201061013533 0ustar hornikusersquark <- function(data, id, order = 1, silent = FALSE){ if(!is.data.frame(data) && !is.matrix(data)) { stop("Inappropriate data file provided.") } if(!silent) cat("Data Check Passed.\n") if(is.character(id)) id <- match(id, colnames(data)) for(i in 1:length(id)){ if(id[i] > ncol(data) || id[i] < 1){ stop("At least one of the IDs is out of bounds.") } } if(!silent) cat("ID Check Passed.\n") if(!(order %in% 1:2)) stop("Currently, the order argument can take either 1 or 2.") final.collect <- list() final.collect$ID_Columns <- id final.collect$ID_Vars <- data[,id] final.collect$Used_Data <- data[,-c(id)] final.collect$Imputed_Data <- imputequark(data = final.collect$Used_Data, order = order, silent = silent) final.collect$Big_Data_Matrix <- bigquark(data = final.collect$Imputed_Data, silent = silent) cmp <- compquark(data = final.collect$Big_Data_Matrix, silent = silent) final.collect$Prin_Components <- cmp[[1]] final.collect$Prin_Components_Prcnt <- cmp[[2]] return(final.collect) } imputequark <- function(data, order, silent = FALSE){ if(order==1){ data <- aImp(data=data, silent = silent) data <- gImp(data=data, silent = silent) } else if(order==2) { data <- gImp(data=data, silent = silent) if(length(which(is.na(data > 0)))){ data <- aImp(data=data, silent = silent) } } return(data) } gImp <- function(data, silent = FALSE){ imputed_data <- data num_adds <- vector(length=ncol(data)) #number of columns combined into one for averaging. data.cor <- cor(data,use="pairwise",method="pearson") if(!silent) printCor(data.cor) #populate multiple matrices that can then be utilized to determine if one column should enhance another based upon #the correlations they share... if(!silent) cat("Imputing Column... 
\n") for(a in 1:ncol(data)){ temp_mat <- matrix(ncol=ncol(data),nrow=nrow(data)) list <- unique(sort(data[,a])) if(length(list)>1 && length(list)<=10){ for(b in 1:nrow(data)){ for(c in 1:length(list)){ if(data[b,a]==list[c] && !is.na(data[b,a])){ temp_mat[b,] <- round(colMeans(subset(data,data[,a]==list[c]),na.rm=T),digits=1) } else if(is.na(data[b,a])){ for(p in 1:ncol(data)){ temp_mat[b,p] <- data[b,p] } } } } #Here I need to determine if the other columns are correlated enough with the reference to ensure accuracy #of predictions temp_cor <- data.cor[,a] #if(countNA(temp_cor)==0){ for(i in 1:length(temp_cor)){ if(i!=a){ if(abs(temp_cor[i])>=.5&&!is.na(temp_cor[i])){#Using a moderate effect size, column a, will inform other columns. for(x in 1:nrow(imputed_data)){ imputed_data[x,i] <- sum(imputed_data[x,i],temp_mat[x,a],na.rm=T) } num_adds[i] <- num_adds[i] + 1 } } } #} if(!silent) cat("\t", colnames(data)[a]) } } if(!silent) cat("\n") imputed_data <- cleanMat(m1=data,m2=imputed_data,impact=num_adds) imputed_data <- fixData(imputed_data) return(imputed_data) } cleanMat <- function(m1,m2,impact){ #Impact is the number of influences on each column... #We need to clean up and then try to determine what final values should be... #Go through each of the cells... 
new_mat <- m2 for(a in 1:ncol(m1)){ for(b in 1:nrow(m1)){ if(!is.na(m1[b,a])){ new_mat[b,a] <- m1[b,a] } else if(is.na(m1[b,a])){ new_mat[b,a] <- new_mat[b,a]/impact[a] } } } return(new_mat) } fixData <- function(data){ for(a in 1:ncol(data)){ for(b in 1:nrow(data)){ data[b,a] <- round(data[b,a],digits=1) } } return(data) } aImp <- function(data, silent = FALSE){ requireNamespace("mice") if(!("package:mice" %in% search())) attachNamespace("mice") if(!silent) cat("Starting Algorithm Imputation...\n") data <- mice::mice(data,maxit=1,m=1, printFlag = !silent) data <- mice::complete(data) if(!silent) cat("Ending Algorithm Imputation...\n") return(data) } bigquark <- function(data, silent = FALSE){ if(!silent) cat("Calculating Polynomial Effects.\n") poly <- ((data^2)+(data^3))/2 if(!silent) cat("Creating Matrix for Interaction Effects.\n") prod <- matrix(ncol=(ncol(data)-1),nrow=nrow(data)) if(!silent) cat("Calculating Interaction Effects...0%..") for(i in 1:nrow(data)){ if(!silent) printpct(percent=i/nrow(data)) for(j in 1:(ncol(data)-1)){ prod[i,j] <- mean(as.numeric(data[i,j])*as.numeric(data[i,(j+1):ncol(data)])) } } cat("\n") data <- cbind(data,poly,prod) return(data) } compquark <- function(data, silent = FALSE){ if(!silent) cat("Calculating values for the PCA\n") pcam <- pcaquark(data, ncp=ncol(data)) cmp <- list() cmp$pca <- pcam$ind$coord cmp$var <- pcam$eig[,3] colnames(cmp$pca) <- c(paste0("AuxVar",1:ncol(cmp$pca))) return(cmp) } printpct <- function(percent){ if(round(percent,digits=10)==0) cat("0%..") if(round(percent,digits=10)==.10) cat("10%..") if(round(percent,digits=10)==.20) cat("20%..") if(round(percent,digits=10)==.30) cat("30%..") if(round(percent,digits=10)==.40) cat("40%..") if(round(percent,digits=10)==.50) cat("50%..") if(round(percent,digits=10)==.60) cat("60%..") if(round(percent,digits=10)==.70) cat("70%..") if(round(percent,digits=10)==.80) cat("80%..") if(round(percent,digits=10)==.90) cat("90%..") if(round(percent,digits=10)==1) 
cat("100%..") } combinequark <- function(quark,percent){ data <- cbind(quark$ID_Vars,quark$Used_Data) pct <- quark$Prin_Components_Prcnt comp <- quark$Prin_Components for(i in 1:length(pct)){ if(pct[i]>=percent){ num <- i break } } return(cbind(data,comp[,1:num])) } # This function is modified from the FactoMinoR package. pcaquark <- function (X, ncp = 5) { moy.p <- function(V, poids) { res <- sum(V * poids)/sum(poids) } ec <- function(V, poids) { res <- sqrt(sum(V^2 * poids)/sum(poids)) } X <- as.data.frame(X) if (any(is.na(X))) { warnings("Missing values are imputed by the mean of the variable: you should use the imputePCA function of the missMDA package") X[is.na(X)] <- matrix(apply(X,2,mean,na.rm=TRUE),ncol=ncol(X),nrow=nrow(X),byrow=TRUE)[is.na(X)] } if (is.null(rownames(X))) rownames(X) <- 1:nrow(X) if (is.null(colnames(X))) colnames(X) <- paste("V", 1:ncol(X), sep = "") colnames(X)[colnames(X) == ""] <- paste("V", 1:sum(colnames(X)==""),sep="") rownames(X)[is.null(rownames(X))] <- paste("row",1:sum(rownames(X)==""),sep="") Xtot <- X if (any(!sapply(X, is.numeric))) { auxi <- NULL for (j in 1:ncol(X)) if (!is.numeric(X[, j])) auxi <- c(auxi, colnames(X)[j]) stop(paste("\nThe following variables are not quantitative: ", auxi)) } ncp <- min(ncp, nrow(X) - 1, ncol(X)) row.w <- rep(1, nrow(X)) row.w.init <- row.w row.w <- row.w/sum(row.w) col.w <- rep(1, ncol(X)) centre <- apply(X, 2, moy.p, row.w) X <- as.matrix(sweep(as.matrix(X), 2, centre, FUN = "-")) ecart.type <- apply(X, 2, ec, row.w) ecart.type[ecart.type <= 1e-16] <- 1 X <- sweep(as.matrix(X), 2, ecart.type, FUN = "/") dist2.ind <- apply(sweep(X,2,sqrt(col.w),FUN="*")^2,1,sum) dist2.var <- apply(sweep(X,1,sqrt(row.w),FUN="*")^2,2,sum) tmp <- svd.triplet.quark(X, row.w = row.w, col.w = col.w, ncp = ncp) eig <- tmp$vs^2 vp <- as.data.frame(matrix(NA, length(eig), 3)) rownames(vp) <- paste("comp", 1:length(eig)) colnames(vp) <- c("eigenvalue", "percentage of variance", "cumulative percentage of variance") 
vp[, "eigenvalue"] <- eig vp[, "percentage of variance"] <- (eig/sum(eig)) * 100 vp[, "cumulative percentage of variance"] <- cumsum(vp[, "percentage of variance"]) V <- tmp$V U <- tmp$U eig <- eig[1:ncp] coord.ind <- sweep(as.matrix(U), 2, sqrt(eig), FUN = "*") coord.var <- sweep(as.matrix(V), 2, sqrt(eig), FUN = "*") contrib.var <- sweep(as.matrix(coord.var^2), 2, eig, "/") contrib.var <- sweep(as.matrix(contrib.var), 1, col.w, "*") dist2 <- dist2.var cor.var <- sweep(as.matrix(coord.var), 1, sqrt(dist2), FUN = "/") cos2.var <- cor.var^2 rownames(coord.var) <- rownames(cos2.var) <- rownames(cor.var) <- rownames(contrib.var) <- colnames(X) colnames(coord.var) <- colnames(cos2.var) <- colnames(cor.var) <- colnames(contrib.var) <- paste("Dim", c(1:ncol(V)), sep = ".") res.var <- list(coord = coord.var[, 1:ncp], cor = cor.var[, 1:ncp], cos2 = cos2.var[, 1:ncp], contrib = contrib.var[, 1:ncp] * 100) dist2 <- dist2.ind cos2.ind <- sweep(as.matrix(coord.ind^2), 1, dist2, FUN = "/") contrib.ind <- sweep(as.matrix(coord.ind^2), 1, row.w/sum(row.w), FUN = "*") contrib.ind <- sweep(as.matrix(contrib.ind), 2, eig, FUN = "/") rownames(coord.ind) <- rownames(cos2.ind) <- rownames(contrib.ind) <- names(dist2) <- rownames(X) colnames(coord.ind) <- colnames(cos2.ind) <- colnames(contrib.ind) <- paste("Dim", c(1:ncol(U)), sep = ".") res.ind <- list(coord = coord.ind[, 1:ncp], cos2 = cos2.ind[, 1:ncp], contrib = contrib.ind[, 1:ncp] * 100, dist = sqrt(dist2)) res <- list(eig = vp, var = res.var, ind = res.ind, svd = tmp) class(res) <- c("PCA", "list") return(res) } # This function is modified from the FactoMinoR package. 
svd.triplet.quark <- function (X, row.w = NULL, col.w = NULL,ncp=Inf) { tryCatch.W.E <- function(expr){ ## function proposed by Maechlmr W <- NULL w.handler <- function(w){ # warning handler W <<- w invokeRestart("muffleWarning") } list(value = withCallingHandlers(tryCatch(expr, error = function(e) e), warning = w.handler), warning = W) } ncp <- min(ncp,nrow(X)-1,ncol(X)) row.w <- row.w / sum(row.w) X <- sweep(X, 2, sqrt(col.w), FUN = "*") X <- sweep(X, 1, sqrt(row.w), FUN = "*") if (ncol(X) < nrow(X)){ svd.usuelle <- tryCatch.W.E(svd(X,nu=ncp,nv=ncp))$val if (names(svd.usuelle)[[1]]=="message") { svd.usuelle<- tryCatch.W.E(svd(t(X),nu=ncp,nv=ncp))$val if (names(svd.usuelle)[[1]]=="d"){ aux=svd.usuelle$u svd.usuelle$u=svd.usuelle$v svd.usuelle$v=aux } else{ bb=eigen(t(X)%*%X,symmetric=TRUE) svd.usuelle <- vector(mode = "list", length = 3) svd.usuelle$d[svd.usuelle$d<0]=0 svd.usuelle$d=sqrt(svd.usuelle$d) svd.usuelle$v=bb$vec[,1:ncp] svd.usuelle$u=sweep(X%*%svd.usuelle$v,2,svd.usuelle$d[1:ncp],FUN="/") } } U <- svd.usuelle$u V <- svd.usuelle$v if (ncp >1){ mult <- sign(apply(V,2,sum)) mult[mult==0] <- 1 U <- sweep(U,2,mult,FUN="*") V <- sweep(V,2,mult,FUN="*") } U <- sweep(as.matrix(U), 1, sqrt(row.w), FUN = "/") V <- sweep(as.matrix(V), 1, sqrt(col.w), FUN = "/") } else { svd.usuelle=tryCatch.W.E(svd(t(X),nu=ncp,nv=ncp))$val if (names(svd.usuelle)[[1]]=="message"){ svd.usuelle=tryCatch.W.E(svd(X,nu=ncp,nv=ncp))$val if (names(svd.usuelle)[[1]]=="d"){ aux=svd.usuelle$u svd.usuelle$u=svd.usuelle$v svd.usuelle$v=aux } else{ bb=eigen(X%*%t(X),symmetric=TRUE) svd.usuelle <- vector(mode = "list", length = 3) svd.usuelle$d[svd.usuelle$d<0]=0 svd.usuelle$d=sqrt(svd.usuelle$d) svd.usuelle$v=bb$vec[,1:ncp] svd.usuelle$u=sweep(t(X)%*%svd.usuelle$v,2,svd.usuelle$d[1:ncp],FUN="/") } } U <- svd.usuelle$v V <- svd.usuelle$u mult <- sign(apply(V,2,sum)) mult[mult==0] <- 1 V <- sweep(V,2,mult,FUN="*") U <- sweep(U,2,mult,FUN="*") U <- sweep(U, 1, sqrt(row.w), FUN = "/") V <- 
sweep(V, 1, sqrt(col.w), FUN = "/") } vs <- svd.usuelle$d[1:min(ncol(X),nrow(X)-1)] num <- which(vs[1:ncp]<1e-15) if (length(num)==1){ U[,num] <- U[,num]*vs[num] V[,num] <- V[,num]*vs[num] } if (length(num)>1){ U[,num] <- sweep(U[,num],2,vs[num],FUN="*") V[,num] <- sweep(V[,num],2,vs[num],FUN="*") } res <- list(vs = vs, U = U, V = V) return(res) } # This function is copied from the psych package: lowerMat printCor <- function (R, digits = 2) { lowleft <- lower.tri(R, diag = TRUE) nvar <- ncol(R) nc <- digits + 3 width <- getOption("width") k1 <- width/(nc + 2) if (is.null(colnames(R))) { colnames(R) <- paste("C", 1:nvar, sep = "") } if (is.null(rownames(R))) { rownames(R) <- paste("R", 1:nvar, sep = "") } colnames(R) <- abbreviate(colnames(R), minlength = digits + 3) nvar <- ncol(R) nc <- digits + 3 if (k1 * nvar < width) { k1 <- nvar } k1 <- floor(k1) fx <- format(round(R, digits = digits)) if (nrow(R) == ncol(R)) { fx[!lowleft] <- "" } for (k in seq(0, nvar, k1)) { if (k < nvar) { print(fx[(k + 1):nvar, (k + 1):min((k1 + k), nvar)], quote = FALSE) } } } semTools/R/residualCovariate.R0000644000175100001440000000104713000201061016050 0ustar hornikusers# residualCovariate: Residual centered all target indicators by covariates residualCovariate <- function(data, targetVar, covVar) { x <- as.list(match.call()) cov <- eval(x$covVar) target <- eval(x$targetVar) if (all(is.numeric(cov))) cov <- colnames(data)[cov] if (all(is.numeric(target))) target <- colnames(data)[target] express <- paste("cbind(", paste(target, collapse = ", "), ") ~ ", paste(cov, collapse = " + "), sep = "") data[, target] <- lm(express, data = data)$residuals return(data) } semTools/R/measurementInvarianceCat.R0000644000175100001440000001624013000201061017360 0ustar hornikusersmeasurementInvarianceCat <- function(..., std.lv = FALSE, strict=FALSE, quiet=FALSE, fit.measures = "default", method = "satorra.bentler.2001") { List <- list(...) lavaancfa <- function(...) { lavaan::cfa(...) 
} lavaanlavaan <- function(...) { lavaan::lavaan(...) } if(!is.null(List$parameterization) && tolower(List$parameterization) != "theta") warning("The parameterization is set to 'theta' by default.") List$parameterization <- "theta" # Find the number of groups if(is.null(List$group)) stop("Please specify the group variable") # Get the lavaan parameter table template <- do.call(lavaancfa, c(List, do.fit=FALSE)) lavaanParTable <- lavaan::parTable(template) # Find the number of groups ngroups <- max(lavaanParTable$group) # Check whether all variables are categorical sampstat <- lavaan::lavInspect(template, "samp")[[1]] meanname <- names(sampstat$mean) thname <- names(sampstat$th) if(any(is.na(charmatch(meanname, thname)))) stop("Some variables in your model are not identified as categorical.") varList <- lavaanParTable$rhs[lavaanParTable$op == "=~"] facName <- lavaanParTable$lhs[(lavaanParTable$op == "=~") & (lavaanParTable$rhs %in% varList)] if(length(unique(sapply(facName, function(x) length(x)))) > 1) stop("The numbers of variables in each element are not equal.") varList <- unique(varList) facName <- unique(facName) # Check whether the factor configuration is the same across gorups groupParTable <- split(lavaanParTable, lavaanParTable$group) group1pt <- groupParTable[[1]] groupParTable <- lapply(groupParTable, "[", c("lhs", "op", "rhs")) if(!multipleAllEqualList(lapply(groupParTable, function(x) sapply(x, "[", x$op == "=~")))) stop("Factor configuration is not the same across groups") # Extract the number of thresholds numThreshold <- table(sapply(group1pt, "[", group1pt$op == "|")[,"lhs"]) # Find the indicators of each factor group1facload <- sapply(group1pt, "[", group1pt$op == "=~") factorRep <- split(group1facload[,"rhs"], group1facload[,"lhs"]) # Find marker variables marker <- rep(NA, length(factorRep)) numThresholdMarker <- rep(NA, length(factorRep)) for(i in seq_along(factorRep)) { temp <- sapply(group1pt, "[", group1pt$rhs %in% factorRep[[i]] & group1pt$op 
== "=~" & group1pt$lhs == names(factorRep)[i]) marker[i] <- temp[!is.na(temp[,"ustart"]), "rhs"] numThresholdMarker[i] <- numThreshold[marker[i]] } numThresholdFactorRep <- lapply(factorRep, function(x) numThreshold[x]) constraintSecondThreshold <- unlist(lapply(numThresholdFactorRep, function(x) names(which(x > 1)[1]))) constraintSecondThreshold <- constraintSecondThreshold[!is.na(constraintSecondThreshold)] # Find the marker variable of each facto for(i in names(numThreshold)) { lavaanParTable <- constrainParTable(lavaanParTable, i, "|", "t1", 1:ngroups) } if(length(constraintSecondThreshold) > 0) { for(i in constraintSecondThreshold) { lavaanParTable <- constrainParTable(lavaanParTable, i, "|", "t2", 1:ngroups) } } # Group 1 for(i in facName) { lavaanParTable <- fixParTable(lavaanParTable, i, "~1", "", 1, 0) # Fix factor means as 0 if(std.lv) { lavaanParTable <- fixParTable(lavaanParTable, i, "~~", i, 1, 1) } else { lavaanParTable <- freeParTable(lavaanParTable, i, "~~", i, 1, NA) # Free factor variances } # Assuming that all factor covariances are freeParTable } for(i in varList) { lavaanParTable <- fixParTable(lavaanParTable, i, "~~", i, 1, 1) } # Other groups for(k in 2:ngroups) { for(i in facName) { lavaanParTable <- freeParTable(lavaanParTable, i, "~1", "", k, NA) if(std.lv) { lavaanParTable <- fixParTable(lavaanParTable, i, "~~", i, k, 1) } else { lavaanParTable <- freeParTable(lavaanParTable, i, "~~", i, k, NA) } } for(i in varList) { lavaanParTable <- freeParTable(lavaanParTable, i, "~~", i, k, NA) } # Fix the indicator variances of marker variables with two categories as 1 for(i in seq_along(marker)) { if(numThresholdMarker[i] == 1) lavaanParTable <- fixParTable(lavaanParTable, marker[i], "~~", marker[i], k, 1) } } if(std.lv) { for(i in seq_along(factorRep)) { lavaanParTable <- freeParTable(lavaanParTable, names(factorRep)[i], "=~", marker[i], 1:ngroups, NA) } } # Fit configural invariance ListConfigural <- List ListConfigural$model <- lavaanParTable 
fitConfigural <- do.call(lavaanlavaan, ListConfigural) # Create the parameter table for metric invariance ptMetric <- lavaanParTable for(i in seq_along(factorRep)) { varwithin <- factorRep[[i]] if(!std.lv) { varwithin <- setdiff(varwithin, marker[i]) } for(j in seq_along(varwithin)) { ptMetric <- constrainParTable(ptMetric, names(factorRep)[i], "=~", varwithin[j], 1:ngroups) } } if(std.lv) { for(k in 2:ngroups) { for(i in facName) { ptMetric <- freeParTable(ptMetric, i, "~~", i, k, NA) } } } ListMetric <- List ListMetric$model <- ptMetric fitMetric <- do.call(lavaanlavaan, ListMetric) ptMeans <- ptStrict <- ptMetric nonMarker <- setdiff(names(numThreshold), marker) nonDichoMarker <- numThreshold[which(numThreshold[nonMarker] > 1)] scalar <- length(nonDichoMarker) > 0 if(scalar) { ptScalar <- ptMetric for(i in seq_along(numThreshold)) { thresholdName <- paste0("t", 1:numThreshold[i]) for(j in seq_along(thresholdName)) { ptScalar <- constrainParTable(ptScalar, names(numThreshold)[i], "|", thresholdName[j], 1:ngroups) } } ListScalar <- List ListScalar$model <- ptScalar fitScalar <- do.call(lavaanlavaan, ListScalar) ptMeans <- ptStrict <- ptScalar } fitStrict <- NULL # Create the parameter table for strict invariance if specified if(strict) { ptStrict <- ptScalar for(k in 2:ngroups) { # Constrain measurement error variances for(i in varList) { ptStrict <- fixParTable(ptStrict, i, "~~", i, k, 1) } } ListStrict <- List ListStrict$model <- ptStrict fitStrict <- do.call(lavaanlavaan, ListStrict) ptMeans <- ptStrict } # Create the parameter table for mean equality # Constrain factor means to be equal for(k in 2:ngroups) { ptMeans <- fixParTable(ptMeans, facName, "~1", "", k, ustart = 0) } ListMeans <- List ListMeans$model <- ptMeans fitMeans <- do.call(lavaanlavaan, ListMeans) FIT <- invisible(list(fit.configural = fitConfigural, fit.loadings = fitMetric, fit.thresholds = fitScalar, fit.residuals = fitStrict, fit.means = fitMeans)) FIT <- FIT[!sapply(FIT, is.null)] 
if(!quiet) { printInvarianceResult(FIT, fit.measures, method) } invisible(FIT) } multipleAllEqual <- function(...) { obj <- list(...) multipleAllEqualList(obj) } multipleAllEqualList <- function(obj) { for (i in 2:length(obj)) { for (j in 1:(i - 1)) { temp <- isTRUE(all.equal(obj[[i]], obj[[j]])) if (!temp) return(FALSE) } } return(TRUE) } multipleAnyEqual <- function(...) { obj <- list(...) multipleAnyEqualList(obj) } multipleAnyEqualList <- function(obj) { for (i in 2:length(obj)) { for (j in 1:(i - 1)) { temp <- isTRUE(all.equal(obj[[i]], obj[[j]])) if (temp) return(TRUE) } } return(FALSE) } semTools/R/measurementInvariance.R0000644000175100001440000001234413000201061016731 0ustar hornikusersmeasurementInvariance <- measurementinvariance <- function(..., std.lv = FALSE, strict=FALSE, quiet=FALSE, fit.measures = "default", method = "satorra.bentler.2001") { lavaancfa <- function(...) { lavaan::cfa(...)} # check for a group.equal argument in ... dotdotdot <- list(...) if(!is.null(dotdotdot$group.equal)) stop("lavaan ERROR: group.equal argument should not be used") res <- list() # base-line model: configural invariance configural <- dotdotdot configural$group.equal <- "" template <- do.call(lavaancfa, configural) pttemplate <- lavaan::partable(template) varnames <- unique(pttemplate$rhs[pttemplate$op == "=~"]) facnames <- unique(pttemplate$lhs[(pttemplate$op == "=~") & (pttemplate$rhs %in% varnames)]) ngroups <- max(pttemplate$group) if(ngroups <= 1) stop("Well, the number of groups is 1. 
Measurement invariance across 'groups' cannot be done.") if(std.lv) { for(i in facnames) { pttemplate <- fixParTable(pttemplate, i, "~~", i, 1:ngroups, 1) } fixloadings <- which(pttemplate$op == "=~" & pttemplate$free == 0) for(i in fixloadings) { pttemplate <- freeParTable(pttemplate, pttemplate$lhs[i], "=~", pttemplate$rhs[i], pttemplate$group[i]) } res$fit.configural <- refit(pttemplate, template) } else { res$fit.configural <- template } # fix loadings across groups if(std.lv) { findloadings <- which(pttemplate$op == "=~" & pttemplate$free != 0 & pttemplate$group == 1) for(i in findloadings) { pttemplate <- constrainParTable(pttemplate, pttemplate$lhs[i], "=~", pttemplate$rhs[i], 1:ngroups) } for(i in facnames) { pttemplate <- freeParTable(pttemplate, i, "~~", i, 2:ngroups) } res$fit.loadings <- refit(pttemplate, template) } else { loadings <- dotdotdot loadings$group.equal <- c("loadings") res$fit.loadings <- do.call("cfa", loadings) } # fix loadings + intercepts across groups if(std.lv) { findintcepts <- which(pttemplate$op == "~1" & pttemplate$lhs %in% varnames & pttemplate$free != 0 & pttemplate$group == 1) for(i in findintcepts) { pttemplate <- constrainParTable(pttemplate, pttemplate$lhs[i], "~1", "", 1:ngroups) } for(i in facnames) { pttemplate <- freeParTable(pttemplate, i, "~1", "", 2:ngroups) } res$fit.intercepts <- refit(pttemplate, template) } else { intercepts <- dotdotdot intercepts$group.equal <- c("loadings", "intercepts") res$fit.intercepts <- do.call(lavaancfa, intercepts) } if(strict) { if(std.lv) { findresiduals <- which(pttemplate$op == "~~" & pttemplate$lhs %in% varnames & pttemplate$rhs == pttemplate$lhs & pttemplate$free != 0 & pttemplate$group == 1) for(i in findresiduals) { pttemplate <- constrainParTable(pttemplate, pttemplate$lhs[i], "~~", pttemplate$rhs[i], 1:ngroups) } res$fit.residuals <- refit(pttemplate, template) for(i in facnames) { pttemplate <- fixParTable(pttemplate, i, "~1", "", 1:ngroups, 0) } res$fit.means <- 
refit(pttemplate, template) } else { # fix loadings + intercepts + residuals residuals <- dotdotdot residuals$group.equal <- c("loadings", "intercepts", "residuals") res$fit.residuals <- do.call(lavaancfa, residuals) # fix loadings + residuals + intercepts + means means <- dotdotdot means$group.equal <- c("loadings", "intercepts", "residuals", "means") res$fit.means <- do.call(lavaancfa, means) } } else { if(std.lv) { for(i in facnames) { pttemplate <- fixParTable(pttemplate, i, "~1", "", 1:ngroups, 0) } res$fit.means <- refit(pttemplate, template) } else { # fix loadings + intercepts + means means <- dotdotdot means$group.equal <- c("loadings", "intercepts", "means") res$fit.means <- do.call(lavaancfa, means) } } if(!quiet) { printInvarianceResult(res, fit.measures, method) } invisible(res) } printInvarianceResult <- function(FIT, fit.measures, method) { # compare models NAMES <- names(FIT); names(FIT) <- NULL lavaanLavTestLRT <- function(...) { lavaan::lavTestLRT(...) } TABLE <- do.call(lavaanLavTestLRT, c(FIT, list(model.names = NAMES, method = method))) if(length(fit.measures) == 1L && fit.measures == "default") { # scaled test statistic? 
if(length(lavaan::lavInspect(FIT[[1]], "test")) > 1L) { fit.measures <- c("cfi.scaled", "rmsea.scaled") } else { fit.measures <- c("cfi", "rmsea") } } # add some fit measures if(length(fit.measures)) { FM <- lapply(FIT, lavaan::fitMeasures, fit.measures) FM.table1 <- sapply(fit.measures, function(x) sapply(FM, "[[", x)) if(length(FM) == 1L) { FM.table1 <- rbind( rep(as.numeric(NA), length(fit.measures)), FM.table1 ) } if(length(FM) > 1L) { FM.table2 <- rbind(as.numeric(NA), abs(apply(FM.table1, 2, diff))) colnames(FM.table2) <- paste(colnames(FM.table2), ".delta", sep="") FM.TABLE <- as.data.frame(cbind(FM.table1, FM.table2)) } else { FM.TABLE <- as.data.frame(FM.table1) } rownames(FM.TABLE) <- rownames(TABLE) class(FM.TABLE) <- c("lavaan.data.frame", "data.frame") } cat("\n") cat("Measurement invariance models:\n\n") cat(paste(paste("Model", seq_along(FIT), ":", NAMES), collapse = "\n")) cat("\n\n") print(TABLE) if(length(fit.measures)) { cat("\n\n") cat("Fit measures:\n\n") print(FM.TABLE) cat("\n") } } semTools/R/PAVranking.R0000644000175100001440000010556513000201061014414 0ustar hornikusers PAVranking <- function(nPerPar, facPlc, nAlloc=100, parceloutput=0, syntaxA, syntaxB, dataset, names = NULL, leaveout=0, seed=NA, ...) 
{ if(is.character(dataset)){ dataset <- read.csv(dataset) } if(is.null(names)) names <- matrix(NA,length(nPerPar), 1) if (is.na(seed)==FALSE) set.seed(seed) ## set random seed if specified options(max.print=1000000) ### allow many tables to be outputted ##Create parceled datasets dataset <- as.matrix(dataset) if(nAlloc<2) stop("Minimum of two allocations required.") if(is.list(facPlc)){ if(is.numeric(facPlc[[1]][1])==FALSE){ facPlcb <- facPlc Namesv <- colnames(dataset) for(i in 1:length(facPlc)){ for(j in 1:length(facPlc[[i]])){ facPlcb[[i]][j] <- match(facPlc[[i]][j],Namesv) } facPlcb[[i]] <- as.numeric(facPlcb[[i]]) } facPlc <- facPlcb } # facPlc2 <- rep(0, sum(sapply(facPlc, length))) facPlc2 <- rep(0,ncol(dataset)) for(i in 1:length(facPlc)){ for(j in 1:length(facPlc[[i]])){ facPlc2[facPlc[[i]][j]] <- i } } facPlc <- facPlc2 } if(leaveout!=0){ if(is.numeric(leaveout)==FALSE){ leaveoutb <- rep(0,length(leaveout)) Namesv <- colnames(dataset) for(i in 1:length(leaveout)){ leaveoutb[i] <- match(leaveout[i],Namesv) } leaveout <- as.numeric(leaveoutb) } k1 <- .001 for(i in 1:length(leaveout)){ facPlc[leaveout[i]] <- facPlc[leaveout[i]] + k1 k1 <- k1 +.001 } } if(0 %in% facPlc == TRUE){ Zfreq <- sum(facPlc==0) for (i in 1:Zfreq){ Zplc <- match(0,facPlc) dataset <- dataset[ , -Zplc] facPlc <- facPlc[-Zplc] } ## this allows for unused variables in dataset, ## which are specified by zeros, and deleted } if(is.list(nPerPar)){ nPerPar2 <- c() for (i in 1:length(nPerPar)){ Onesp <- sum(facPlc>i & facPlc 0){ ##Bug was here. With 1 factor Maxv=0. Skip this with a single factor for (i in 1:Maxv){ Mat <- match(i+1, Locate) if(Npp[Mat] == Npp[Mat-1]){ stop('** WARNING! ** Parcels incorrectly specified. 
Check input!')} } } ## warning message if parcel crosses into multiple factors ## vector, parcel to which each variable belongs ## vector, factor to which each variables belongs ## if variables are in the same parcel, but different factors ## error message given in output Onevec <- facPlc - round(facPlc) NleaveA <- length(Onevec) - sum(Onevec==0) NleaveP <- sum(nPerPar==1) if(NleaveA < NleaveP){ print('** WARNING! ** Single-variable parcels have been requested. Check input!')} if(NleaveA > NleaveP) print('** WARNING! ** More non-parceled variables have been requested than provided for in parcel vector. Check input!') if(length(names)>1){ if(length(names) != length(nPerPar)){ print('** WARNING! ** Number of parcel names provided not equal to number of parcels requested. Check input!')}} Data <- c(1:ncol(dataset)) ## creates a vector of the number of indicators ## e.g. for three indicators, c(1, 2, 3) Nfactors <- max(facPlc) ## scalar, number of factors Nindicators <- length(Data) ## scalar, number of indicators Npar <- length(nPerPar) ## scalar, number of parcels Rmize <- runif(Nindicators, 1, Nindicators) ## create vector of randomly ordered numbers, ## length of number of indicators Data <- rbind(facPlc, Rmize, Data) ## "Data" becomes object of three rows, consisting of ## 1) factor to which each indicator belongs ## (in order to preserve indicator/factor ## assignment during randomization) ## 2) randomly order numbers ## 3) indicator number Results <- matrix(numeric(0), nAlloc, Nindicators) ##create empty matrix for parcel allocation matrix Pin <- nPerPar[1] for (i in 2:length(nPerPar)){ Pin <- c(Pin, nPerPar[i]+Pin[i-1]) ## creates vector which indicates the range ## of columns (endpoints) in each parcel } for (i in 1:nAlloc) { Data[2,]<-runif(Nindicators, 1, Nindicators) ## Replace second row with newly randomly ordered numbers Data <- Data[, order(Data[2,])] ## Order the columns according ## to the values of the second row Data <- Data[, order(Data[1,])] ## 
Order the columns according ## to the values of the first row ## in order to preserve factor assignment Results[i,] <- Data[3,] ## assign result to allocation matrix } Alpha <- rbind(Results[1,], dataset) ## bind first random allocation to dataset "Alpha" Allocations <- list() ## create empty list for allocation data matrices for (i in 1:nAlloc){ Ineff <- rep(NA, ncol(Results)) Ineff2 <- c(1:ncol(Results)) for (inefficient in 1:ncol(Results)){ Ineff[Results[i,inefficient]] <- Ineff2[inefficient] } Alpha[1,] <- Ineff ## replace first row of dataset matrix ## with row "i" from allocation matrix Beta <- Alpha[, order(Alpha[1,])] ## arrangle dataset columns by values of first row ## assign to temporary matrix "Beta" Temp <- matrix(NA, nrow(dataset), Npar) ## create empty matrix for averaged parcel variables TempAA <- if(length(1:Pin[1])>1) Beta[2:nrow(Beta) , 1:Pin[1]] else cbind(Beta[2:nrow(Beta) , 1:Pin[1]],Beta[2:nrow(Beta) , 1:Pin[1]]) Temp[, 1] <- rowMeans(TempAA,na.rm = TRUE) ## fill first column with averages from assigned indicators for (al in 2:Npar) { Plc <- Pin[al-1]+1 ## placeholder variable for determining parcel width TempBB <- if(length(Plc:Pin[al])>1) Beta[2:nrow(Beta) , Plc:Pin[al]] else cbind(Beta[2:nrow(Beta) , Plc:Pin[al]],Beta[2:nrow(Beta) , Plc:Pin[al]]) Temp[, al] <- rowMeans(TempBB,na.rm = TRUE) ## fill remaining columns with averages from assigned indicators } if(length(names)>1){ colnames(Temp) <- names } Allocations[[i]] <- Temp ## assign result to list of parcel datasets } ##Write parceled datasets if(as.vector(regexpr("/",parceloutput))!=-1){ replist<-matrix(NA,nAlloc,1) for (i in 1:nAlloc){ ##if (is.na(names)==TRUE) names <- matrix(NA,nrow( colnames(Allocations[[i]]) <- names write.table(Allocations[[i]],paste(parceloutput,'/parcelruns',i,'.dat',sep=''),row.names=FALSE,col.names=TRUE) replist[i,1]<-paste('parcelruns',i,'.dat',sep='') } 
write.table(replist,paste(parceloutput,"/parcelrunsreplist.dat",sep=''),quote=FALSE,row.names=FALSE,col.names=FALSE) } ##Model A estimation { Param_A <- list() ## list for parameter estimated for each imputation Fitind_A <- list() ## list for fit indices estimated for each imputation Converged_A <- list() ## list for whether or not each allocation converged ProperSolution_A <- list() ## list for whether or not each allocation has proper solutions ConvergedProper_A <- list() ## list for whether or not each allocation converged and has proper solutions for (i in 1:nAlloc){ data_A <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) ## convert allocation matrix to dataframe for model estimation fit_A <- lavaan::sem(syntaxA, data=data_A, ...) ## estimate model in lavaan if (lavaan::lavInspect(fit_A, "converged")==TRUE){ Converged_A[[i]] <- 1 } else Converged_A[[i]] <- 0 ## determine whether or not each allocation converged Param_A[[i]] <- lavaan::parameterEstimates(fit_A)[,c("lhs","op","rhs","est","se","z","pvalue","ci.lower","ci.upper")] ## assign allocation parameter estimates to list if (lavaan::lavInspect(fit_A, "post.check")==TRUE & Converged_A[[i]]==1){ ProperSolution_A[[i]] <- 1 } else ProperSolution_A[[i]] <- 0 ## determine whether or not each allocation has proper solutions if (any(is.na(Param_A[[i]][,5]==TRUE))) ProperSolution_A[[i]] <- 0 ## make sure each allocation has existing SE if (Converged_A[[i]]==1 & ProperSolution_A[[i]]==1) { ConvergedProper_A[[i]] <- 1 } else ConvergedProper_A[[i]] <- 0 ## determine whether or not each allocation converged and has proper solutions if (ConvergedProper_A[[i]]==0) Param_A[[i]][,4:9] <- matrix(data=NA,nrow(Param_A[[i]]),6) ## make parameter estimates null for nonconverged, improper solutions if (ConvergedProper_A[[i]]==1) { Fitind_A[[i]] <- lavaan::fitMeasures(fit_A, c("chisq", "df", "cfi", "tli", "rmsea", "srmr", "logl", "bic", "aic")) } else Fitind_A[[i]] <- c(NA,NA,NA,NA,NA,NA,NA,NA,NA) ### assign 
allocation parameter estimates to list } nConverged_A <- Reduce("+",Converged_A) ## count number of converged allocations nProperSolution_A <- Reduce("+",ProperSolution_A) ## count number of allocations with proper solutions nConvergedProper_A <- Reduce("+",ConvergedProper_A) ## count number of allocations with proper solutions if (nConvergedProper_A==0) stop("All allocations failed to converge and/or yielded improper solutions for Model A and/or B.") ## stop program if no allocations converge Parmn_A <- Param_A[[1]] ## assign first parameter estimates to mean dataframe ParSE_A <- matrix(NA, nrow(Parmn_A), nAlloc) ParSEmn_A <- Parmn_A[,5] Parsd_A <- matrix(NA, nrow(Parmn_A), nAlloc) ## assign parameter estimates for S.D. calculation Fitmn_A <- Fitind_A[[1]] ## assign first fit indices to mean dataframe Fitsd_A <- matrix(NA, length(Fitmn_A), nAlloc) ## assign fit indices for S.D. calculation Sigp_A <- matrix(NA, nrow(Parmn_A), nAlloc) ## assign p-values to calculate percentage significant Fitind_A <- data.frame(Fitind_A) ### convert fit index table to data frame for (i in 1:nAlloc){ Parsd_A[,i] <- Param_A[[i]][,4] ## assign parameter estimates for S.D. estimation ParSE_A[,i] <- Param_A[[i]][,5] if(i>1){ParSEmn_A <- rowSums(cbind(ParSEmn_A,Param_A[[i]][,5]),na.rm=TRUE)} Sigp_A[,ncol(Sigp_A)-i+1] <- Param_A[[i]][,7] ## assign p-values to calculate percentage significant Fitsd_A[,i] <- Fitind_A[[i]] ## assign fit indices for S.D. 
estimation if(i>1){Parmn_A[,4:ncol(Parmn_A)] <- rowSums(cbind(Parmn_A[,4:ncol(Parmn_A)],Param_A[[i]][,4:ncol(Parmn_A)]),na.rm=TRUE)} ## add together all parameter estimates if(i>1){Fitmn_A <- rowSums(cbind(Fitmn_A,Fitind_A[[i]]),na.rm=TRUE)} ## add together all fit indices } Sigp_A <- Sigp_A + .45 Sigp_A <- apply(Sigp_A, c(1,2), round) Sigp_A <- 1 - as.vector(rowMeans(Sigp_A, na.rm = TRUE)) ## calculate percentage significant parameters Parsum_A <- cbind(apply(Parsd_A,1,mean,na.rm=TRUE),apply(Parsd_A,1,sd,na.rm=TRUE),apply(Parsd_A,1,max,na.rm=TRUE),apply(Parsd_A,1,min,na.rm=TRUE),apply(Parsd_A,1,max,na.rm=TRUE)-apply(Parsd_A,1,min,na.rm=TRUE), Sigp_A*100) colnames(Parsum_A) <- c("Avg Est.","S.D.","MAX","MIN","Range", "% Sig") ## calculate parameter S.D., minimum, maximum, range, bind to percentage significant ParSEmn_A <- Parmn_A[,1:3] ParSEfn_A <- cbind(ParSEmn_A,apply(ParSE_A,1,mean,na.rm=TRUE),apply(ParSE_A,1,sd,na.rm=TRUE),apply(ParSE_A,1,max,na.rm=TRUE),apply(ParSE_A,1,min,na.rm=TRUE),apply(ParSE_A,1,max,na.rm=TRUE)-apply(ParSE_A,1,min,na.rm=TRUE)) colnames(ParSEfn_A) <- c("lhs", "op", "rhs", "Avg SE","S.D.","MAX","MIN","Range") Fitsum_A <- cbind(apply(Fitsd_A,1,mean,na.rm=TRUE),apply(Fitsd_A,1,sd,na.rm=TRUE),apply(Fitsd_A,1,max,na.rm=TRUE),apply(Fitsd_A,1,min,na.rm=TRUE),apply(Fitsd_A,1,max,na.rm=TRUE)-apply(Fitsd_A,1,min,na.rm=TRUE)) rownames(Fitsum_A) <- c("chisq", "df", "cfi", "tli", "rmsea", "srmr", "logl", "bic", "aic") ## calculate fit S.D., minimum, maximum, range Parmn_A[,4:ncol(Parmn_A)] <- Parmn_A[,4:ncol(Parmn_A)] / nConvergedProper_A ## divide totalled parameter estimates by number converged allocations Parmn_A <- Parmn_A[,1:3] ## remove confidence intervals from output Parmn_A <- cbind(Parmn_A, Parsum_A) ## bind parameter average estimates to cross-allocation information Fitmn_A <- Fitmn_A / nConvergedProper_A ## divide totalled fit indices by number converged allocations pChisq_A <- list() ## create empty list for Chi-square p-values sigChisq_A 
<- list() ## create empty list for Chi-square significance for (i in 1:nAlloc){ pChisq_A[[i]] <- (1-pchisq(Fitsd_A[1,i],Fitsd_A[2,i])) ## calculate p-value for each Chi-square if (is.na(pChisq_A[[i]])==FALSE & pChisq_A[[i]]<.05) { sigChisq_A[[i]] <- 1 } else sigChisq_A[[i]] <- 0 } ## count number of allocations with significant chi-square PerSigChisq_A <- (Reduce("+",sigChisq_A))/nConvergedProper_A*100 PerSigChisq_A <- round(PerSigChisq_A,3) ## calculate percent of allocations with significant chi-square PerSigChisqCol_A <- c(PerSigChisq_A,"n/a","n/a","n/a","n/a","n/a","n/a","n/a","n/a") ## create list of Chi-square Percent Significant and "n/a" (used for fit summary table) options(stringsAsFactors=FALSE) ## set default option to allow strings into dataframe without converting to factors Fitsum_A <- data.frame(Fitsum_A,PerSigChisqCol_A) colnames(Fitsum_A) <- c("Avg Ind","S.D.","MAX","MIN","Range","% Sig") ### bind to fit averages options(stringsAsFactors=TRUE) ## unset option to allow strings into dataframe without converting to factors ParSEfn_A[,4:8] <- apply(ParSEfn_A[,4:8], 2, round, digits = 3) Parmn_A[,4:9] <- apply(Parmn_A[,4:9], 2, round, digits = 3) Fitsum_A[,1:5] <- apply(Fitsum_A[,1:5], 2, round, digits = 3) ## round output to three digits Fitsum_A[2,2:5] <- c("n/a","n/a","n/a","n/a") ## Change df row to "n/a" for sd, max, min, and range Output_A <- list(Parmn_A,ParSEfn_A,Fitsum_A) names(Output_A) <- c('Estimates_A', 'SE_A', 'Fit_A') ## output summary for model A } ##Model B estimation { Param <- list() ## list for parameter estimated for each imputation Fitind <- list() ## list for fit indices estimated for each imputation Converged <- list() ## list for whether or not each allocation converged ProperSolution <- list() ## list for whether or not each allocation has proper solutions ConvergedProper <- list() ## list for whether or not each allocation is converged and proper for (i in 1:nAlloc){ data <- as.data.frame(Allocations[[i]], row.names = NULL, 
optional = FALSE) ## convert allocation matrix to dataframe for model estimation fit <- lavaan::sem(syntaxB, data=data, ...) ## estimate model in lavaan if (lavaan::lavInspect(fit, "converged")==TRUE){ Converged[[i]] <- 1 } else Converged[[i]] <- 0 ## determine whether or not each allocation converged Param[[i]] <- lavaan::parameterEstimates(fit)[,c("lhs","op","rhs","est","se","z","pvalue","ci.lower","ci.upper")] ## assign allocation parameter estimates to list if (lavaan::lavInspect(fit, "post.check")==TRUE & Converged[[i]]==1) { ProperSolution[[i]] <- 1 } else ProperSolution[[i]] <- 0 ## determine whether or not each allocation has proper solutions if (any(is.na(Param[[i]][,5]==TRUE))) ProperSolution[[i]] <- 0 ## make sure each allocation has existing SE if (Converged[[i]]==1 & ProperSolution[[i]]==1) { ConvergedProper[[i]] <- 1 } else ConvergedProper[[i]] <- 0 ## determine whether or not each allocation converged and has proper solutions if (ConvergedProper[[i]]==0) Param[[i]] <- matrix(data=NA,nrow(Param[[i]]),ncol(Param[[i]])) ## make parameter estimates null for nonconverged, improper solutions if (ConvergedProper[[i]]==1) { Fitind[[i]] <- lavaan::fitMeasures(fit, c("chisq", "df", "cfi", "tli", "rmsea", "srmr", "logl", "bic", "aic")) } else Fitind[[i]] <- c(NA,NA,NA,NA,NA,NA,NA,NA,NA) ### assign allocation parameter estimates to list } nConverged <- Reduce("+",Converged) ## count number of converged allocations nProperSolution <- Reduce("+",ProperSolution) ## count number of allocations with proper solutions nConvergedProper <- Reduce("+",ConvergedProper) ## count number of allocations with proper solutions if (nConvergedProper==0) stop("All allocations failed to converge and/or yielded improper solutions for Model A and/or B.") ## stop program if no allocations converge Parmn <- Param[[1]] ## assign first parameter estimates to mean dataframe ParSE <- matrix(NA, nrow(Parmn), nAlloc) ParSEmn <- Parmn[,5] Parsd <- matrix(NA, nrow(Parmn), nAlloc) ## assign 
parameter estimates for S.D. calculation Fitmn <- Fitind[[1]] ## assign first fit indices to mean dataframe Fitsd <- matrix(NA, length(Fitmn), nAlloc) ## assign fit indices for S.D. calculation Sigp <- matrix(NA, nrow(Parmn), nAlloc) ## assign p-values to calculate percentage significant Fitind <- data.frame(Fitind) ### convert fit index table to dataframe for (i in 1:nAlloc){ Parsd[,i] <- Param[[i]][,4] ## assign parameter estimates for S.D. estimation ParSE[,i] <- Param[[i]][,5] if(i>1) ParSEmn <- rowSums(cbind(ParSEmn,Param[[i]][,5]),na.rm=TRUE) Sigp[,ncol(Sigp)-i+1] <- Param[[i]][,7] ## assign p-values to calculate percentage significant Fitsd[,i] <- Fitind[[i]] ## assign fit indices for S.D. estimation if(i>1){Parmn[,4:ncol(Parmn)] <- rowSums(cbind(Parmn[,4:ncol(Parmn)],Param[[i]][,4:ncol(Parmn)]),na.rm=TRUE)} ## add together all parameter estimates if(i>1){Fitmn <- rowSums(cbind(Fitmn,Fitind[[i]]),na.rm=TRUE)} ## add together all fit indices } Sigp <- Sigp + .45 Sigp <- apply(Sigp, c(1,2), round) Sigp <- 1 - as.vector(rowMeans(Sigp, na.rm = TRUE)) ## calculate percentage significant parameters Parsum <- cbind(apply(Parsd,1,mean,na.rm=TRUE),apply(Parsd,1,sd,na.rm=TRUE),apply(Parsd,1,max,na.rm=TRUE),apply(Parsd,1,min,na.rm=TRUE),apply(Parsd,1,max,na.rm=TRUE)-apply(Parsd,1,min,na.rm=TRUE), Sigp*100) colnames(Parsum) <- c("Avg Est","S.D.","MAX","MIN","Range", "% Sig") ## calculate parameter S.D., minimum, maximum, range, bind to percentage significant ParSEmn <- Parmn[,1:3] ParSEfn <- cbind(ParSEmn,apply(ParSE,1,mean,na.rm=TRUE),apply(ParSE,1,sd,na.rm=TRUE),apply(ParSE,1,max,na.rm=TRUE),apply(ParSE,1,min,na.rm=TRUE),apply(ParSE,1,max,na.rm=TRUE)-apply(ParSE,1,min,na.rm=TRUE)) colnames(ParSEfn) <- c("lhs", "op", "rhs", "Avg SE","S.D.","MAX","MIN","Range") Fitsum <- cbind(apply(Fitsd,1,mean,na.rm=TRUE),apply(Fitsd,1,sd,na.rm=TRUE),apply(Fitsd,1,max,na.rm=TRUE),apply(Fitsd,1,min,na.rm=TRUE),apply(Fitsd,1,max,na.rm=TRUE)-apply(Fitsd,1,min,na.rm=TRUE)) 
rownames(Fitsum) <- c("chisq", "df", "cfi", "tli", "rmsea", "srmr", "logl", "bic", "aic") ## calculate fit S.D., minimum, maximum, range Parmn[,4:ncol(Parmn)] <- Parmn[,4:ncol(Parmn)] / nConvergedProper ## divide totalled parameter estimates by number converged allocations Parmn <- Parmn[,1:3] ## remove confidence intervals from output Parmn <- cbind(Parmn, Parsum) ## bind parameter average estimates to cross-allocation information Fitmn <- as.numeric(Fitmn) ## make fit index values numeric Fitmn <- Fitmn / nConvergedProper ## divide totalled fit indices by number converged allocations pChisq <- list() ## create empty list for Chi-square p-values sigChisq <- list() ## create empty list for Chi-square significance for (i in 1:nAlloc){ pChisq[[i]] <- (1-pchisq(Fitsd[1,i],Fitsd[2,i])) ## calculate p-value for each Chi-square if (is.na(pChisq[[i]])==FALSE & pChisq[[i]]<.05) { sigChisq[[i]] <- 1 } else sigChisq[[i]] <- 0 } ## count number of allocations with significant chi-square PerSigChisq <- (Reduce("+",sigChisq))/nConvergedProper*100 PerSigChisq <- round(PerSigChisq,3) ## calculate percent of allocations with significant chi-square PerSigChisqCol <- c(PerSigChisq,"n/a","n/a","n/a","n/a","n/a","n/a","n/a","n/a") ## create list of Chi-square Percent Significant and "n/a" (used for fit summary table) options(stringsAsFactors=FALSE) ## set default option to allow strings into dataframe without converting to factors Fitsum <- data.frame(Fitsum,PerSigChisqCol) colnames(Fitsum) <- c("Avg Ind","S.D.","MAX","MIN","Range","% Sig") ### bind to fit averages options(stringsAsFactors=TRUE) ## unset option to allow strings into dataframe without converting to factors ParSEfn[,4:8] <- apply(ParSEfn[,4:8], 2, round, digits = 3) Parmn[,4:9] <- apply(Parmn[,4:9], 2, round, digits = 3) Fitsum[,1:5] <- apply(Fitsum[,1:5], 2, round, digits = 3) ## round output to three digits Fitsum[2,2:5] <- c("n/a","n/a","n/a","n/a") ## Change df row to "n/a" for sd, max, min, and range Output_B <- 
list(Parmn,ParSEfn,Fitsum) names(Output_B) <- c('Estimates_B', 'SE_B', 'Fit_B') ## output summary for model A } ##Model Comparison (everything in this section is new) { Converged_AB <- list() ## create list of convergence comparison for each allocation ProperSolution_AB <- list() ## create list of proper solution comparison for each allocation ConvergedProper_AB <- list() ## create list of convergence and proper solution comparison for each allocation lrtest_AB <- list() ## create list for likelihood ratio test for each allocation lrchisq_AB <- list() ## create list for likelihood ratio chi square value lrchisqp_AB <- list() ## create list for likelihood ratio test p-value lrsig_AB <- list() ## create list for likelihood ratio test significance for (i in 1:nAlloc){ if (Converged_A[[i]]==1 & Converged[[i]]==1) { Converged_AB[[i]] <- 1 } else Converged_AB[[i]] <- 0 ## compare convergence if (ProperSolution_A[[i]]==1 & ProperSolution[[i]]==1) { ProperSolution_AB[[i]] <- 1 } else ProperSolution_AB[[i]] <- 0 ## compare existence of proper solutions if (ConvergedProper_A[[i]]==1 & ConvergedProper[[i]]==1) { ConvergedProper_AB[[i]] <- 1 } else ConvergedProper_AB[[i]] <- 0 ## compare existence of proper solutions and convergence if (ConvergedProper_AB[[i]]==1) { data <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) ## convert allocation matrix to dataframe for model estimation fit_A <- lavaan::sem(syntaxA, data=data, ...) ## estimate model A in lavaan fit <- lavaan::sem(syntaxB, data=data, ...) 
## estimate model B in lavaan lrtest_AB[[i]] <- lavaan::lavTestLRT(fit_A,fit) ## likelihood ratio test comparing A and B lrtestd_AB <- as.data.frame(lrtest_AB[[i]], row.names = NULL, optional = FALSE) ## convert lrtest results to dataframe lrchisq_AB[[i]] <- lrtestd_AB[2,5] ## write lrtest chisq as single numeric variable lrchisqp_AB[[i]] <- lrtestd_AB[2,7] ## write lrtest p-value as single numeric variable if (lrchisqp_AB[[i]]<.05) { lrsig_AB[[i]] <- 1 } else { lrsig_AB[[i]] <- 0 } ## determine statistical significance of lrtest }} lrchisqp_AB <- unlist(lrchisqp_AB,recursive=TRUE,use.names=TRUE) ## convert lrchisqp_AB from list to vector lrchisqp_AB <- as.numeric(lrchisqp_AB) ## make lrchisqp_AB numeric lrsig_AB <- unlist(lrsig_AB,recursive=TRUE,use.names=TRUE) ## convert lrsig_AB from list to vector lrsig_AB <- as.numeric(lrsig_AB) ### make lrsig_AB numeric nConverged_AB <- Reduce("+",Converged_AB) ## count number of allocations that converged for both A and B nProperSolution_AB <- Reduce("+",ProperSolution_AB) ## count number of allocations with proper solutions for both A and B nConvergedProper_AB <- Reduce("+",ConvergedProper_AB) ## count number of allocations that converged and have proper solutions for both A and B ProConverged_AB <- (nConverged_AB/nAlloc)*100 ## calc proportion of allocations that converged for both A and B nlrsig_AB <- Reduce("+",lrsig_AB) ## count number of allocations with significant lrtest between A and B Prolrsig_AB <- (nlrsig_AB/nConvergedProper_AB)*100 ## calc proportion of allocations with significant lrtest between A and B lrchisq_AB <- unlist(lrchisq_AB,recursive=TRUE,use.names=TRUE) ### convert lrchisq_AB from list to vector lrchisq_AB <- as.numeric(lrchisq_AB) ### make lrchisq_AB numeric AvgLRT_AB <- (Reduce("+",lrchisq_AB))/nConvergedProper_AB ## calc average LRT LRTsum <- cbind(AvgLRT_AB,lrtestd_AB[2,3],sd(lrchisq_AB,na.rm=TRUE),max(lrchisq_AB),min(lrchisq_AB),max(lrchisq_AB)-min(lrchisq_AB),Prolrsig_AB) colnames(LRTsum) <- 
c("Avg LRT","df","S.D.","MAX","MIN","Range", "% Sig") ## calculate LRT distribution statistics FitDiff_AB <- Fitsd_A - Fitsd ## compute fit index difference matrix for (i in 1:nAlloc){ if (ConvergedProper_AB[[i]]!=1) FitDiff_AB[1:9,i] <- 0 } ### make fit differences zero for each non-converged allocation BICDiff_AB <- list() AICDiff_AB <- list() RMSEADiff_AB <- list() CFIDiff_AB <- list() TLIDiff_AB <- list() SRMRDiff_AB <- list() BICDiffGT10_AB <- list() ## create list noting each allocation in which A is preferred over B BICDiff_BA <- list() AICDiff_BA <- list() RMSEADiff_BA <- list() CFIDiff_BA <- list() TLIDiff_BA <- list() SRMRDiff_BA <- list() BICDiffGT10_BA <- list() ## create list noting each allocation in which B is preferred over A for (i in 1:nAlloc){ if (FitDiff_AB[8,i]<0){ BICDiff_AB[[i]] <- 1 } else BICDiff_AB[[i]] <- 0 if (FitDiff_AB[9,i]<0){ AICDiff_AB[[i]] <- 1 } else AICDiff_AB[[i]] <- 0 if (FitDiff_AB[5,i]<0){ RMSEADiff_AB[[i]] <- 1 } else RMSEADiff_AB[[i]] <- 0 if (FitDiff_AB[3,i]>0){ CFIDiff_AB[[i]] <- 1 } else CFIDiff_AB[[i]] <- 0 if (FitDiff_AB[4,i]>0){ TLIDiff_AB[[i]] <- 1 } else TLIDiff_AB[[i]] <- 0 if (FitDiff_AB[6,i]<0){ SRMRDiff_AB[[i]] <- 1 } else SRMRDiff_AB[[i]] <- 0 if (FitDiff_AB[8,i]<(-10)){ BICDiffGT10_AB[[i]] <- 1 } else BICDiffGT10_AB[[i]] <- 0 } nBIC_AoverB <- Reduce("+",BICDiff_AB) nAIC_AoverB <- Reduce("+",AICDiff_AB) nRMSEA_AoverB <- Reduce("+",RMSEADiff_AB) nCFI_AoverB <- Reduce("+",CFIDiff_AB) nTLI_AoverB <- Reduce("+",TLIDiff_AB) nSRMR_AoverB <- Reduce("+",SRMRDiff_AB) nBICDiffGT10_AoverB <- Reduce("+",BICDiffGT10_AB) ## compute number of "A preferred over B" for each fit index for (i in 1:nAlloc){ if (FitDiff_AB[8,i]>0){ BICDiff_BA[[i]] <- 1 } else BICDiff_BA[[i]] <- 0 if (FitDiff_AB[9,i]>0){ AICDiff_BA[[i]] <- 1 } else AICDiff_BA[[i]] <- 0 if (FitDiff_AB[5,i]>0){ RMSEADiff_BA[[i]] <- 1 } else RMSEADiff_BA[[i]] <- 0 if (FitDiff_AB[3,i]<0){ CFIDiff_BA[[i]] <- 1 } else CFIDiff_BA[[i]] <- 0 if (FitDiff_AB[4,i]<0){ 
TLIDiff_BA[[i]] <- 1 } else TLIDiff_BA[[i]] <- 0 if (FitDiff_AB[6,i]>0){ SRMRDiff_BA[[i]] <- 1 } else SRMRDiff_BA[[i]] <- 0 if (FitDiff_AB[8,i]>(10)){ BICDiffGT10_BA[[i]] <- 1 } else BICDiffGT10_BA[[i]] <- 0 } nBIC_BoverA <- Reduce("+",BICDiff_BA) nAIC_BoverA <- Reduce("+",AICDiff_BA) nRMSEA_BoverA <- Reduce("+",RMSEADiff_BA) nCFI_BoverA <- Reduce("+",CFIDiff_BA) nTLI_BoverA <- Reduce("+",TLIDiff_BA) nSRMR_BoverA <- Reduce("+",SRMRDiff_BA) nBICDiffGT10_BoverA <- Reduce("+",BICDiffGT10_BA) ## compute number of "B preferred over A" for each fit index BICDiffAvgtemp <- list() AICDiffAvgtemp <- list() RMSEADiffAvgtemp <- list() CFIDiffAvgtemp <- list() TLIDiffAvgtemp <- list() SRMRDiffAvgtemp <- list() BICgt10DiffAvgtemp <- list() ## create empty list for average fit index differences for (i in 1:nAlloc){ if (BICDiff_AB[[i]]!=1){ BICDiffAvgtemp[[i]] <- 0 } else BICDiffAvgtemp[[i]] <- FitDiff_AB[8,i] if (AICDiff_AB[[i]]!=1){ AICDiffAvgtemp[[i]] <- 0 } else AICDiffAvgtemp[[i]] <- FitDiff_AB[9,i] if (RMSEADiff_AB[[i]]!=1){ RMSEADiffAvgtemp[[i]] <- 0 } else RMSEADiffAvgtemp[[i]] <- FitDiff_AB[5,i] if (CFIDiff_AB[[i]]!=1){ CFIDiffAvgtemp[[i]] <- 0 } else CFIDiffAvgtemp[[i]] <- FitDiff_AB[3,i] if (TLIDiff_AB[[i]]!=1){ TLIDiffAvgtemp[[i]] <- 0 } else TLIDiffAvgtemp[[i]] <- FitDiff_AB[4,i] if (SRMRDiff_AB[[i]]!=1){ SRMRDiffAvgtemp[[i]] <- 0 } else SRMRDiffAvgtemp[[i]] <- FitDiff_AB[6,i] if (BICDiffGT10_AB[[i]]!=1){ BICgt10DiffAvgtemp[[i]] <- 0 } else BICgt10DiffAvgtemp[[i]] <- FitDiff_AB[8,i] } ## make average fit index difference list composed solely of values where A is preferred over B BICDiffAvg_AB <- (Reduce("+",BICDiffAvgtemp))/nBIC_AoverB*(-1) AICDiffAvg_AB <- (Reduce("+",AICDiffAvgtemp))/nAIC_AoverB*(-1) RMSEADiffAvg_AB <- (Reduce("+",RMSEADiffAvgtemp))/nRMSEA_AoverB*(-1) CFIDiffAvg_AB <- (Reduce("+",CFIDiffAvgtemp))/nCFI_AoverB TLIDiffAvg_AB <- (Reduce("+",TLIDiffAvgtemp))/nTLI_AoverB SRMRDiffAvg_AB <- (Reduce("+",SRMRDiffAvgtemp))/nSRMR_AoverB*(-1) BICgt10DiffAvg_AB 
<- (Reduce("+",BICgt10DiffAvgtemp))/nBICDiffGT10_AoverB*(-1) ## calc average fit index difference when A is preferred over B FitDiffAvg_AoverB <- list(BICDiffAvg_AB,AICDiffAvg_AB,RMSEADiffAvg_AB,CFIDiffAvg_AB,TLIDiffAvg_AB,SRMRDiffAvg_AB) ## create list of all fit index differences when A is preferred over B FitDiffAvg_AoverB <- unlist(FitDiffAvg_AoverB,recursive=TRUE,use.names=TRUE) ### convert from list to vector for (i in 1:nAlloc){ if (BICDiff_BA[[i]]!=1){ BICDiffAvgtemp[[i]] <- 0 } else BICDiffAvgtemp[[i]] <- FitDiff_AB[8,i] if (AICDiff_BA[[i]]!=1){ AICDiffAvgtemp[[i]] <- 0 } else AICDiffAvgtemp[[i]] <- FitDiff_AB[9,i] if (RMSEADiff_BA[[i]]!=1){ RMSEADiffAvgtemp[[i]] <- 0 } else RMSEADiffAvgtemp[[i]] <- FitDiff_AB[5,i] if (CFIDiff_BA[[i]]!=1){ CFIDiffAvgtemp[[i]] <- 0 } else CFIDiffAvgtemp[[i]] <- FitDiff_AB[3,i] if (TLIDiff_BA[[i]]!=1){ TLIDiffAvgtemp[[i]] <- 0 } else TLIDiffAvgtemp[[i]] <- FitDiff_AB[4,i] if (SRMRDiff_BA[[i]]!=1){ SRMRDiffAvgtemp[[i]] <- 0 } else SRMRDiffAvgtemp[[i]] <- FitDiff_AB[6,i] if (BICDiffGT10_BA[[i]]!=1){ BICgt10DiffAvgtemp[[i]] <- 0 } else BICgt10DiffAvgtemp[[i]] <- FitDiff_AB[8,i] } ## make average fit index difference list composed solely of values where B is preferred over A BICDiffAvg_BA <- (Reduce("+",BICDiffAvgtemp))/nBIC_BoverA AICDiffAvg_BA <- (Reduce("+",AICDiffAvgtemp))/nAIC_BoverA RMSEADiffAvg_BA <- (Reduce("+",RMSEADiffAvgtemp))/nRMSEA_BoverA CFIDiffAvg_BA <- (Reduce("+",CFIDiffAvgtemp))/nCFI_BoverA*(-1) TLIDiffAvg_BA <- (Reduce("+",TLIDiffAvgtemp))/nTLI_BoverA*(-1) SRMRDiffAvg_BA <- (Reduce("+",SRMRDiffAvgtemp))/nSRMR_BoverA BICgt10DiffAvg_BA <- (Reduce("+",BICgt10DiffAvgtemp))/nBICDiffGT10_BoverA ## calc average fit index difference when B is preferred over A FitDiffAvg_BoverA <- list(BICDiffAvg_BA,AICDiffAvg_BA,RMSEADiffAvg_BA,CFIDiffAvg_BA,TLIDiffAvg_BA,SRMRDiffAvg_BA) ## create list of all fit index differences when B is preferred over A FitDiffAvg_BoverA <- unlist(FitDiffAvg_BoverA,recursive=TRUE,use.names=TRUE) 
### convert from list to vector FitDiffBICgt10_AoverB <- nBICDiffGT10_AoverB/nConvergedProper_AB*100 ### calculate portion of allocations where A strongly preferred over B FitDiffBICgt10_BoverA <- nBICDiffGT10_BoverA/nConvergedProper_AB*100 ### calculate portion of allocations where B strongly preferred over A FitDiffBICgt10 <- rbind(FitDiffBICgt10_AoverB,FitDiffBICgt10_BoverA) rownames(FitDiffBICgt10) <- c("Very Strong evidence for A>B","Very Strong evidence for B>A") colnames(FitDiffBICgt10) <- "% Allocations" ### create table of proportions of "A strongly preferred over B" and "B strongly preferred over A" FitDiff_AoverB <- list(nBIC_AoverB/nConvergedProper_AB*100,nAIC_AoverB/nConvergedProper_AB*100,nRMSEA_AoverB/nConvergedProper_AB*100,nCFI_AoverB/nConvergedProper_AB*100,nTLI_AoverB/nConvergedProper_AB*100,nSRMR_AoverB/nConvergedProper_AB*100) ### create list of all proportions of "A preferred over B" FitDiff_BoverA <- list(nBIC_BoverA/nConvergedProper_AB*100,nAIC_BoverA/nConvergedProper_AB*100,nRMSEA_BoverA/nConvergedProper_AB*100,nCFI_BoverA/nConvergedProper_AB*100,nTLI_BoverA/nConvergedProper_AB*100,nSRMR_BoverA/nConvergedProper_AB*100) ### create list of all proportions of "B preferred over A" FitDiff_AoverB <- unlist(FitDiff_AoverB,recursive=TRUE,use.names=TRUE) ### convert from list to vector FitDiff_BoverA <- unlist(FitDiff_BoverA,recursive=TRUE,use.names=TRUE) ### convert from list to vector FitDiffSum_AB <- cbind(FitDiff_AoverB,FitDiffAvg_AoverB,FitDiff_BoverA,FitDiffAvg_BoverA) colnames(FitDiffSum_AB) <- c("% A>B","Avg Amount A>B","% B>A","Avg Amount B>A") rownames(FitDiffSum_AB) <- c("bic","aic","rmsea","cfi","tli","srmr") ## create table showing number of allocations in which A>B and B>A as well as average difference values for (i in 1:nAlloc){ is.na(FitDiff_AB[1:9,i]) <- ConvergedProper_AB[[i]]!=1 } ### make fit differences missing for each non-converged allocation LRThistMax <- max(hist(lrchisqp_AB,plot=FALSE)$counts) BIChistMax <- 
max(hist(FitDiff_AB[8,1:nAlloc],plot=FALSE)$counts) AIChistMax <- max(hist(FitDiff_AB[9,1:nAlloc],plot=FALSE)$counts) RMSEAhistMax <- max(hist(FitDiff_AB[5,1:nAlloc],plot=FALSE)$counts) CFIhistMax <- max(hist(FitDiff_AB[3,1:nAlloc],plot=FALSE)$counts) TLIhistMax <- max(hist(FitDiff_AB[4,1:nAlloc],plot=FALSE)$counts) ### calculate y-axis height for each histogram LRThist <- hist(lrchisqp_AB,ylim=c(0,LRThistMax),xlab="p-value", main="LRT p-values") ## plot histogram of LRT p-values BIChist <- hist(FitDiff_AB[8,1:nAlloc],ylim=c(0,BIChistMax),xlab="BIC_modA - BIC_modB", main="BIC Diff") AIChist <- hist(FitDiff_AB[9,1:nAlloc],ylim=c(0,AIChistMax),xlab="AIC_modA - AIC_modB", main="AIC Diff") RMSEAhist <- hist(FitDiff_AB[5,1:nAlloc],ylim=c(0,RMSEAhistMax),xlab="RMSEA_modA - RMSEA_modB", main="RMSEA Diff") CFIhist <- hist(FitDiff_AB[3,1:nAlloc],ylim=c(0,CFIhistMax),xlab="CFI_modA - CFI_modB", main="CFI Diff") TLIhist <- hist(FitDiff_AB[4,1:nAlloc],ylim=c(0,TLIhistMax),xlab="TLI_modA - TLI_modB", main="TLI Diff") ### plot histograms for each index_modA - index_modB BIChist AIChist RMSEAhist CFIhist TLIhist ConvergedProperSum <- rbind(nConverged_A/nAlloc,nConverged/nAlloc,nConverged_AB/nAlloc,nConvergedProper_A/nAlloc,nConvergedProper/nAlloc,nConvergedProper_AB/nAlloc) rownames(ConvergedProperSum) <- c("Converged_A","Converged_B","Converged_AB","ConvergedProper_A","ConvergedProper_B","ConvergedProper_AB") colnames(ConvergedProperSum) <- "Proportion of Allocations" ### create table summarizing proportions of converged allocations and allocations with proper solutions Output_AB <- list(round(LRTsum,3),"LRT results are interpretable specifically for nested models",round(FitDiffSum_AB,3),round(FitDiffBICgt10,3),ConvergedProperSum) names(Output_AB) <- c('LRT Summary, Model A vs. 
Model B','Note:', 'Fit Index Differences','Percent of Allocations with |BIC Diff| > 10','Converged and Proper Solutions Summary') ### output for model comparison } return(list(Output_A,Output_B,Output_AB)) ## returns output for model A, model B, and the comparison of these } semTools/R/fmi.R0000644000175100001440000001246513000250017013170 0ustar hornikusers########### Mauricio Garnier Villarreal (mgv@ku.edu) ### Last updated: 14 October 2016 ######This function estimates the Fraction of Missing Information for the variance and mean of each variable in a list of multiple imputed data sets #### dat.imp is a list of the imputed data sets #### method is the model used for the estimation #### varnames is used to select a subset of variables #### digits is the number of decimals #### group is the grouping variable, in case you want to get the fmi for each group #### exclude are the variables that you wnat to exclude from the analysis fmi <- function(dat.imp, method="saturated", varnames=NULL, group=NULL, exclude=NULL, digits=3){ if(is.character(varnames)){ vars <- varnames } else { vars <- colnames(dat.imp[[1]]) } if(!is.null(group)){ vars <- vars[vars!=group] } if(!is.null(exclude)){ vars <- vars[vars!=exclude] } if(method == "saturated" | method == "sat"){ par.tab <- satParFMI(dat.imp, var.names=vars, groups=group) } if(method == "null"){ par.tab <- nullParFMI(dat.imp, var.names=vars, groups=group) } comb.results1 <- cfa.mi(par.tab, dat.imp, chi="none", meanstructure = TRUE, group = group) comb.results <- inspect(comb.results1, "impute")[[2]] ## FIXME: can't just be lavInspect because it is a lavaanStar comb.results <- data.frame(comb.results[,c("lhs","op","rhs","group")], round(lavaan::parameterEstimates(comb.results1)[,"est"], digits), round(comb.results[,c("fmi1","fmi2")], digits)) colnames(comb.results) <- c('lhs', 'op', 'rhs', 'group', 'coef', 'fmi.1', 'fmi.2') variances <- comb.results[comb.results$lhs==comb.results$rhs,] variances <- 
data.frame(variances[,"lhs"], variances[,"group"], variances[,"coef"], variances[,"fmi.1"], variances[,"fmi.2"]) colnames(variances) <- c('var', 'group', 'coef', 'fmi.1', 'fmi.2') var.means <- comb.results[comb.results$op=="~1",] var.means <- data.frame(var.means[,"lhs"], var.means[,"group"], var.means[,"coef"], var.means[,"fmi.1"], var.means[,"fmi.2"]) colnames(var.means) <- c('var', 'group', 'coef', 'fmi.1', 'fmi.2') if(method == "null"){ mes <- "These estimates used the null model, they may not be as precise as the saturated model estimates" results<-list(Variances=variances, Means=var.means, Message=mes) } else { results<-list(Variances=variances, Means=var.means) } return(results) } #### function to produce a parameter table for the saturated model satParFMI <- function(dat.imp, var.names=NULL, groups=NULL){ if(!is.null(groups)){ ngroups <- length(table(dat.imp[[1]][,groups])) } else { ngroups <- 1 } # gets the parameter table from the null model par.null <- nullParFMI(dat.imp, var.names, groups=groups) lhs.diag <- par.null$lhs op.diag <- par.null$op rhs.diag <- par.null$rhs gnull <- par.null$group #combine the variable names to set al the covariances combs <- t(combn(var.names, 2)) lhs.up <- rep(combs[, 1],times=ngroups) op.up <- rep("~~", length(lhs.up)) rhs.up <- rep(combs[, 2],times=ngroups) galt <- sort(rep(1:ngroups,times=length(lhs.up)/ngroups)) #put together the null table and the covariances lhs.all <- c(lhs.up, lhs.diag) id <- seq(1:length(lhs.all)) op.all <- c(op.up, op.diag) rhs.all <- c(rhs.up, rhs.diag) user <- rep(1,length(lhs.all)) group <- as.integer(c(galt,gnull)) free <- as.integer(id) ustart <- rep(NA, length(lhs.all)) exo <- rep(0, length(lhs.all)) label <- rep("", length(lhs.all)) plabel <- rep("", length(lhs.all)) par.sat <- list(id, lhs.all, op.all, rhs.all, user, group, free, ustart, exo, label, plabel) names(par.sat) <- c("id", "lhs", "op", "rhs", "user", "group", "free", "ustart", "exo", "label", "plabel") return(par.sat) } #### 
## nullParFMI: build a lavaan parameter table for the null model, i.e. only
## a free variance (x ~~ x) and a free intercept (x ~ 1) for every variable,
## replicated for each group. Rows are ordered by group, with each group's
## variance rows preceding its intercept rows.
nullParFMI <- function(dat.imp, var.names=NULL, groups=NULL){
  ## Number of groups is read off the grouping column of the first
  ## imputed data set; 1 when no grouping variable is given.
  if (is.null(groups)) {
    ngroups <- 1
  } else {
    ngroups <- length(table(dat.imp[[1]][, groups]))
  }
  nvar <- length(var.names)
  ## Assemble rows group by group: variances first, then intercepts.
  lhs <- character(0)
  op <- character(0)
  rhs <- character(0)
  grp <- integer(0)
  for (g in seq_len(ngroups)) {
    lhs <- c(lhs, var.names, var.names)
    op <- c(op, rep("~~", nvar), rep("~1", nvar))
    rhs <- c(rhs, var.names, rep("", nvar))
    grp <- c(grp, rep(g, 2 * nvar))
  }
  npar <- length(lhs)
  rowid <- seq_len(npar)
  ## All parameters are user-specified and freely estimated.
  null.sat.fmi <- list(id = rowid,
                       lhs = lhs,
                       op = op,
                       rhs = rhs,
                       user = rep(1, npar),
                       group = grp,
                       free = as.integer(rowid),
                       ustart = rep(NA, npar),
                       exo = rep(0, npar),
                       label = rep("", npar),
                       plabel = rep("", npar))
  return(null.sat.fmi)
}
## Output: a lavaanStar object filled with the pooled information.
## Alexander Schoemann, Patrick Miller, Mijke Rhemtulla, Sunthud Pornprasertmanit, Alexander Robitzsch, Mauricio Garnier Villarreal
## Last modified 5/25/2012
## Outputs pooled parameter estimates, standard errors, fit indices, and
## fraction of missing information.
##
## The four wrappers below share one signature and simply forward every
## argument to runMI(), differing only in which lavaan fitting function
## ("cfa", "sem", "growth", "lavaan") is used on each imputed data set.

## Confirmatory factor analysis across multiple imputations.
cfa.mi <- function(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) {
  runMI(model=model, data=data, m=m, miArgs=miArgs, chi=chi, miPackage=miPackage,
        seed=seed, fun="cfa", nullModel = nullModel, includeImproper = includeImproper, ...)
}

## Structural equation model across multiple imputations.
sem.mi <- function(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) {
  runMI(model=model, data=data, m=m, miArgs=miArgs, chi=chi, miPackage=miPackage,
        seed=seed, fun="sem", nullModel = nullModel, includeImproper = includeImproper, ...)
}

## Latent growth curve model across multiple imputations.
growth.mi <- function(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) {
  runMI(model=model, data=data, m=m, miArgs=miArgs, chi=chi, miPackage=miPackage,
        seed=seed, fun="growth", nullModel = nullModel, includeImproper = includeImproper, ...)
}

## General lavaan model across multiple imputations.
lavaan.mi <- function(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) {
  runMI(model=model, data=data, m=m, miArgs=miArgs, chi=chi, miPackage=miPackage,
        seed=seed, fun="lavaan", nullModel = nullModel, includeImproper = includeImproper, ...)
}
## runMI: workhorse behind cfa.mi/sem.mi/growth.mi/lavaan.mi.
## Imputes missing data (or accepts an already-imputed list), fits the target
## and null models to every imputation, pools estimates/SEs by Rubin's rules,
## pools chi-square statistics by the requested method(s), and returns a
## lavaanStar object.
##
## Args:
##   model:   lavaan model syntax or parameter table for the target model
##   data:    data.frame with missing values, or a list of imputed data.frames
##   m:       number of imputations (ignored when 'data' is already a list)
##   miArgs:  extra arguments passed to the imputation function
##   chi:     chi-square pooling method: "none", "mr", "lmrr", "mplus", "all"
##   miPackage: imputation package, "Amelia" or "mice"
##   seed:    RNG seed set before imputation
##   fun:     name of the lavaan wrapper to call ("cfa","sem","growth","lavaan")
##   nullModel: optional user-supplied null model
##   includeImproper: if TRUE, keep replications with improper solutions
##   ...:     further arguments passed through to the lavaan wrapper
runMI <- function(model, data, m, miArgs=list(), chi="all", miPackage="Amelia",
                  seed=12345, fun, nullModel = NULL, includeImproper = FALSE, ...)
{
  set.seed(seed)
  chi <- tolower(chi)
  if(!(chi %in% c("none", "mplus", "mr", "lmrr", "all"))) {
    stop("The chi argument should be one of the followings only: 'none, 'mr', 'lmrr', 'mplus', or 'all'.")
  }
  ## A list that is not a data.frame is treated as already-imputed data.
  imputed.data <- is.list(data) & (!is.data.frame(data))
  imputed.l <- NULL
  if (!imputed.data){
    if( ( miPackage!="Amelia" ) & ( miPackage !="mice") ) {
      stop("Currently runMI only supports imputation by Amelia or mice")
    }
    if(miPackage=="Amelia"){
      imputed.l<-imputeMissingAmelia(data,m, miArgs)
    } else if(miPackage=="mice"){
      imputed.l<-imputeMissingMice(data,m, miArgs)
    }
  } else {
    imputed.l <- data
    m <- length( data )
    data <- data[[1]]
  }
  ## Unfitted template object; its slots are overwritten with pooled results.
  out <- list(model=model, data=imputed.l[[1]], se="none", do.fit=FALSE)
  out <- c(out, list(...))
  template <- do.call(fun, out)
  ## Fit the target model to every imputed data set.
  imputed.results.l <- suppressWarnings(lapply(imputed.l, runlavaanMI, syntax=model, fun=fun, ...))
  converged.l <- sapply(imputed.results.l, lavaan::lavInspect, what = "converged")
  coefAll <- sapply(imputed.results.l, function(x) lavaan::parTable(x)$est)
  seAll <- sapply(imputed.results.l, function(x) lavaan::parTable(x)$se)
  partableImp <- lavaan::partable(imputed.results.l[[1]])
  ## Rows of the parameter table that are variances (x ~~ x).
  posVar <- (partableImp$op == "~~") & (partableImp$lhs == partableImp$rhs)
  ## Screen out improper solutions (negative variances, bad/zero SEs)
  ## unless the user explicitly asked to keep them.
  convergedtemp <- converged.l
  properSE <- apply(seAll, 2, function(x) all(!is.na(x)) & all(x >= 0) & !(all(x == 0)))
  properVariance <- apply(coefAll[posVar, ,drop=FALSE], 2, function(x) all(x >= 0))
  if(!includeImproper) {
    converged.l <- converged.l & properSE & properVariance
  }
  if(sum(converged.l) < 2) {
    ## Too few usable replications: print a diagnostic table and abort.
    tab <- cbind(convergedtemp, properSE, properVariance, converged.l)
    colnames(tab) <- c("1. Convergence", "2. Proper SE", "3. Proper Variance Estimate", "Used for pooling")
    print(tab)
    stop("Please increase the number of imputations. The number of convergent replications is less than or equal to 1. See the details above.")
  }
  mOriginal <- m
  m <- sum(converged.l)
  convergenceRate <- m/mOriginal
  imputed.results.l <- imputed.results.l[converged.l]
  coefs <- sapply(imputed.results.l, function(x) lavaan::parTable(x)$est)
  se <- sapply(imputed.results.l, function(x) lavaan::parTable(x)$se)
  ## Average the model-implied moments across imputations, per group.
  Sigma.hat <- lapply(imputed.results.l, lavaan::lavInspect, what = "cov.ov")
  Mu.hat <- lapply(imputed.results.l, lavaan::lavInspect, what = "mean.ov")
  meanSigmaHat <- list()
  meanMuHat <- list()
  for(g in seq_len(lavaan::lavInspect(template, "ngroups"))) {
    tempSigma <- lapply(Sigma.hat, "[[", g)
    meanSigmaHat[[g]] <- Reduce("+", tempSigma)/m
    tempMu <- lapply(Mu.hat, "[[", g)
    meanMuHat[[g]] <- Reduce("+", tempMu)/m
  }
  template@Fit@Sigma.hat <- meanSigmaHat
  template@Fit@Mu.hat <- meanMuHat
  ## Pool point estimates and standard errors by Rubin's rules.
  comb.results <- miPoolVector(t(coefs),t(se), m)
  template@Fit@est <- template@ParTable$est <- comb.results$coef
  template@Fit@se <- template@ParTable$se <- comb.results$se
  template@Fit@x <- comb.results$coef[comb.results$se != 0]
  template@Model <- imposeGLIST(template@Model, comb.results$coef, lavaan::parTable(template))
  selectVCOV <- lavaan::partable(imputed.results.l[[1]])$free != 0
  ## Pool the asymptotic covariance matrix of the free parameters
  ## (half-vectorized per imputation, then combined).
  VCOVs <- sapply(imputed.results.l, function(x) vecsmat(lavaan::vcov(x)))
  template@vcov$vcov <- vcovPool(t(coefs[selectVCOV, ]),t(VCOVs), m)
  fmi.results <- cbind(lavaan::parameterEstimates(template, remove.system.eq = FALSE, remove.eq = FALSE, remove.ineq = FALSE)[,1:3],
                       group=lavaan::parTable(template)$group,
                       fmi1 = comb.results[[3]], fmi2 = comb.results[[4]])
  fit <- lavaan::lavInspect(imputed.results.l[[1]], "test")
  df <- fit[[1]]$df
  chi1 <- sapply(imputed.results.l, function(x) lavaan::lavInspect(x, "test")[[1]]$stat)
  ## With categorical indicators or a scaled test, only LMRR pooling works.
  if(length(lavaan::lavNames(template, "ov.ord")) | (length(fit) > 1)) {
    if(chi=="all") chi <- "lmrr"
    if(chi %in% c("mplus", "mr")) {
      stop("The 'mplus' or 'mr' method for pooling chi-square values is not available with categorical variables.")
    }
  }
  chiScaled1 <- NULL
  dfScaled <- NULL
  if(length(fit) > 1) {
    chiScaled1 <- sapply(imputed.results.l, function(x) lavaan::lavInspect(x, "test")[[2]]$stat)
    dfScaled <- fit[[2]]$df
  }
  ## Average the per-group chi-square contributions across imputations.
  if(lavaan::lavInspect(template, "ngroups") == 1) {
    fit[[1]]$stat.group <- mean(sapply(imputed.results.l, function(x) lavaan::lavInspect(x, "test")[[1]]$stat.group))
  } else {
    fit[[1]]$stat.group <- rowMeans(sapply(imputed.results.l, function(x) lavaan::lavInspect(x, "test")[[1]]$stat.group))
  }
  ## Fit the null (independence) model to every imputation, for fit indices.
  if(is.null(nullModel)) nullModel <- lavaan::lav_partable_independence(template)
  if(is.list(nullModel)) nullModel$ustart[nullModel$exo == 1] <- NA
  null.results <- suppressWarnings(lapply(imputed.l, runlavaanMI, syntax=nullModel, fun=fun, ...))
  convergedNull.l <- sapply(null.results, lavaan::lavInspect, what = "converged")
  seNullAll <- sapply(null.results, function(x) lavaan::parTable(x)$se)
  if(!includeImproper) {
    convergedNull.l <- convergedNull.l & apply(seNullAll, 2, function(x) all(!is.na(x) & (x >= 0)))
  }
  dfNull <- lavaan::lavInspect(null.results[[1]], "test")[[1]]$df
  ## A saturated null model (df = 0) trivially "converges" everywhere.
  if(dfNull == 0) convergedNull.l <- rep(TRUE, m)
  if(!any(convergedNull.l)) stop("No null model is converged")
  mNull <- sum(convergedNull.l)
  convergenceNullRate <- mNull/mOriginal
  null.results <- null.results[convergedNull.l]
  chiNull <- sapply(null.results, function(x) lavaan::lavInspect(x, "test")[[1]]$stat)
  chiNullScaled1 <- NULL
  dfNullScaled <- NULL
  if(length(fit) > 1) {
    chiNullScaled1 <- sapply(null.results, function(x) lavaan::lavInspect(x, "test")[[2]]$stat)
    dfNullScaled <- lavaan::lavInspect(null.results[[1]], "test")[[2]]$df
  }
  outNull <- list(model=nullModel, data=imputed.l[[1]], se="none", do.fit=FALSE)
  outNull <- c(outNull, list(...))
  templateNull <- suppressWarnings(do.call(fun, outNull))
  coefsNull <- sapply(null.results, function(x) lavaan::parTable(x)$est)
  seNull <- sapply(null.results, function(x) lavaan::parTable(x)$se)
  comb.results.null <- miPoolVector(t(coefsNull),t(seNull), mNull)
  fitNull <- lavaan::lavInspect(null.results[[1]], "test")
  ## Containers for the pooled test statistics (NULL/NA when not requested).
  lmrr <- NULL
  lmrrNull <- NULL
  mr <- NULL
  mrNull <- NULL
  mplus <- NULL
  mplusNull <- NULL
  lmrrScaled <- NULL
  lmrrScaledNull <- NULL
  logsat <- NA
  logalt <- NA
  loglnull <- NULL
  loglsat <- NULL
  loglmod <- NULL
  if(chi %in% c("lmrr", "all")){
    ## Li, Meng, Raghunathan, & Rubin (1991) pooling of chi-square values.
    lmrr <- lmrrPooledChi(chi1, df)
    lmrrNull <- lmrrPooledChi(chiNull, dfNull)
    fit[[1]]$stat <- as.numeric(lmrr[1] * lmrr[2])
    fit[[1]]$pvalue <- as.numeric(lmrr[4])
    fitNull[[1]]$stat <- as.numeric(lmrrNull[1] * lmrrNull[2])
    fitNull[[1]]$pvalue <- as.numeric(lmrrNull[4])
    if(!is.null(chiScaled1)) {
      ## Also pool the scaled (robust) test statistics when available.
      lmrrScaled <- lmrrPooledChi(chiScaled1, dfScaled)
      lmrrScaledNull <- lmrrPooledChi(chiNullScaled1, dfNullScaled)
      fit[[2]] <- lavaan::lavInspect(imputed.results.l[[1]], "test")[[2]]
      fit[[2]]$stat <- as.numeric(lmrrScaled[1] * lmrrScaled[2])
      fit[[2]]$pvalue <- as.numeric(lmrrScaled[4])
      fitNull[[2]] <- lavaan::lavInspect(null.results[[1]], "test")[[2]]
      fitNull[[2]]$stat <- as.numeric(lmrrScaledNull[1] * lmrrScaledNull[2])
      fitNull[[2]]$pvalue <- as.numeric(lmrrScaledNull[4])
      template@Options$estimator <- lavaan::lavInspect(imputed.results.l[[1]], "options")$estimator
      template@Options$test <- lavaan::lavInspect(imputed.results.l[[1]], "options")$test
      templateNull@Options$estimator <- lavaan::lavInspect(null.results[[1]], "options")$estimator
      templateNull@Options$test <- lavaan::lavInspect(null.results[[1]], "options")$test
    }
  }
  if(chi %in% c("mplus", "mr", "all")){
    ## Computations shared by the MR and Mplus likelihood-based poolings.
    mrplusOut <- mrplusPooledChi(template, imputed.l[converged.l], chi1, df, coef=comb.results$coef, coefs = coefs, m=m, fun=fun, ...)
    mrplus <- mrplusOut[[1]]
    mrplusChi <- mrplusOut[[2]]
    mrplusNullOut <- mrplusPooledChi(templateNull, imputed.l[convergedNull.l], chiNull, dfNull, coef=comb.results.null$coef, coefs = coefsNull, m=mNull, fun=fun, par.sat=lavaan::lav_partable_unrestricted(template), ...)
    mrplusNull <- mrplusNullOut[[1]]
    mrplusNullChi <- mrplusNullOut[[2]]
    logsat <- mrplus[5] / (1 + mrplus[4])
    logalt <- mrplus[6] / (1 + mrplus[4])
    loglnull <- mrplusNullChi[,2]
    loglsat <- mrplusChi[,1]
    loglmod <- mrplusChi[,2]
    if(chi %in% c("mr", "all")){
      mr <- mrPooledChi(mrplus[1], mrplus[2], mrplus[3], mrplus[4])
      mrNull <- mrPooledChi(mrplusNull[1], mrplusNull[2], mrplusNull[3], mrplusNull[4])
      fit[[1]]$stat <- as.numeric(mr[1] * mr[2])
      fit[[1]]$pvalue <- as.numeric(mr[4])
      fitNull[[1]]$stat <- as.numeric(mrNull[1] * mrNull[2])
      fitNull[[1]]$pvalue <- as.numeric(mrNull[4])
    }
    if(chi %in% c("mplus", "all")){
      mplus <- mplusPooledChi(mrplus[1], mrplus[3], mrplus[4])
      mplusNull <- mplusPooledChi(mrplusNull[1], mrplusNull[3], mrplusNull[4])
      fit[[1]]$stat <- as.numeric(mplus[1])
      fit[[1]]$pvalue <- as.numeric(mplus[3])
      fitNull[[1]]$stat <- as.numeric(mplusNull[1])
      fitNull[[1]]$pvalue <- as.numeric(mplusNull[3])
    }
  }
  template@test <- fit
  template@Fit@npar <- lavaan::fitMeasures(imputed.results.l[[1]], "npar")[[1]]
  template@Options <- lavaan::lavInspect(imputed.results.l[[1]], "options")
  templateNull@test <- fitNull
  result <- as(template, "lavaanStar") ## HACK! YR
  templateNull@Fit@converged <- TRUE ### to trick fitMeasures
  notused <- capture.output(fitVec <- suppressWarnings(lavaan::fitMeasures(templateNull)))
  name <- names(fitVec)
  fitVec <- as.vector(fitVec)
  names(fitVec) <- name
  result@nullfit <- fitVec
  result@Fit@iterations <- as.integer(m)
  result@Fit@converged <- TRUE
  ## Assemble the extra summary information stored in the lavaanStar object.
  summaryImputed <- list()
  summaryImputed[[1]] <- c("target model" = convergenceRate, "null model" = convergenceNullRate)
  summaryImputed[[2]] <- fmi.results
  summaryImputed[[3]] <- list(lmrr = lmrr, mr = mr, mplus = mplus)
  summaryImputed[[4]] <- list(lmrr = lmrrNull, mr = mrNull, mplus = mplusNull)
  summaryImputed[[5]] <- list(unrestricted.logl = logsat, logl = logalt)
  summaryImputed[[6]] <- list(chiorig = chi1, loglmod = loglmod, loglnull = loglnull, loglsat = loglsat)
  nameImputed <- c("convergenceRate", "fractionMissing", "targetFit", "nullFit", "logl", "indivlogl")
  if(!is.null(lmrrScaled)) {
    summaryImputed[[7]] <- list(lmrr = lmrrScaled)
    summaryImputed[[8]] <- list(lmrr = lmrrScaledNull)
    names(summaryImputed) <- c(nameImputed, "targetFit.scaled", "nullFit.scaled")
  } else {
    names(summaryImputed) <- nameImputed
  }
  result@imputed <- summaryImputed
  result@imputedResults <- imputed.results.l
  return(result)
}
## runlavaanMI: fit one lavaan model to one imputed data set.
## Returns the fitted object, or NULL when fitting fails, so that a single
## bad imputation does not abort the whole run (the caller screens results).
runlavaanMI <- function(MIdata, syntax, fun, ...)
{
  ## Assemble the call: model syntax, one imputed data set, plus any
  ## additional lavaan arguments forwarded through '...'.
  args <- c(list(model = syntax, data = MIdata), list(...))
  result <- NULL
  try(result <- do.call(fun, args), silent = TRUE)
  result
}

## imputeMissingAmelia: impute with Amelia and return only the list of
## m completed data sets (p2s = 0 silences Amelia's progress output).
imputeMissingAmelia <- function(data,m, miArgs){
  amelia.call <- as.call(c(list(Amelia::amelia, x = data, m = m, p2s = 0), miArgs))
  eval(amelia.call)$imputations
} # end imputeMissingAmelia

## imputeMissingMice: impute with mice and return a list of m completed
## data sets extracted from the fitted mids object.
imputeMissingMice <- function(data,m, miArgs){
  ## mice must be attached (not merely loaded) for its methods to dispatch.
  requireNamespace("mice")
  if (!("package:mice" %in% search())) attachNamespace("mice")
  mice.call <- as.call(c(list(mice::mice, data = data, m = m,
                              diagnostics = FALSE, printFlag = FALSE), miArgs))
  fitted.mids <- eval(mice.call)
  lapply(seq_len(m), function(i) {
    mice::complete(x = fitted.mids, action = i, include = FALSE)
  })
} # end imputeMissingMice
## miPoolVector: pool multiply-imputed estimates by Rubin's rules.
##   MI.param: matrix of estimates (rows = imputations, cols = parameters)
##   MI.se:    matrix of standard errors (same layout)
##   imps:     number of imputations
## Returns list(coef, se, FMI.1, FMI.2): pooled estimates, pooled SEs, and
## two estimates of the fraction of missing information per parameter.
miPoolVector <- function(MI.param, MI.se, imps) {
  ## Pooled point estimates: the average across imputations.
  pooled.est <- colMeans(MI.param)
  ## Between-imputation variance: variance of the estimates over imputations.
  between.var <- apply(MI.param, 2, var)
  ## Within-imputation variance: average of the squared standard errors.
  within.var <- colMeans(MI.se^2)
  ## Total variance with the (m + 1)/m finite-imputation correction.
  total.var <- within.var + ((imps + 1)/imps) * between.var
  ## Degrees-of-freedom correction used by the second FMI estimate.
  nu <- (imps - 1) * ((((1 + 1/imps) * between.var)/total.var)^-2)
  fmi1 <- 1 - (within.var/total.var)
  fmi2 <- 1 - ((nu + 1) * within.var)/((nu + 3) * total.var)
  fmi2[is.nan(fmi2)] <- 0  # fixed parameters: 0/0 -> define FMI as 0
  MI.res <- list(pooled.est, sqrt(total.var), fmi1, fmi2)
  names(MI.res) <- c('coef','se','FMI.1','FMI.2')
  MI.res
}

## vecsmat: half-vectorize a symmetric matrix (stack the lower triangle,
## diagonal included, column by column).
vecsmat <- function(X) {
  X[lower.tri(X, diag = TRUE)]
}

## invvecsmat: inverse of vecsmat — rebuild the full symmetric matrix from
## its half-vectorization.
invvecsmat <- function(x) {
  ## Solve p(p + 1)/2 = length(x) for the matrix dimension p.
  p <- (sqrt(1 + 8 * length(x)) - 1)/2
  mat <- matrix(0, p, p)
  mat[lower.tri(mat, diag = TRUE)] <- x
  diag.vals <- diag(mat)
  ## Mirror the lower triangle, then restore the (otherwise doubled) diagonal.
  mat <- mat + t(mat)
  diag(mat) <- diag.vals
  mat
}

## vcovPool: pool the asymptotic covariance matrices of the free parameters
## across imputations (Rubin's rules applied element-wise to half-vectorized
## matrices). Returns the pooled matrix in full symmetric form.
vcovPool <- function(MI.param, MI.cov, imps) {
  ## Between-imputation covariance of the estimates, half-vectorized.
  between.cov <- vecsmat(cov(MI.param))
  ## Within-imputation covariance: mean of the half-vectorized vcov matrices.
  within.cov <- colMeans(MI.cov)
  invvecsmat(within.cov + ((imps + 1)/imps) * between.cov)
}
## lmrrPooledChi: pool chi-square statistics from multiply imputed analyses
## into an F statistic, following Li, Meng, Raghunathan, & Rubin (1991).
## (Adapted from Craig Enders' SAS macro for combining likelihood-ratio
## chi-square statistics from an MI analysis.)
##
## Args:
##   chis: vector of chi-square values (a one-row or one-column matrix is
##         accepted and flattened)
##   df:   degrees of freedom of each test
## Returns: named vector c(F, df1, df2, p.F).
lmrrPooledChi <- function(chis, df) {
  if (is.matrix(chis)) {
    ## Accept a degenerate (one-row/one-column) matrix; reject anything
    ## genuinely two-dimensional. The original code used ifelse() with an
    ## assignment side effect here, which only worked by accident of lazy
    ## evaluation; a plain if/else expresses the intended control flow.
    if (ncol(chis) == 1 | nrow(chis) == 1) {
      chis <- as.vector(chis)
    } else {
      stop("Please put a vector of chi-square values")
    }
  }
  m <- length(chis)
  dbar <- mean(chis)
  sqrtd <- sqrt(chis)
  xbarsqrtd <- mean(sqrtd)
  # Equation 2.2: relative increase in variance due to missing data
  r <- (1 + 1/m) * (sum((sqrtd - xbarsqrtd)^2)/(m - 1))
  # Equation 2.1: pooled statistic, truncated at zero
  D <- (dbar/df - ((m + 1) * r /(m - 1)))/(1 + r)
  if(D < 0) D <- 0
  # Equation 2.16 and 2.17: denominator degrees of freedom
  aw <- df^(-(3/m)) * (m - 1) * (1 + (1/r))^2
  p <- 1 - pf(D, df, aw)
  result <- c(D, df, aw, p)
  names(result) <- c("F", "df1", "df2", "p.F")
  return(result)
}
## mrplusPooledChi: shared computations for the MR and Mplus chi-square
## pooling methods. Re-fits the saturated model (and the target model with
## parameters fixed at their pooled values) to every imputation to obtain
## averaged log-likelihoods and the average relative increase in variance
## (ARIV). Returns list(resmrCHI, chinew): the summary vector
## c(chimean, m, df, ariv, mean saturated logl, mean restricted logl) and
## the per-imputation log-likelihood matrix.
mrplusPooledChi <- function(template, imputed.l, chi1, df, coef, coefs, m, fun, par.sat=NULL, ...) {
  if(is.null(par.sat)) par.sat <- lavaan::lav_partable_unrestricted(template)
  ## Fit the saturated model freely to get averaged saturated estimates.
  comb.sat <- suppressWarnings(lapply(imputed.l, runlavaanMI, syntax=par.sat, fun=fun, ...))
  converged.sat1 <- sapply(comb.sat, lavaan::lavInspect, what = "converged")
  coefs.sat1 <- sapply(comb.sat, function(x) lavaan::parTable(x)$est)
  est.sat1 <- rowMeans(coefs.sat1[,converged.sat1])
  ## Refit with all saturated parameters FIXED at their pooled averages;
  ## forceTest() makes lavaan evaluate the log-likelihood without optimizing.
  par.sat2 <- par.sat
  par.sat2$free <- as.integer(rep(0, length(par.sat2$free)))
  par.sat2$ustart <- est.sat1
  par.sat2$start <- est.sat1
  par.sat2$est <- est.sat1
  comb.sat2 <- suppressWarnings(lapply(imputed.l, runlavaanMI, syntax=par.sat2, fun=fun, ...))
  comb.sat2 <- lapply(comb.sat2, forceTest)
  fit.sat2 <- sapply(comb.sat2, function(x) lavaan::lavInspect(x, "fit")["logl"])
  ## Same for the target model, fixed at its pooled estimates (exogenous
  ## parameters are fixed at each imputation's own values instead).
  par.alt2 <- lavaan::partable(template)
  par.alt2$free <- as.integer(rep(0, length(par.alt2$free)))
  par.alt2$ustart <- coef
  par.alt2$start <- coef
  par.alt2$est <- coef
  par.alt2.l <- rep(list(par.alt2), m)
  TEMPFUN <- function(ptable, origcoef) {
    exo <- ptable$exo == 1
    ptable$ustart[exo] <- origcoef[exo]
    ptable$start[exo] <- origcoef[exo]
    ptable$est[exo] <- origcoef[exo]
    ptable
  }
  par.alt2.l <- mapply(TEMPFUN, par.alt2.l, data.frame(coefs), SIMPLIFY = FALSE)
  comb.alt2 <- suppressWarnings(mapply(runlavaanMI, MIdata = imputed.l, syntax = par.alt2.l,
                                       SIMPLIFY = FALSE, MoreArgs = list(fun = fun, ...)))
  comb.alt2 <- lapply(comb.alt2, forceTest)
  fit.alt2 <- sapply(comb.alt2, function(x) lavaan::lavInspect(x, "fit")["logl"])
  ## Per-imputation LR statistic evaluated at the pooled estimates.
  chinew <- cbind(fit.sat2, fit.alt2, (fit.sat2-fit.alt2)*2)
  chimean <- mean(chinew[,3])
  logsat <- mean(chinew[,1])
  logalt <- mean(chinew[,2])
  fit.altcc <- mean(chi1)
  ## Average relative increase in variance due to missing data.
  ariv <- ((m+1)/((m-1)*df))*(fit.altcc-chimean)
  resmrCHI <- c(chimean, m, df, ariv, logsat, logalt)
  return(list(resmrCHI, chinew))
}

## mplusPooledChi: Mplus-style pooled chi-square — the mean LR statistic
## deflated by (1 + ARIV), referred to a chi-square with k df.
## Returns a one-row data.frame c(chisq, df, pvalue).
mplusPooledChi <- function(chimean, k, ariv){
  comb.chi.mplus <- matrix(NA, nrow=1, ncol=3)
  comb.chi.mplus[1] <- chimean/(1+ariv)
  comb.chi.mplus[2] <- k
  comb.chi.mplus[3] <- 1 - pchisq(comb.chi.mplus[1], comb.chi.mplus[2])
  colnames(comb.chi.mplus) <- c("chisq", "df", "pvalue")
  comb.chi.mplus <- as.data.frame(comb.chi.mplus)
  rownames(comb.chi.mplus) <- ""
  return(comb.chi.mplus)
}

## mrPooledChi: Meng & Rubin-style pooled test — an F statistic with k
## numerator df and denominator df v4 depending on t = k(m - 1).
## BUG FIX: the branch condition was inverted relative to the published
## formula (e.g. Meng & Rubin, 1992; Schafer, 1997): the large-sample
## expression 4 + (t - 4)[1 + (1 - 2/t)/ariv]^2 applies when t > 4, and the
## small-sample expression t(1 + 1/k)(1 + 1/ariv)^2 / 2 otherwise. The
## original code applied them the other way around.
## Returns a one-row data.frame c(F, df1, df2, pvalue).
mrPooledChi <-function(chimean, m, k, ariv){
  km <- m*k
  kmtest <- km-k  # t = k(m - 1)
  if(kmtest > 4){
    v4 <- 4+(km-k-4)*(1+(1-(2/kmtest))*(1/ariv))^2
  } else{
    v4 <- (kmtest*(1+k^-1)*(1+(1/ariv))^2)/2
  }
  comb.chi.mr <- matrix(NA, nrow=1, ncol=4)
  comb.chi.mr[1] <- chimean/((1+ariv)*k)
  comb.chi.mr[2] <- k
  comb.chi.mr[3] <- v4
  comb.chi.mr[4] <- 1 - pf(comb.chi.mr[1], comb.chi.mr[2], comb.chi.mr[3])
  colnames(comb.chi.mr) <- c("F", "df1", "df2", "pvalue")
  comb.chi.mr <- as.data.frame(comb.chi.mr)
  rownames(comb.chi.mr) <- ""
  return(comb.chi.mr)
}

## forceTest: re-run a fitted lavaan call with optimization disabled so the
## test statistics/log-likelihood are evaluated at the supplied (fixed)
## parameter values.
forceTest <- function(object) {
  previousCall <- lavaan::lavInspect(object, "call")
  args <- previousCall[-1]
  args$model <- lavaan::parTable(object)
  args$control <- list(optim.method="none", optim.force.converged=TRUE)
  funcall <- as.character(previousCall[[1]])
  lav <- do.call(funcall[length(funcall)], args)
  lav
}
## imposeGLIST: write pooled coefficients back into a lavaan Model object's
## GLIST (list of model matrices), dispatching each parameter-table row to
## the appropriate matrix (lambda/beta, psi/theta, gamma, alpha/nu, tau).
## Returns the modified Model object.
imposeGLIST <- function(object, coef, partable) {
  GLIST <- object@GLIST
  ## Ensure every element is a matrix with its lavaan dimnames attached,
  ## so parameters can be addressed by variable name below.
  for(i in 1:length(GLIST)) {
    if(!is.matrix(GLIST[[i]])) GLIST[[i]] <- as.matrix(GLIST[[i]])
    dimnames(GLIST[[i]]) <- object@dimNames[[i]]
  }
  for(i in 1:length(coef)) {
    group <- partable$group[i]
    lhs <- partable$lhs[i]
    rhs <- partable$rhs[i]
    if(partable$op[i] == "=~") {
      ## Loading: lambda, unless the indicator is itself latent (then beta).
      targetName <- "lambda"
      if(!(rhs %in% rownames(GLIST[names(GLIST) == "lambda"][[group]]))) targetName <- "beta"
      GLIST[names(GLIST) == targetName][[group]][rhs, lhs] <- coef[i]
    } else if (partable$op[i] == "~~") {
      ## (Co)variance: psi for latent variables, theta for observed residuals;
      ## written symmetrically into both triangles.
      if(lhs %in% rownames(GLIST[names(GLIST) == "psi"][[group]])) {
        GLIST[names(GLIST) == "psi"][[group]][rhs, lhs] <- coef[i]
        GLIST[names(GLIST) == "psi"][[group]][lhs, rhs] <- coef[i]
      } else {
        GLIST[names(GLIST) == "theta"][[group]][rhs, lhs] <- coef[i]
        GLIST[names(GLIST) == "theta"][[group]][lhs, rhs] <- coef[i]
      }
    } else if (partable$op[i] == "~") {
      ## Regression: beta, unless the predictor is exogenous (then gamma).
      targetName <- "beta"
      if(!(rhs %in% colnames(GLIST[names(GLIST) == "beta"][[group]]))) targetName <- "gamma"
      GLIST[names(GLIST) == targetName][[group]][lhs, rhs] <- coef[i]
    } else if (partable$op[i] == "~1") {
      ## Intercept/mean: alpha for latent variables, nu for observed.
      if(lhs %in% rownames(GLIST[names(GLIST) == "alpha"][[group]])) {
        GLIST[names(GLIST) == "alpha"][[group]][lhs, 1] <- coef[i]
      } else {
        GLIST[names(GLIST) == "nu"][[group]][lhs, 1] <- coef[i]
      }
    } else if (partable$op[i] == "|") {
      ## Threshold rows are indexed as "lhs|rhs" in tau.
      GLIST[names(GLIST) == "tau"][[group]][paste0(lhs, "|", rhs), 1] <- coef[i]
    }
  }
  object@GLIST <- GLIST
  object
}

## Title: Data Diagnosis
## Author: Sunthud Pornprasertmanit
## Description: Diagnose data for its distribution
## Remark: initial version from the simsem package

## centralMoment: the order-'ord' central moment of a numeric vector
## (population version, dividing by n). Requires ord >= 2.
centralMoment <- function(x, ord) {
  if(ord < 2) stop("Central moment can be calculated for order 2 or more in an integer.")
  wm <- mean(x)
  result <- sum((x - wm)^(ord))/length(x)
  return(result)
}

## kStat: the k-statistic (unbiased cumulant estimator) of order 1-4.
## Formulas from MathWorld.
kStat <- function(x, ord) {
  n <- length(x)
  if(ord == 1) {
    return(mean(x))
  } else if (ord == 2) {
    return(centralMoment(x, 2) * n / (n - 1))
  } else if (ord == 3) {
    return(centralMoment(x, 3) * n^2 / ((n - 1) * (n - 2)))
  } else if (ord == 4) {
    num1 <- n^2
    num2 <- (n + 1) * centralMoment(x, 4)
    num3 <- 3 * (n - 1) * centralMoment(x, 2)^2
    denom <- (n - 1) * (n - 2) * (n - 3)
    return((num1 * (num2 - num3))/denom)
  } else {
    stop("Order can be 1, 2, 3, or 4 only.")
  }
}

## skew: skewness of a vector. With population = TRUE, the population
## moment ratio is returned; otherwise the sample estimate (g1) with its
## normal-theory SE, z statistic, and two-sided p value.
skew <- function(object, population=FALSE) {
  if(any(is.na(object))) {
    object <- object[!is.na(object)]
    warning("Missing observations are removed from a vector.")
  }
  if(population) {
    return(centralMoment(object, 3)/(centralMoment(object, 2)^(3/2)))
  } else {
    est <- kStat(object, 3)/(kStat(object, 2)^(3/2))
    se <- sqrt(6/length(object))  # asymptotic SE under normality
    z <- est/se
    p <- (1 - pnorm(abs(z)))*2
    return(c("skew (g1)"=est, se=se, z=z, p=p))
  }
}

## kurtosis: excess kurtosis of a vector. With population = TRUE, the
## population moment ratio minus 3; otherwise the sample estimate (g2)
## with its normal-theory SE, z statistic, and two-sided p value.
kurtosis <- function(object, population=FALSE) {
  if(any(is.na(object))) {
    object <- object[!is.na(object)]
    warning("Missing observations are removed from a vector.")
  }
  if(population) {
    return((centralMoment(object, 4)/(centralMoment(object, 2)^2)) - 3)
  } else {
    est <- kStat(object, 4)/(kStat(object, 2)^(2))
    se <- sqrt(24/length(object))  # asymptotic SE under normality
    z <- est/se
    p <- (1 - pnorm(abs(z)))*2
    return(c("Excess Kur (g2)"=est, se=se, z=z, p=p))
  }
}
## mardiaSkew: Mardia's multivariate skewness b1d for a data set, with its
## chi-square test (chi = n * b1d / 6 on d(d+1)(d+2)/6 df).
##   dat: data frame/matrix of numeric variables
##   use: passed to cov() for missing-data handling
mardiaSkew <- function(dat, use = "everything") {
  centeredDat <- scale(dat, center=TRUE, scale=FALSE)
  invS <- solve(cov(dat, use = use))
  ## Quadratic form between two observation vectors in the metric of S^-1.
  FUN <- function(vec1, vec2, invS) {
    as.numeric(t(as.matrix(vec1)) %*% invS %*% as.matrix(vec2))
  }
  FUN2 <- function(vec1, listVec2, invS) {
    sapply(listVec2, FUN, vec1=vec1, invS=invS)
  }
  ## n x n matrix of all pairwise Mahalanobis cross-products.
  indivTerm <- sapply(as.list(data.frame(t(centeredDat))), FUN2,
                      listVec2=as.list(data.frame(t(centeredDat))), invS=invS)
  b1d <- sum(indivTerm^3)/(nrow(dat)^2)
  d <- ncol(dat)
  chi <- nrow(dat) * b1d / 6
  df <- d * (d + 1) * (d + 2) / 6
  p <- pchisq(chi, df = df, lower.tail = FALSE)
  return(c(b1d = b1d, chi = chi, df=df, p=p))
}

## mardiaKurtosis: Mardia's multivariate kurtosis b2d with its asymptotic
## normal test (mean d(d+2), variance 8d(d+2)/n under normality).
mardiaKurtosis <- function(dat, use = "everything") {
  centeredDat <- scale(dat, center=TRUE, scale=FALSE)
  invS <- solve(cov(dat, use = use))
  ## Squared Mahalanobis distance of each observation from the centroid.
  FUN <- function(vec, invS) {
    as.numeric(t(as.matrix(vec)) %*% invS %*% as.matrix(vec))
  }
  indivTerm <- sapply(as.list(data.frame(t(centeredDat))), FUN, invS=invS)
  b2d <- sum(indivTerm^2)/nrow(dat)
  d <- ncol(dat)
  m <- d * (d + 2)
  v <- 8 * d * (d + 2) / nrow(dat)
  z <- (b2d - m)/sqrt(v)
  p <- pnorm(-abs(z)) * 2
  return(c(b2d = b2d, z = z, p=p))
}

## Terrence D. Jorgensen -- Last updated: 14 October 2016
## semTools implementation of two-stage ML for missing data.

## S4 container for the three fitted models and the auxiliary-variable names.
setClass("twostage", slots = c(saturated = "lavaan", target = "lavaan",
                               baseline = "lavaan", auxNames = "character"))

## User-facing wrappers: forward to twostage() with the matching lavaan
## fitting function; 'aux' names auxiliary variables, 'baseline.model'
## optionally overrides the default independence baseline.
cfa.2stage <- function(..., aux = NULL, baseline.model = NULL) {
  twostage(..., aux = aux, fun = "cfa", baseline.model = baseline.model)
}
sem.2stage <- function(..., aux = NULL, baseline.model = NULL) {
  twostage(..., aux = aux, fun = "sem", baseline.model = baseline.model)
}
growth.2stage <- function(..., aux = NULL, baseline.model = NULL) {
  twostage(..., aux = aux, fun = "growth", baseline.model = baseline.model)
}
lavaan.2stage <- function(..., aux = NULL, baseline.model = NULL) {
  twostage(..., aux = aux, fun = "lavaan", baseline.model = baseline.model)
}
## twostage: two-stage ML estimation with missing data.
## Stage 1 fits a saturated model with FIML (optionally including auxiliary
## variables); Stage 2 fits the target model to the Stage-1 estimated moments;
## Stage 0 fits a baseline model for incremental fit indices.
## Returns a "twostage" S4 object holding all three fitted lavaan models.
twostage <- function(..., aux, fun, baseline.model = NULL) {
  if (all(aux == "")) aux <- NULL
  dots <- list(...)
  if (is.null(dots$model)) stop("lavaan model syntax argument must be named 'model'.")
  ## Split '...' into arguments for lavaanify() vs. for lavaan() itself.
  lavaanifyArgs <- dots[intersect(names(dots), names(formals(lavaan::lavaanify)))]
  funArgs <- dots[intersect(names(dots), names(formals(lavaan::lavaan)))]
  ## set some non-optional lavaan arguments
  funArgs$meanstructure <- TRUE
  funArgs$conditional.x <- FALSE
  funArgs$fixed.x <- FALSE
  funArgs$missing <- "fiml"
  funArgs$estimator <- "ML"
  funArgs$test <- "standard"
  if (is.null(funArgs$information)) funArgs$information <- "observed"
  if (funArgs$information == "expected") {
    message("If data are MAR, only the observed information matrix is consistent.")
    if (!is.null(aux)) {
      funArgs$information <- "observed"
      message(c("Using auxiliary variables implies assuming that data are MAR. ",
                "The lavaan argument 'information' was set to 'observed'."))
    }
    if (!is.null(funArgs$se)) if(funArgs$se != "standard") {
      funArgs$information <- "observed"
      message(c("The lavaan argument 'information' was set to 'observed' ",
                "because adjusting SEs for non-normality requires it."))
    }
  }
  funArgs$NACOV <- NULL
  funArgs$do.fit <- NULL
  ## STAGE 1:
  ## fit saturated model (all variances/covariances/means of the target
  ## variables plus any auxiliary variables).
  if (!is.null(funArgs$group)) lavaanifyArgs$ngroups <- length(table(funArgs$data[ , funArgs$group]))
  targetNames <- lavaan::lavNames(do.call(lavaan::lavaanify, lavaanifyArgs))
  varnames <- c(targetNames, aux)
  covstruc <- outer(varnames, varnames, function(x, y) paste(x, "~~", y))
  satArgs <- funArgs
  satArgs$constraints <- NULL
  satArgs$group.equal <- ""
  satArgs$model <- c(covstruc[lower.tri(covstruc, diag = TRUE)], paste(varnames, "~ 1"))
  satFit <- do.call(lavaan::lavaan, satArgs)
  ## check for robust estimators
  opts <- lavaan::lavInspect(satFit, "options")
  if (!opts$se %in% c("standard","robust.huber.white"))
    stop(c("Two-Stage estimation requires either se = 'standard' for ",
           "multivariate normal data or se = 'robust.huber.white' to ",
           "correct for non-normality."))
  ## STAGE 2:
  ## fit target model to saturated estimates (as summary statistics).
  targetArgs <- funArgs
  targetArgs$data <- NULL
  targetArgs$sample.cov <- lavaan::lavInspect(satFit, "cov.ov")
  targetArgs$sample.mean <- lavaan::lavInspect(satFit, "mean.ov")
  targetArgs$sample.nobs <- lavaan::lavInspect(satFit, "nobs")
  targetArgs$se <- "standard"
  targetArgs$sample.cov.rescale <- FALSE
  targetFit <- do.call(fun, targetArgs)
  ## STAGE 0:
  ## fit baseline model (for incremental fit indices); default is the
  ## independence model (free variances/means, zero covariances).
  baseArgs <- targetArgs
  if (is.null(baseline.model)) {
    basecov <- outer(targetNames, targetNames, function(x, y) paste0(x, " ~~ 0*", y))
    diag(basecov) <- paste(targetNames, "~~", targetNames)
    baseArgs$model <- c(basecov[lower.tri(basecov, diag = TRUE)], paste(targetNames, "~ 1"))
  } else baseArgs$model <- baseline.model
  baseArgs$se <- "standard"
  baseFit <- do.call(lavaan::lavaan, baseArgs)
  if (length(setdiff(lavaan::lavNames(baseFit), targetNames)))
    warning("The baseline model includes variables excluded from the target model.")
  if (length(setdiff(targetNames, lavaan::lavNames(baseFit))))
    warning("The target model includes variables excluded from the baseline model.")
  ## return both models
  out <- new("twostage", saturated = satFit, target = targetFit,
             baseline = baseFit, auxNames = as.character(aux))
  out
}

## methods

## coef: free (or all user-specified) parameter estimates of the target model.
setMethod("coef", "twostage", function(object, type = c("free","user")) {
  type <- type[1]
  lavaan::coef(object@target, type = type)
})

## fitted.values / fitted: model-implied moments of the chosen model slot.
setMethod("fitted.values", "twostage", function(object, model = c("target","saturated","baseline"),
                                                type = "moments", labels = TRUE) {
  model <- model[1]
  lavaan::fitted.values(slot(object, model), type = type, labels = labels)
})
setMethod("fitted", "twostage", function(object, model = c("target","saturated","baseline"),
                                         type = "moments", labels = TRUE) {
  model <- model[1]
  lavaan::fitted.values(slot(object, model), type = type, labels = labels)
})

## residuals: residual moments of the target model.
setMethod("residuals", "twostage", function(object, type = c("raw","cor","normalized","standardized")) {
  type <- type[1]
  lavaan::residuals(object@target, type = type)
})
c("raw","cor","normalized","standardized")) { type <- type[1] lavaan::residuals(object@target, type = type) }) setMethod("nobs", "twostage", function(object, type = c("ntotal","ngroups","n.per.group","norig", "patterns","coverage")) { type <- type[1] if (type == "n.per.group") type <- "nobs" lavaan::lavInspect(object@saturated, what = type) }) setMethod("vcov", "twostage", function(object, baseline = FALSE) { SLOT <- if (baseline) "baseline" else "target" ## calculate model derivatives and complete-data information matrix MATS <- twostageMatrices(object, baseline) meat <- MATS$H %*% MATS$delta bread <- MASS::ginv(t(MATS$delta) %*% meat) # FIXME: why not solve()? out <- bread %*% t(meat) %*% MATS$satACOV %*% meat %*% bread class(out) <- c("lavaan.matrix.symmetric","matrix") if (baseline) { rownames(out) <- names(getMethod("coef", "lavaan")(object@baseline)) } else { rownames(out) <- names(getMethod("coef", "twostage")(object)) } colnames(out) <- rownames(out) out }) ## chi-squared test results (difference tests not available yet) setMethod("anova", "twostage", function(object, h1 = NULL, baseline = FALSE) { if (is.null(h1)) { return(twostageLRT(object, baseline, print = TRUE)) } H0 <- twostageLRT(object, baseline = FALSE) H1 <- twostageLRT(h1, baseline = FALSE) DF0 <- H0$residual[["df"]] DF1 <- H1$residual[["df"]] if (DF0 == DF1) stop("Models have the same degrees of freedom.") if (min(c(DF0, DF1)) == 0L) return(twostageLRT(object, baseline, print = TRUE)) parent <- which.min(c(DF0, DF1)) if (parent == 1L) { parent <- H0 H0 <- H1 H1 <- parent DF0 <- H0$residual[["df"]] DF1 <- H1$residual[["df"]] } DF <- DF0 - DF1 ## residual-based statistic T.res <- H0$residual[["chisq"]] - H1$residual[["chisq"]] residual <- c(chisq = T.res, df = DF, pvalue = pchisq(T.res, df = DF, lower.tail = FALSE)) class(residual) <- c("lavaan.vector","numeric") ## scaled test statistic chisq.naive <- H0$scaled[["chisq.naive"]] - H1$scaled[["chisq.naive"]] cc <- 
(DF0*H0$scaled[["scaling.factor"]] - DF1*H1$scaled[["scaling.factor"]]) / DF if (cc < 0) { warning("Scaling factor is negative, so it was set to missing.") cc <- NA } scaled <- c(chisq.naive = chisq.naive, scaling.factor = cc, chisq.scaled = chisq.naive / cc, DF = DF, pvalue = pchisq(chisq.naive / cc, df = DF, lower.tail = FALSE)) class(scaled) <- c("lavaan.vector","numeric") ## return both statistics if (lavaan::lavInspect(object@saturated, "options")$se == "standard") { cat("Difference test for Browne (1984) residual-based statistics:\n\n") print(residual) } cat("\n\nSatorra-Bentler (2001) scaled difference test:\n\n") print(scaled) invisible(list(residual = residual, scaled = scaled)) }) setMethod("show", "twostage", function(object) { ## show chi-squared test results cat("Chi-squared test(s) results, ADJUSTED for missing data:\n\n") getMethod("anova", "twostage")(object) cat("\n\nChi-squared test results, UNADJUSTED for missing data:\n\n") show(object@target) invisible(object) }) setMethod("summary", "twostage", function(object, ...) { ## show chi-squared test results AND estimates getMethod("show", "twostage")(object) cat("\n\nParameter Estimates, with SEs (and tests/CIs) ADJUSTED for missing data:\n\n") dots <- list(...) 
if (!"fmi" %in% names(dots)) dots$fmi <- FALSE if (!"ci" %in% names(dots)) dots$ci <- TRUE if (!"level" %in% names(dots)) dots$level <- .95 PT <- lavaan::parTable(object@target) PT <- PT[PT$group > 0, ] PE <- do.call(lavaan::parameterEstimates, c(dots, object = object@target)) SEs <- sqrt(diag(getMethod("vcov", "twostage")(object))) PE$se[PT$free > 0] <- SEs[PT$free] PE$z[PT$free > 0] <- PE$est[PT$free > 0] / PE$se[PT$free > 0] PE$pvalue[PT$free > 0] <- pnorm(abs(PE$z[PT$free > 0]), lower.tail = FALSE)*2 if (dots$ci) { crit <- qnorm(1 - (1 - dots$level) / 2) PE$ci.lower[PT$free > 0] <- PE$est[PT$free > 0] - crit * PE$se[PT$free > 0] PE$ci.upper[PT$free > 0] <- PE$est[PT$free > 0] + crit * PE$se[PT$free > 0] } if (dots$fmi) { compVar <- diag(lavaan::vcov(object@target))[PT$free] ## FIXME: need to re-fit model to model-implied moments from Stage 2? # compFit <- lavaan::update(object@target, sample.nobs = lavaan::nobs(object@target), # sample.cov = lavInspect(object@target, "cov.ov"), # sample.mean = lavInspect(object@target, "mean.ov")) # compVar <- diag(lavaan::vcov(compFit))[PT$free] missVar <- SEs^2 PE$fmi[PT$free > 0] <- 1 - compVar / missVar } PE }) ## (hidden?) 
## function utilized by vcov and anova methods
## twostageMatrices: assemble the matrices needed for two-stage-ML inference:
##   delta   - model derivatives (moments w.r.t. free parameters), groups stacked
##   H       - complete-data information matrix (block-diagonal over groups)
##   satACOV - asymptotic covariance matrix of the Stage-1 (saturated) estimates
##   satInfo - its inverse information, with auxiliary variables partialled out
twostageMatrices <- function(object, baseline) {
  SLOT <- if (baseline) "baseline" else "target"
  ## extract parameter table to isolate estimates by group
  PTsat <- lavaan::parTable(object@saturated)
  nG <- max(PTsat$group)
  isMG <- nG > 1L
  ## model derivatives
  delta <- lavaan::lavInspect(slot(object, SLOT), "delta")
  if (!isMG) delta <- list(delta)
  for (g in 1:nG) {
    ## keep only (co)variance and mean rows, ordered covariances-then-means
    covparams <- grep(pattern = "~~", x = rownames(delta[[g]]))
    meanparams <- grep(pattern = "~1", x = rownames(delta[[g]]))
    delta[[g]] <- delta[[g]][c(covparams, meanparams), ]
  }
  ## stack groups' deltas into 1 matrix
  delta <- do.call(rbind, delta)
  ## extract estimated moments from saturated model, and number of moments
  satSigma <- lavaan::lavInspect(object@saturated, "cov.ov")
  satMu <- lavaan::lavInspect(object@saturated, "mean.ov")
  if (!isMG) {
    satSigma <- list(satSigma)
    satMu <- list(satMu)
  }
  if (length(object@auxNames)) {
    ## drop auxiliary variables, keeping only the target model's variables
    an <- object@auxNames
    tn <- lavaan::lavNames(slot(object, SLOT))
    for (g in 1:nG) {
      satSigma[[g]] <- satSigma[[g]][tn, tn]
      satMu[[g]] <- satMu[[g]][tn]
    }
  }
  p <- length(satMu[[1]])
  pStar <- p*(p + 1) / 2  # number of unique (co)variances
  ## extract model-implied moments
  muHat <- lavaan::lavInspect(slot(object, SLOT), "mean.ov")
  sigmaHat <- lavaan::lavInspect(slot(object, SLOT), "cov.ov")
  if (!isMG) {
    sigmaHat <- list(sigmaHat)
    muHat <- list(muHat)
  }
  shinv <- list()
  for (g in 1:nG) {
    ## align implied moments with the saturated ordering, invert Sigma-hat
    muHat[[g]] <- muHat[[g]][names(satMu[[g]])]
    sigmaHat[[g]] <- sigmaHat[[g]][rownames(satSigma[[g]]), colnames(satSigma[[g]])]
    shinv[[g]] <- solve(sigmaHat[[g]])
  }
  ## assemble complete-data information matrix
  H <- list()
  for (g in 1:nG) H[[g]] <- matrix(0, (pStar + p), (pStar + p))
  if (lavaan::lavInspect(slot(object, SLOT), "options")$estimator == "expected") {
    for (g in 1:nG) {
      H[[g]][1:pStar, 1:pStar] <- .5*lavaan::lav_matrix_duplication_pre_post(shinv[[g]] %x% shinv[[g]])
      H[[g]][(pStar + 1):(pStar + p), (pStar + 1):(pStar + p)] <- shinv[[g]]
    }
  } else {
    ## estimator == "observed"
    dMu <- list()
    for (g in 1:nG) {
      dMu[[g]] <- satMu[[g]] - muHat[[g]]
      H[[g]][1:pStar, 1:pStar] <- lavaan::lav_matrix_duplication_pre_post(shinv[[g]] %x% (shinv[[g]] %*% (satSigma[[g]] + dMu[[g]] %*% t(dMu[[g]])) %*% shinv[[g]] - .5*shinv[[g]]))
      H[[g]][(pStar + 1):(pStar + p), 1:pStar] <- lavaan::lav_matrix_duplication_post(shinv[[g]] %x% (t(dMu[[g]]) %*% shinv[[g]]))
      H[[g]][1:pStar, (pStar + 1):(pStar + p)] <- t(H[[g]][(pStar + 1):(pStar + p), 1:pStar])
      H[[g]][(pStar + 1):(pStar + p), (pStar + 1):(pStar + p)] <- shinv[[g]]
    }
  }
  ## combine into 1 block-diagonal matrix
  H <- do.call(lavaan::lav_matrix_bdiag, H)
  ## asymptotic information and covariance matrices of target model
  satACOV <- lavaan::vcov(object@saturated)
  satInfo <- solve(satACOV * lavaan::nobs(object@saturated))
  ## all(round(acov*N, 8) == round(solve(info), 8))
  ## all(round(acov, 8) == round(solve(info)/N, 8))
  if (length(object@auxNames)) {
    ## partial the auxiliary-variable rows/columns out of the information
    ## NOTE(review): `an` was assigned in the earlier auxNames block above;
    ## both blocks are gated on the same condition, so it is in scope here.
    dimTar <- !(PTsat$lhs %in% an | PTsat$rhs %in% an)
    dimAux <- PTsat$lhs %in% an | PTsat$rhs %in% an
    infoTar <- satInfo[dimTar, dimTar]
    infoAux <- satInfo[dimAux, dimAux]
    infoAT <- satInfo[dimAux, dimTar]
    satInfo <- infoTar - t(infoAT) %*% solve(infoAux) %*% infoAT
    satACOV <- solve(satInfo) / lavaan::nobs(object@saturated)
  }
  list(delta = delta, H = H, satACOV = satACOV, satInfo = satInfo)
}

## (hidden?) function utilized by anova method to test 1 or 2 models
## twostageLRT: compute a residual-based test statistic and a scaled test
## statistic for one model; returns (invisibly) a list with both, optionally
## printing them.
twostageLRT <- function(object, baseline, print = FALSE) {
  SLOT <- if (baseline) "baseline" else "target"
  ## calculate model derivatives and complete-data information matrix
  MATS <- twostageMatrices(object, baseline)
  ## residual-based statistic (Savalei & Bentler, 2009, eq. 8)
  N <- lavaan::nobs(slot(object, SLOT))
  nG <- lavaan::lavInspect(slot(object, SLOT), "ngroups")
  res <- lavaan::residuals(slot(object, SLOT))
  if (nG == 1L) res <- list(res)
  ## stack per-group residual moments: vech(covariance residuals), then means
  etilde <- do.call(c, lapply(res, function(x) c(lavaan::lav_matrix_vech(x$cov), x$mean)))
  ID <- MATS$satInfo %*% MATS$delta
  T.res <- N*t(etilde) %*% (MATS$satInfo - ID %*% MASS::ginv(t(MATS$delta) %*% ID) %*% t(ID)) %*% etilde # FIXME: why not solve()?
  DF <- lavaan::lavInspect(slot(object, SLOT), "fit")[["df"]]
  pval.res <- pchisq(T.res, df = DF, lower.tail = FALSE)
  residual <- c(chisq = T.res, df = DF, pvalue = pval.res)
  class(residual) <- c("lavaan.vector","numeric")
  ## scaled test statistic (Savalei & Bentler, 2009, eq. 9)
  meat <- MATS$H %*% MATS$delta
  bread <- MASS::ginv(t(MATS$delta) %*% meat) # FIXME: why not solve()?
  cc <- DF / sum(diag(MATS$satACOV %*% (MATS$H - meat %*% bread %*% t(meat))))
  chisq <- lavaan::lavInspect(slot(object, SLOT), "fit")[["chisq"]]
  T.scaled <- cc * chisq
  pval.scaled <- pchisq(T.scaled, df = DF, lower.tail = FALSE)
  scaled <- c(chisq.naive = chisq, scaling.factor = 1 / cc,
              chisq.scaled = T.scaled, df = DF, pvalue = pval.scaled)
  class(scaled) <- c("lavaan.vector","numeric")
  ## return both statistics
  if (print) {
    if (lavaan::lavInspect(object@saturated, "options")$se == "standard") {
      cat("Browne (1984) residual-based test statistic:\n\n")
      print(residual)
    }
    cat("\n\nSatorra-Bentler (2001) scaled test statistic:\n\n")
    print(scaled)
  }
  invisible(list(residual = residual, scaled = scaled))
}

# fitS <- cfa(model = model, data = dat1, missing = "fiml", se = "standard")
# fitR <- cfa(model = model, data = dat1, missing = "fiml", se = "robust.huber.white")
# all(lavInspect(fitS, "information") == lavInspect(fitR, "information"))
# all(vcov(fitS) == vcov(fitR))

## NOTE(review): the following line is tar-archive residue (file header for
## semTools/R/singleParamTest.R), preserved verbatim from the dump.
semTools/R/singleParamTest.R0000644000175100001440000001362113000201061015505 0ustar hornikusers
## singleParamTest: for two nested lavaan fits, test each parameter that
## differs between them one at a time, by (a) releasing it in the nested model
## and (b) constraining it in the parent model, comparing via lavTestLRT().
## Returns a matrix of chi-square/p-value columns (and, optionally, the
## refitted models when return.fit = TRUE).
## NOTE(review): relies on helpers freeParTable(), fixParTable(), removeEqCon(),
## and patMerge() defined elsewhere in the package (not visible in this chunk).
singleParamTest <- function(model1, model2, return.fit = FALSE,
    method = "satorra.bentler.2001") {
  # Check nested models without any swaps
  ## order the fits so fit0 has more df (more restricted) than fit1
  if(lavaan::fitMeasures(model1, "df")[[1]] > lavaan::fitMeasures(model2, "df")[[1]]) {
    fit0 <- model1
    fit1 <- model2
  } else {
    fit0 <- model2
    fit1 <- model1
  }
  # fit0 = Nested model, fit1 = Parent model
  pt1 <- lavaan::partable(fit1)
  pt0 <- lavaan::partable(fit0)
  namept1 <- paramNameFromPt(pt1)
  namept0 <- paramNameFromPt(pt0)
  # Two possible constraints: fixed parameters and equality constraints
  free1 <- (pt1$free != 0) & !(duplicated(pt1$free))
  free0 <- (pt0$free != 0) & !(duplicated(pt0$free))
  iscon1 <- pt1$op == "=="
  iscon0 <- pt0$op == "=="
  con1 <- list(id = integer(0), lhs = character(0), op = character(0), rhs = character(0))
  con0 <- list(id = integer(0), lhs = character(0), op = character(0), rhs = character(0))
  if(any(iscon1)) con1 <- list(id = pt1$id[iscon1], lhs = pt1$lhs[iscon1], op = pt1$op[iscon1], rhs = pt1$rhs[iscon1])
  if(any(iscon0)) con0 <- list(id = pt0$id[iscon0], lhs = pt0$lhs[iscon0], op = pt0$op[iscon0], rhs = pt0$rhs[iscon0])
  ## sanity checks that the two parameter tables are comparable and nested
  if(length(free1[!iscon1]) != length(free0[!iscon0])) stop("Parameter tables in two models do not have equal lengths. This function does not work.")
  if(!all(free1[free0])) stop("Model are not nested or are not arranged in the way that this function works.")
  if(sum(iscon1) > sum(iscon0)) stop("There are equality constraints in the model with less degrees of freedom that do not exist in the model with higher degrees of freedom. 
Thus, two models are not nested.")
  if(!all.equal(lapply(pt1[2:4], "[", !iscon1), lapply(pt0[2:4], "[", !iscon0))) stop("This function needs parameter tables of two models to have the same orders of the same parameters.")
  # Find fixed values or constraints
  difffree <- !free0[!iscon0] & free1[!iscon1]
  textcon1 <- paste0(con1$lhs, con1$op, con1$rhs)
  textcon0 <- paste0(con0$lhs, con0$op, con0$rhs)
  indexsamecon <- match(textcon1, textcon0)
  indexdiffcon <- setdiff(seq_along(textcon0), indexsamecon)
  diffcon <- lapply(con0, "[", indexdiffcon)
  fixval <- which(difffree)
  index <- c(fixval, diffcon$id)
  if(length(index) <= 0) stop("Two models are identical. No single parameter test can be done.")
  # Find nested model and release 1-by-1
  freeCon <- matrix(NA, length(index), 2)
  colnames(freeCon) <- c("free.chi", "free.p")
  listFreeCon <- list()
  runnum <- 1
  for(i in seq_along(fixval)) {
    ## release one fixed parameter in the nested model and refit
    temp <- freeParTable(pt0, pt0$lhs[fixval[i]], pt0$op[fixval[i]], pt0$rhs[fixval[i]], pt0$group[fixval[i]])
    tryresult <- try(tempfit <- refit(temp, fit0), silent = TRUE)
    if(!is(tryresult, "try-error")) {
      compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit0, method = method), silent = TRUE)
      ## columns 5 and 7 of the lavTestLRT table: chisq diff and p-value
      if(!is(compresult, "try-error")) freeCon[runnum,] <- unlist(modelcomp[2, c(5, 7)])
    }
    listFreeCon <- c(listFreeCon, tryresult)
    runnum <- runnum + 1
  }
  rownames(freeCon)[seq_along(fixval)] <- names(listFreeCon)[seq_along(fixval)] <- namept0[fixval]
  for(i in seq_along(diffcon$id)) {
    ## release one equality constraint in the nested model and refit
    temp <- removeEqCon(pt0, diffcon$id[i])
    tryresult <- try(tempfit <- refit(temp, fit0), silent = TRUE)
    if(!is(tryresult, "try-error")) {
      compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit0, method = method), silent = TRUE)
      if(!is(compresult, "try-error")) freeCon[runnum,] <- unlist(modelcomp[2, c(5, 7)])
    }
    listFreeCon <- c(listFreeCon, tryresult)
    runnum <- runnum + 1
  }
  poscon <- seq_along(diffcon$id) + length(fixval)
  rownames(freeCon)[poscon] <- names(listFreeCon)[poscon] <- namept0[diffcon$id]
  # Find parent model and constrain 1-by-1
  fixCon <- matrix(NA, length(index), 2)
  colnames(fixCon) <- c("fix.chi", "fix.p")
  listFixCon <- list()
  runnum <- 1
  for(i in seq_along(fixval)) {
    ## fix one free parameter in the parent model to its nested-model value
    temp <- fixParTable(pt1, pt1$lhs[fixval[i]], pt1$op[fixval[i]], pt1$rhs[fixval[i]], pt1$group[fixval[i]], pt0$ustart[fixval[i]])
    tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE)
    if(!is(tryresult, "try-error")) {
      compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE)
      if(!is(compresult, "try-error")) fixCon[runnum,] <- unlist(modelcomp[2,c(5, 7)])
    }
    listFixCon <- c(listFixCon, tryresult)
    runnum <- runnum + 1
  }
  rownames(fixCon)[seq_along(fixval)] <- names(listFixCon)[seq_along(fixval)] <- namept0[fixval]
  for(i in seq_along(diffcon$id)) {
    ## add one equality constraint to the parent model and refit
    temp <- patMerge(pt1, list(lhs = diffcon$lhs[i], op = diffcon$op[i], rhs = diffcon$rhs[i]))
    tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE)
    if(!is(tryresult, "try-error")) {
      compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE)
      if(!is(compresult, "try-error")) fixCon[runnum,] <- unlist(modelcomp[2, c(5, 7)])
    }
    listFixCon <- c(listFixCon, tryresult)
    runnum <- runnum + 1
  }
  poscon <- seq_along(diffcon$id) + length(fixval)
  rownames(fixCon)[poscon] <- names(listFixCon)[poscon] <- namept0[diffcon$id]
  result <- cbind(freeCon, fixCon)
  if(return.fit) {
    return(invisible(list(result = result, models = list(free = listFreeCon, fix = listFixCon))))
  } else {
    return(result)
  }
}

## paramNameFromPt: build human-readable labels ("lhs op rhs[.gG]") for every
## row of a lavaan parameter table; equality-constraint rows are labelled by
## substituting the labels of the parameters they reference (via plabel).
paramNameFromPt <- function(pt) {
  ngroups <- max(pt$group)
  result <- NULL
  if(ngroups == 1) {
    result <- paste0(pt$lhs, pt$op, pt$rhs)
  } else {
    ## append ".gG" suffix except for group 0/1 (first group left unsuffixed)
    grouplab <- paste0(".g", pt$group)
    grouplab[grouplab == ".g0" | grouplab == ".g1"] <- ""
    result <- paste0(pt$lhs, pt$op, pt$rhs, grouplab)
  }
  con <- pt$op == "=="
  pt$lhs[con] <- result[match(pt$lhs[con], pt$plabel)]
  pt$rhs[con] <- result[match(pt$rhs[con], pt$plabel)]
  result[con] <- paste(pt$lhs[con], pt$op[con], pt$rhs[con])
  result
}

## refit: re-run the lavaan call that produced `object`, substituting the
## (modified) parameter table `pt` as the model. When resetstart = TRUE the
## "start" column is dropped so starting values are recomputed.
refit <- function(pt, object, resetstart = TRUE) {
  if(resetstart && "start" %in% names(pt)) pt <- pt[-which("start" == names(pt))]
  previousCall <- lavaan::lavInspect(object, "call")
  args <- previousCall[-1]
  args$model <- pt
  ## last element of the (possibly namespaced) call name is the function name
  funcall <- as.character(previousCall[[1]])
  tempfit <- do.call(funcall[length(funcall)], args)
}

## NOTE(review): tar-archive residue (file header for semTools/R/powerAnalysis.R),
## preserved verbatim from the dump.
semTools/R/powerAnalysis.R0000644000175100001440000001336613000201061015251 0ustar hornikusers
# plotRMSEApower
#Plot power of RMSEA over a range of possible sample sizes
#input: rmsea of null and alternative model, degress of freedom, lower sampel size, upper sample sample, sample size steps, alpha, the number of group in calculating RMSEA
#Output: plot of power
#Alexander M. Schoemann, Kristopher J. Preacher, Donna Coffman
#5/30/2012
plotRMSEApower <- function(rmsea0, rmseaA, df, nlow, nhigh, steps=1, alpha=.05, group=1, ...) {
  pow1 <- 0
  nseq <- seq(nlow,nhigh, by=steps)
  for(i in nseq){
    ## noncentrality parameters under H0 and HA for sample size i
    ncp0 <- ((i-1)*df*rmsea0^2)/group
    ncpa <- ((i-1)*df*rmseaA^2)/group
    #Compute power
    if(rmsea0 < rmseaA) {
      cval <- qchisq(alpha,df,ncp=ncp0,lower.tail=FALSE)
      pow <- pchisq(cval,df,ncp=ncpa,lower.tail=FALSE)
    }
    if(rmsea0 > rmseaA) {
      cval <- qchisq(1-alpha, df, ncp=ncp0, lower.tail=FALSE)
      pow <- 1-pchisq(cval,df,ncp=ncpa,lower.tail=FALSE)
    }
    ## NOTE(review): when rmsea0 == rmseaA neither branch runs and the
    ## previous `pow` is re-used (error on first iteration) -- confirm intended.
    pow1<-c(pow1, pow)
  }
  pow1 <- pow1[-1]  # drop the 0 used to seed the vector
  plot(nseq,pow1,xlab="Sample Size",ylab="Power",main="Compute Power for RMSEA",type="l", ...)
}

#Example Code
#plotRMSEApower(.025, .075, 23, 100, 500, 10)

# findDensity
# Find the x and y coordinate of a distribution in order to plot a density of a distribution
# dist: target distribution in text, such as "chisq"
# ...: Additional argument of the distribution
# Return the data frame with x and y coordinates for plotting density
findDensity <- function(dist, ...) {
  ## build a call to the quantile function q<dist>(c(0.001, 0.999), ...)
  FUN <- list()
  FUN[[1]] <- get(paste("q", dist, sep=""))
  FUN[[2]] <- c(0.001, 0.999)
  FUN <- c(FUN, ...)
  bound <- eval(as.call(FUN))
  val <- seq(bound[1], bound[2], length.out=1000)
  ## re-use the same argument list with the density function d<dist>()
  FUN[[1]] <- get(paste("d", dist, sep=""))
  FUN[[2]] <- val
  height <- eval(as.call(FUN))
  return(cbind(val, height))
}

#Example Code
#findDensity("chisq", df=10)

# plotOverlapDensity
# Plot the overlapping distributions using density
# dat: A list of data frame where each data frame has the x coordinate as the variable 1 and y coordinate as the variable 2
# vline: Vertical line in the graph
# caption: The name of each density line
# ...: Additional argument of the plot function
plotOverlapDensity <- function(dat, vline = NULL, caption=NULL, ...) {
  if(!is.list(dat)) {
    ## wrap a single data frame into a one-element list
    temp <- list()
    temp[[1]] <- dat
    dat <- temp
  }
  ## common axis limits across all densities
  stack <- do.call(rbind, dat)
  lim <- apply(stack, 2, function(x) c(min(x), max(x)))
  plot(stack, xlim=lim[,1], ylim=lim[,2], type="n", ...)
  for(i in 1:length(dat)) lines(dat[[i]], col = i, lwd=1.5)
  for(i in 1:length(vline)) abline(v = vline[i], lwd=1.5)
  if(!is.null(caption)) legend(0.50 * (lim[2,1] - lim[1,1]) + lim[1,1], 0.99 * (lim[2,2] - lim[1,2]) + lim[1,2], caption, col=1:length(dat), lty=1)
}

# plotRMSEAdist
# Plot the overlapping distributions of RMSEA based on noncentral chi-square distribution
# rmsea: A vector of RMSEA
# n: sample size
# df: degree of freedom of the chi-square distribution
# ptile: The percentile rank of the first specified rmsea to put the vertical line
# caption: The description of each rmsea
# rmseaScale: If TRUE, use RMSEA as the scale in x-axis. If FALSE, use chi-square as the scale in x-axis.
# group: The number of group in calculating RMSEA plotRMSEAdist <- function(rmsea, n, df, ptile=NULL, caption=NULL, rmseaScale = TRUE, group=1) { graph <- cbind(rmsea, df) ncp <- apply(graph, 1, function(x, n, group) ((n - 1) * x[2] * (x[1]^2))/group, n=n, group=group) graph <- cbind(graph, ncp) dens <- lapply(as.list(data.frame(t(graph))), function(x) findDensity("chisq", df = x[2], ncp=x[3])) if(rmseaScale) dens <- lapply(dens, function(x, df, n, group) { x[,1] <- (x[,1] - df)/(n-1); x[(x[,1] < 0),1] <- 0; x[,1] <- sqrt(group) * sqrt(x[,1]/df); return(x) }, df=df, n=n, group=group) cutoff <- NULL if(!is.null(ptile)) { cutoff <- qchisq(ptile, df=graph[1, 2], ncp=graph[1, 3]) if(rmseaScale) cutoff <- sqrt(group) * sqrt((cutoff - df)/(df * (n - 1))) } if(is.null(caption)) caption <- sapply(graph[,1], function(x) paste("Population RMSEA = ", format(x, digits=3), sep="")) plotOverlapDensity(dens, cutoff, caption, xlab=ifelse(rmseaScale, "RMSEA", "Chi-Square"), ylab="Density") equal0 <- sapply(dens, function(x) x[,1] == 0) if(any(equal0)) warning("The density at RMSEA = 0 cannot be trusted because the plots are truncated.") } # findRMSEApower # Find the proportion of the samples from the alternative RMSEA rejected by the cutoff dervied from the null RMSEA # rmsea0: The null RMSEA # rmseaA: The alternative RMSEA # n: sample size # df: degree of freedom of the chi-square distribution # alpha: The alpha level # group: The number of group in calculating RMSEA # Return power findRMSEApower <- function(rmsea0, rmseaA, df, n, alpha=.05, group=1) { ncp0 <- ((n-1)*df*rmsea0^2)/group ncpa <- ((n-1)*df*rmseaA^2)/group if (rmsea0 power)) { return("Sample Size <= 5") } else if (all(power > pow)) { repeat { n <- n + 500 pow <- findRMSEApower(rmsea0, rmseaA, df, n, alpha, group=group) if(any(pow > power)) { index <- which(pow > power)[1] return(n[index]/group) } } } else { index <- which(pow > power)[1] return(n[index]/group) } } 
semTools/R/tukeySEM.R0000644000175100001440000000207613000201061014113 0ustar hornikusers############################################# ## tukeySEM -- a function to compute ## ## tukey's post-hoc test with unequal ## ## sample sizes and variances ## ## ## ## Alexander M. Schoemann ## ## Last edited on 01/16/2013 ## ############################################# ##inputs: mean of group 1, mean of group2, variance of group 1, variance of group 2 ## sample size or group1, sample size of group2, number of groups in the ANOVA ##Output: vector containing the q statistic, degrees of freedom, and associated p value tukeySEM <- function(m1, m2, var1, var2, n1, n2, ng){ qNum <- abs(m1 - m2) qDenom <- sqrt(((var1/n1) + (var2/n2))/2) Tukeyq <- qNum/qDenom Tukeydf <- ((var1/n1) + (var2/n2))^2 / (((var1/n1)^2/(n1-1)) + ((var2/n2)^2/(n2-2))) p <- 1- ptukey(Tukeyq, ng, Tukeydf) cols <- c("q", "df", "p") res <- c(Tukeyq, Tukeydf, p) names(res) <- cols res } ##Example from Schoemann (2013) ##Bio vs. policial science on evo misconceptions #tukeySEM(3.91, 3.96,.46, .62, 246, 425,3) semTools/R/parcelAllocation.R0000644000175100001440000002526513000201061015666 0ustar hornikusers##Parcel Allocation ##Corbin Quick & Alex Schoemann ##6/4/12 ##Bug fix 1/30/2014 - works with single factor in the model ##Vector of numbers of indicators in each parcel, vector assigning each indicator to its factor, Number allocations, lavaan syntax, Data set, parcel names, variables left out of parceling, additional arguments to be passed to lavaan parcelAllocation <- function(nPerPar,facPlc,nAlloc=100,syntax,dataset,names='default',leaveout=0, ...) 
{ if(is.character(dataset)){ dataset <- read.csv(dataset) } dataset <- as.matrix(dataset) if(nAlloc<2) stop("Minimum of two allocations required.") if(is.list(facPlc)){ if(is.numeric(facPlc[[1]][1])==FALSE){ facPlcb <- facPlc Namesv <- colnames(dataset) for(i in 1:length(facPlc)){ for(j in 1:length(facPlc[[i]])){ facPlcb[[i]][j] <- match(facPlc[[i]][j],Namesv) } facPlcb[[i]] <- as.numeric(facPlcb[[i]]) } facPlc <- facPlcb } # facPlc2 <- rep(0, sum(sapply(facPlc, length))) facPlc2 <- rep(0,ncol(dataset)) for(i in 1:length(facPlc)){ for(j in 1:length(facPlc[[i]])){ facPlc2[facPlc[[i]][j]] <- i } } facPlc <- facPlc2 } if(leaveout!=0){ if(is.numeric(leaveout)==FALSE){ leaveoutb <- rep(0,length(leaveout)) Namesv <- colnames(dataset) for(i in 1:length(leaveout)){ leaveoutb[i] <- match(leaveout[i],Namesv) } leaveout <- as.numeric(leaveoutb) } k1 <- .001 for(i in 1:length(leaveout)){ facPlc[leaveout[i]] <- facPlc[leaveout[i]] + k1 k1 <- k1 +.001 } } if(0 %in% facPlc == TRUE){ Zfreq <- sum(facPlc==0) for (i in 1:Zfreq){ Zplc <- match(0,facPlc) dataset <- dataset[ , -Zplc] facPlc <- facPlc[-Zplc] } ## this allows for unused variables in dataset, ## which are specified by zeros, and deleted } if(is.list(nPerPar)){ nPerPar2 <- c() for (i in 1:length(nPerPar)){ Onesp <- sum(facPlc>i & facPlc 0){ ##Bug was here. With 1 factor Maxv=0. Skip this with a single factor for (i in 1:Maxv){ Mat <- match(i+1, Locate) if(Npp[Mat] == Npp[Mat-1]){ stop('** WARNING! ** Parcels incorrectly specified. Check input!')} } } ## warning message if parcel crosses into multiple factors ## vector, parcel to which each variable belongs ## vector, factor to which each variables belongs ## if variables are in the same parcel, but different factors ## error message given in output Onevec <- facPlc - round(facPlc) NleaveA <- length(Onevec) - sum(Onevec==0) NleaveP <- sum(nPerPar==1) if(NleaveA < NleaveP){ print('** WARNING! ** Single-variable parcels have been requested. 
Check input!')} if(NleaveA > NleaveP) print('** WARNING! ** More non-parceled variables have been requested than provided for in parcel vector. Check input!') if(length(names)>1){ if(length(names) != length(nPerPar)){ print('** WARNING! ** Number of parcel names provided not equal to number of parcels requested. Check input!')}} if(NA %in% dataset == TRUE){ print('** WARNING! ** Missing data detected. Prior multiple imputation recommended.')} Data <- c(1:ncol(dataset)) ## creates a vector of the number of indicators ## e.g. for three indicators, c(1, 2, 3) Nfactors <- max(facPlc) ## scalar, number of factors Nindicators <- length(Data) ## scalar, number of indicators Npar <- length(nPerPar) ## scalar, number of parcels Rmize <- runif(Nindicators, 1, Nindicators) ## create vector of randomly ordered numbers, ## length of number of indicators Data <- rbind(facPlc, Rmize, Data) ## "Data" becomes object of three rows, consisting of ## 1) factor to which each indicator belongs ## (in order to preserve indicator/factor ## assignment during randomization) ## 2) randomly order numbers ## 3) indicator number Results <- matrix(numeric(0), nAlloc, Nindicators) ##create empty matrix for parcel allocation matrix Pin <- nPerPar[1] for (i in 2:length(nPerPar)){ Pin <- c(Pin, nPerPar[i]+Pin[i-1]) ## creates vector which indicates the range ## of columns (endpoints) in each parcel } for (i in 1:nAlloc) { Data[2,]<-runif(Nindicators, 1, Nindicators) ## Replace second row with newly randomly ordered numbers Data <- Data[, order(Data[2,])] ## Order the columns according ## to the values of the second row Data <- Data[, order(Data[1,])] ## Order the columns according ## to the values of the first row ## in order to preserve factor assignment Results[i,] <- Data[3,] ## assign result to allocation matrix } Alpha <- rbind(Results[1,], dataset) ## bind first random allocation to dataset "Alpha" Allocations <- list() ## create empty list for allocation data matrices for (i in 1:nAlloc){ 
Ineff <- rep(NA, ncol(Results)) Ineff2 <- c(1:ncol(Results)) for (inefficient in 1:ncol(Results)){ Ineff[Results[i,inefficient]] <- Ineff2[inefficient] } Alpha[1,] <- Ineff ## replace first row of dataset matrix ## with row "i" from allocation matrix Beta <- Alpha[, order(Alpha[1,])] ## arrangle dataset columns by values of first row ## assign to temporary matrix "Beta" Temp <- matrix(NA, nrow(dataset), Npar) ## create empty matrix for averaged parcel variables TempAA <- if(length(1:Pin[1])>1) Beta[2:nrow(Beta) , 1:Pin[1]] else cbind(Beta[2:nrow(Beta) , 1:Pin[1]],Beta[2:nrow(Beta) , 1:Pin[1]]) Temp[, 1] <- rowMeans(TempAA) ## fill first column with averages from assigned indicators for (al in 2:Npar) { Plc <- Pin[al-1]+1 ## placeholder variable for determining parcel width TempBB <- if(length(Plc:Pin[al])>1) Beta[2:nrow(Beta) , Plc:Pin[al]] else cbind(Beta[2:nrow(Beta) , Plc:Pin[al]],Beta[2:nrow(Beta) , Plc:Pin[al]]) Temp[, al] <- rowMeans(TempBB) ## fill remaining columns with averages from assigned indicators } if(length(names)>1){ colnames(Temp) <- names } Allocations[[i]] <- Temp ## assign result to list of parcel datasets } if(as.vector(regexpr("/",syntax))!=-1){ replist<-matrix(NA,nAlloc,1) for (i in 1:nAlloc){ if(names!='default'){colnames(Allocations[[i]])<-names}else{colnames(Allocations[[i]])<-NULL} write.table(Allocations[[i]],paste(syntax,'parcelruns',i,'.dat',sep=''),row.names=FALSE,col.names=TRUE) replist[i,1]<-paste('parcelrun',i,'.dat',sep='') } write.table(replist,paste(syntax,"parcelrunsreplist.dat",sep=''),quote=FALSE,row.names=FALSE,col.names=FALSE) } else{ Param <- list() ## list for parameter estimated for each imputation Fitind <- list() ## list for fit indices estimated for each imputation for (i in 1:nAlloc){ data <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) ## convert allocation matrix to dataframe for model estimation fit <- lavaan::sem(syntax, data=data, ...) 
## estimate model in lavaan Param[[i]] <- lavaan::parameterEstimates(fit) ## assign allocation parameter estimates to list Fitind[[i]] <- lavaan::fitMeasures(fit, c("chisq", "df", "cfi", "tli", "rmsea", "srmr")) ## assign allocation parameter estimates to list } Parmn <- Param[[1]] ## assign first parameter estimates to mean dataframe ParSE <- matrix(NA, nrow(Parmn), nAlloc) ParSEmn <- Parmn[,5] Parsd <- matrix(NA, nrow(Parmn), nAlloc) ## assign parameter estimates for S.D. calculation Fitmn <- Fitind[[1]] ## assign first fit indices to mean dataframe Fitsd <- matrix(NA, length(Fitmn), nAlloc) ## assign fit indices for S.D. calculation Sigp <- matrix(NA, nrow(Parmn), nAlloc) ## assign p-values to calculate percentage significant for (i in 1:nAlloc){ Parsd[,i] <- Param[[i]][,4] ## assign parameter estimates for S.D. estimation ParSE[,i] <- Param[[i]][,5] if(i>1){ParSEmn <- ParSEmn + Param[[i]][,5]} Sigp[,ncol(Sigp)-i+1] <- Param[[i]][,7] ## assign p-values to calculate percentage significant Fitsd[,i] <- Fitind[[i]] ## assign fit indices for S.D. 
estimation if(i>1){Parmn[,4:ncol(Parmn)] <- Parmn[,4:ncol(Parmn)] + Param[[i]][,4:ncol(Parmn)]} ## add together all parameter estimates if(i>1){Fitmn <- Fitmn + Fitind[[i]]} ## add together all fit indices } Sigp <- Sigp + .45 Sigp <- apply(Sigp, c(1,2), round) Sigp <- 1 - as.vector(rowMeans(Sigp)) ## calculate percentage significant parameters Parsum <- cbind(apply(Parsd,1,sd),apply(Parsd,1,max),apply(Parsd,1,min),apply(Parsd,1,max)-apply(Parsd,1,min), Sigp) colnames(Parsum) <- c("S.D.","MAX","MIN","Range", "% Sig") ## calculate parameter S.D., minimum, maximum, range, bind to percentage significant ParSEmn <- cbind(Parmn[,1:3], ParSEmn/nAlloc) ParSEfn <- cbind(ParSEmn,apply(ParSE,1,sd),apply(ParSE,1,max),apply(ParSE,1,min),apply(ParSE,1,max)-apply(ParSE,1,min)) colnames(ParSEfn) <- c("lhs", "op", "rhs", "Avg SE","S.D.","MAX","MIN","Range") Fitsum <- cbind(apply(Fitsd,1,sd),apply(Fitsd,1,max),apply(Fitsd,1,min),apply(Fitsd,1,max)-apply(Fitsd,1,min)) rownames(Fitsum) <- c("chisq", "df", "cfi", "tli", "rmsea", "srmr") ## calculate fit S.D., minimum, maximum, range Parmn[,4:ncol(Parmn)] <- Parmn[,4:ncol(Parmn)] / nAlloc ## divide totalled parameter estimates by number allocations Parmn <- Parmn[,1:4] ## remove confidence intervals from output Parmn <- cbind(Parmn, Parsum) ## bind parameter average estimates to cross-allocation information Fitmn <- Fitmn / nAlloc ## divide totalled fit indices by number allocations Fitsum <- cbind(Fitmn,Fitsum) colnames(Fitsum) <- c("Avg Ind","S.D.","MAX","MIN","Range") ## bind to fit averages ParSEfn[,4:8] <- apply(ParSEfn[,4:8], 2, round, digits = 3) Parmn[,4:9] <- apply(Parmn[,4:9], 2, round, digits = 3) Fitsum <- apply(Fitsum, 2, round, digits = 3) ## round output to three digits Output <- list(Parmn,ParSEfn,Fitsum) names(Output) <- c('Estimates', 'SE', 'Fit') return(Output) }} #parcelAllocation(list(c(3,3,3)), list(name1), nAlloc=20, syntax=syntax, dataset=simParcel) 
## NOTE(review): the following line is tar-archive residue (file header for
## semTools/R/powerAnalysisNested.R), preserved verbatim from the dump.
semTools/R/powerAnalysisNested.R0000644000175100001440000001312213000201061016402 0ustar hornikusers
# Power analysis for nested model comparison
# Note: Model0 = Null hypothesis
# Model1 = Alternative hypothesis
# ModelA = More-restricted models (higher df; higher RMSEA)
# ModelB = Less-restricted models (lower df; lower RMSEA)

# findRMSEApowernested
# Find the proportion of the samples from the alternative pair of RMSEAs in nested model comparison rejected by the cutoff dervied from the null pair of RMSEAs in nested model comparison
# rmsea0A: The H0 baseline RMSEA
# rmsea0B: The H0 alternative RMSEA (trivial misfit)
# rmsea1A: The H1 baseline RMSEA
# rmsea1B: The H1 alternative RMSEA (target misfit to be rejected)
# n: sample size
# dfA: degree of freedom of the more-restricted model
# dfB: degree of freedom of the less-restricted model
# alpha: The alpha level
# group: The number of group in calculating RMSEA
# Return power
findRMSEApowernested <- function(rmsea0A = NULL, rmsea0B = NULL, rmsea1A,
    rmsea1B = NULL, dfA, dfB, n, alpha = 0.05, group = 1) {
  ## default NULL inputs: H0 RMSEAs default to 0, H1 "B" defaults to H0 "B"
  if(is.null(rmsea0A)) rmsea0A <- 0
  if(is.null(rmsea0B)) rmsea0B <- 0
  if(is.null(rmsea1B)) rmsea1B <- rmsea0B
  if(dfA <= dfB) stop("The degree of freedom of the more-restricted model (dfA) should be greater than the degree of freedom of the less-restricted model (dfB)")
  if(rmsea0A < rmsea0B) stop("In the null-hypothesis models, the RMSEA of the more-restricted model (rmsea0A) should have a higher than or equal to the RMSEA of the less-restricted model (rmsea0B).")
  if(rmsea1A < rmsea1B) stop("In the alternative-hypothesis models, the RMSEA of the more-restricted model (rmsea1A) should have a higher than or equal to the RMSEA of the less-restricted model (rmsea1B).")
  ## discrepancy-function values f = df * rmsea^2 / group for each model,
  ## noncentrality parameters are (n-1) times the H0/H1 discrepancy differences
  ddiff <- dfA-dfB
  f0a <- (dfA*rmsea0A^2)/group
  f0b <- (dfB*rmsea0B^2)/group
  f1a <- (dfA*rmsea1A^2)/group
  f1b <- (dfB*rmsea1B^2)/group
  ncp0 <- (n-1)*(f0a-f0b)
  ncp1 <- (n-1)*(f1a-f1b)
  ## critical value under H0; power = upper-tail probability under H1
  cval <- qchisq(1-alpha,ddiff,ncp0)
  Power <- 1-pchisq(cval,ddiff,ncp1)
  Power
}

## informal smoke test replicating a worked example (not run automatically)
test.findRMSEApowernested <- function() {
  alpha <- 0.05
  rmsea0A <- 0.06
  rmsea0B <- 0.05
  rmsea1A <- 0.08
  rmsea1B <- 0.05
  dfA <- 22
  dfB <- 20
  n <- 200
  group <- 1
  findRMSEApowernested(rmsea0A, rmsea0B, rmsea1A, rmsea1B, dfA, dfB, n, alpha, group)
}

# findRMSEAsamplesizenested
# Find the sample size that the power in rejection the samples from the alternative pair of RMSEA is just over the specified power
# rmsea0A: The H0 baseline RMSEA
# rmsea0B: The H0 alternative RMSEA (trivial misfit)
# rmsea1A: The H1 baseline RMSEA
# rmsea1B: The H1 alternative RMSEA (target misfit to be rejected)
# dfA: degree of freedom of the more-restricted model
# dfB: degree of freedom of the less-restricted model
# power: The desired statistical power
# alpha: The alpha level
# group: The number of group in calculating RMSEA
# Return The estimated sample size
findRMSEAsamplesizenested <- function(rmsea0A = NULL, rmsea0B = NULL, rmsea1A,
    rmsea1B = NULL, dfA, dfB, power=0.80, alpha=.05, group=1) {
  if(is.null(rmsea0A)) rmsea0A <- 0
  if(is.null(rmsea0B)) rmsea0B <- 0
  if(is.null(rmsea1B)) rmsea1B <- rmsea0B
  if(dfA <= dfB) stop("The degree of freedom of the more-restricted model (dfA) should be greater than the degree of freedom of the less-restricted model (dfB)")
  if(rmsea0A < rmsea0B) stop("In the null-hypothesis models, the RMSEA of the more-restricted model (rmsea0A) should have a higher than or equal to the RMSEA of the less-restricted model (rmsea0B).")
  if(rmsea1A < rmsea1B) stop("In the alternative-hypothesis models, the RMSEA of the more-restricted model (rmsea1A) should have a higher than or equal to the RMSEA of the less-restricted model (rmsea1B).")
  ## grid search over n = 5..500, extending in blocks of 500 until the
  ## requested power is reached (findRMSEApowernested is vectorized over n)
  n <- 5:500
  pow <- findRMSEApowernested(rmsea0A, rmsea0B, rmsea1A, rmsea1B, dfA, dfB, n, alpha, group = group)
  if(all(pow > power)) {
    return("Sample Size <= 5")
  } else if (all(power > pow)) {
    repeat {
      n <- n + 500
      pow <- findRMSEApowernested(rmsea0A, rmsea0B, rmsea1A, rmsea1B, dfA, dfB, n, alpha, group = group)
      if(any(pow > power)) {
        index <- which(pow > power)[1]
        return(n[index]/group)
      }
    }
  } else {
    index <- which(pow > power)[1]
    return(n[index]/group)
  }
}

## informal smoke test replicating a worked example (not run automatically)
## NOTE(review): locals `powd` and `G` are assigned but unused here.
test.findRMSEAsamplesizenested <- function() {
  alpha <- 0.05
  rmseaA <- 0.06
  rmseaB <- 0.05
  da <- 22
  db <- 20
  powd <- 0.8
  G <- 1
  findRMSEAsamplesizenested(rmsea0A = 0, rmsea0B = 0, rmsea1A = rmseaA, rmsea1B = rmseaB, da, db, power=0.80, alpha=.05, group=1)
}

# plotRMSEApowernested
#Plot power of nested model RMSEA over a range of possible sample sizes
# rmsea0A: The H0 baseline RMSEA
# rmsea0B: The H0 alternative RMSEA (trivial misfit)
# rmsea1A: The H1 baseline RMSEA
# rmsea1B: The H1 alternative RMSEA (target misfit to be rejected)
# dfA: degree of freedom of the more-restricted model
# dfB: degree of freedom of the less-restricted model
# nlow: Lower bound of sample size
# nhigh: Upper bound of sample size
# steps: Step size
# alpha: The alpha level
# group: The number of group in calculating RMSEA
# ...: Additional parameters for graphs
# Return plot of power
plotRMSEApowernested <- function(rmsea0A = NULL, rmsea0B = NULL, rmsea1A,
    rmsea1B = NULL, dfA, dfB, nlow, nhigh, steps=1, alpha=.05, group=1, ...){
  nseq <- seq(nlow,nhigh, by=steps)
  pow1 <- findRMSEApowernested(rmsea0A = rmsea0A, rmsea0B = rmsea0B, rmsea1A = rmsea1A, rmsea1B = rmsea1B, dfA = dfA, dfB = dfB, n = nseq, alpha = alpha, group = group)
  plot(nseq, pow1, xlab="Sample Size", ylab="Power", main="Compute Power for Nested RMSEA", type="l", ...)
## NOTE(review): this function's closing brace lies beyond the end of this
## chunk; the remainder of the file continues past the visible region.
} test.plotRMSEApowernested <- function() { alpha <- 0.05 rmseaA <- 0.06 rmseaB <- 0.05 da <- 22 db <- 20 plotRMSEApowernested(rmsea0A = 0, rmsea0B = 0, rmsea1A = rmseaA, rmsea1B = rmseaB, da, db, nlow=50, nhigh=500, steps=1, alpha=.05, group=1) } semTools/R/mvrnonnorm.R0000644000175100001440000001023613000201061014615 0ustar hornikusersmvrnonnorm <- function(n, mu, Sigma, skewness = NULL, kurtosis = NULL, empirical = FALSE) { p <- length(mu) if (!all(dim(Sigma) == c(p, p))) stop("incompatible arguments") eS <- eigen(Sigma, symmetric = TRUE) ev <- eS$values if (!all(ev >= -1e-06 * abs(ev[1L]))) stop("'Sigma' is not positive definite") X <- NULL if(is.null(skewness) && is.null(kurtosis)) { X <- MASS::mvrnorm(n=n, mu=mu, Sigma=Sigma, empirical = empirical) } else { if(empirical) warnings("The empirical argument does not work when the Vale and Maurelli's method is used.") if(is.null(skewness)) skewness <- rep(0, p) if(is.null(kurtosis)) kurtosis <- rep(0, p) Z <- ValeMaurelli1983copied(n = n, COR = cov2cor(Sigma), skewness = skewness, kurtosis = kurtosis) TMP <- scale(Z, center = FALSE, scale = 1/sqrt(diag(Sigma)))[,,drop=FALSE] X <- sweep(TMP, MARGIN=2, STATS=mu, FUN="+") } X } # Copied from lavaan package ValeMaurelli1983copied <- function(n=100L, COR, skewness, kurtosis, debug = FALSE) { fleishman1978_abcd <- function(skewness, kurtosis) { system.function <- function(x, skewness, kurtosis) { b.=x[1L]; c.=x[2L]; d.=x[3L] eq1 <- b.^2 + 6*b.*d. + 2*c.^2 + 15*d.^2 - 1 eq2 <- 2*c.*(b.^2 + 24*b.*d. + 105*d.^2 + 2) - skewness eq3 <- 24*(b.*d. + c.^2*(1 + b.^2 + 28*b.*d.) + d.^2*(12 + 48*b.*d. + 141*c.^2 + 225*d.^2)) - kurtosis eq <- c(eq1,eq2,eq3) sum(eq^2) ## SS } out <- nlminb(start=c(1,0,0), objective=system.function, scale=10, control=list(trace=0), skewness=skewness, kurtosis=kurtosis) if(out$convergence != 0) warning("no convergence") b. <- out$par[1L]; c. <- out$par[2L]; d. <- out$par[3L]; a. <- -c. c(a.,b.,c.,d.) 
} getICOV <- function(b1, c1, d1, b2, c2, d2, R) { objectiveFunction <- function(x, b1, c1, d1, b2, c2, d2, R) { rho=x[1L] eq <- rho*(b1*b2 + 3*b1*d2 + 3*d1*b2 + 9*d1*d2) + rho^2*(2*c1*c2) + rho^3*(6*d1*d2) - R eq^2 } #gradientFunction <- function(x, bcd1, bcd2, R) { # #} out <- nlminb(start=R, objective=objectiveFunction, scale=10, control=list(trace=0), b1=b1, c1=c1, d1=d1, b2=b2, c2=c2, d2=d2, R=R) if(out$convergence != 0) warning("no convergence") rho <- out$par[1L] rho } # number of variables nvar <- ncol(COR) # check skewness if(is.null(skewness)) { SK <- rep(0, nvar) } else if(length(skewness) == nvar) { SK <- skewness } else if(length(skewness) == 1L) { SK <- rep(skewness, nvar) } else { stop("skewness has wrong length") } if(is.null(kurtosis)) { KU <- rep(0, nvar) } else if(length(kurtosis) == nvar) { KU <- kurtosis } else if(length(kurtosis) == 1L) { KU <- rep(kurtosis, nvar) } else { stop("kurtosis has wrong length") } # create Fleishman table FTable <- matrix(0, nvar, 4L) for(i in 1:nvar) { FTable[i,] <- fleishman1978_abcd(skewness=SK[i], kurtosis=KU[i]) } # compute intermediate correlations between all pairs ICOR <- diag(nvar) for(j in 1:(nvar-1L)) { for(i in (j+1):nvar) { if(COR[i,j] == 0) next ICOR[i,j] <- ICOR[j,i] <- getICOV(FTable[i,2], FTable[i,3], FTable[i,4], FTable[j,2], FTable[j,3], FTable[j,4], R=COR[i,j]) } } if(debug) { cat("\nOriginal correlations (for Vale-Maurelli):\n") print(COR) cat("\nIntermediate correlations (for Vale-Maurelli):\n") print(ICOR) cat("\nEigen values ICOR:\n") print( eigen(ICOR)$values ) } # generate Z ## FIXME: replace by rmvnorm once we use that package X <- Z <- MASS::mvrnorm(n=n, mu=rep(0,nvar), Sigma=ICOR) # transform Z using Fleishman constants for(i in 1:nvar) { X[,i] <- FTable[i,1L] + FTable[i,2L]*Z[,i] + FTable[i,3L]*Z[,i]^2 + FTable[i,4L]*Z[,i]^3 } X }semTools/R/longInvariance.R0000644000175100001440000004221513000201061015343 0ustar hornikusers## Title: Longitudinal (or within-group, such as dyadic data) 
## measurement invariance
## Author: Sunthud Pornprasertmanit
## Description: Test longitudinal (or within-group, e.g. dyadic) measurement
## invariance and save the fitted objects.
##----------------------------------------------------------------------------##

## longInvariance
## Fit the sequence of invariance models (configural, metric/loadings,
## scalar/intercepts, optionally strict/residuals, and equal factor means)
## across repeated measurements listed in 'varList' (one character vector of
## indicator names per occasion, in matching order).
## Args:
##   model        : lavaan model syntax or a lavaan parameter table
##   varList      : list of indicator-name vectors, one per occasion
##   auto         : lag depth for indicator autocorrelations ("all" = every lag)
##   constrainAuto: constrain same-lag autocorrelations to equality
##   fixed.x, std.lv, group, group.equal, group.partial, warn, debug: passed to lavaan
##   strict       : also fit the strict (equal residuals) model
##   quiet        : suppress printed summary
##   fit.measures, method: passed to printInvarianceResult()
## Returns (invisibly): named list of the fitted lavaan objects.
longInvariance <- function(model, varList, auto = "all", constrainAuto = FALSE,
                           fixed.x = TRUE, std.lv = FALSE, group = NULL,
                           group.equal = "", group.partial = "", warn = TRUE,
                           debug = FALSE, strict = FALSE, quiet = FALSE,
                           fit.measures = "default",
                           method = "satorra.bentler.2001", ...) {
	List <- list(...)
	# Find the number of groups (from raw data or from a list of covariance matrices)
	ngroups <- 1
	if(!is.null(group)) {
		if(!is.null(List$data)) {
			ngroups <- length(unique(List$data[,group]))
		} else if (!is.null(List$sample.cov)) {
			ngroups <- length(List$sample.cov)
		} else {
			stop("Cannot find the specifying variable name in the 'group' argument.")
		}
	}
	# Get the lavaan parameter table
	if(is.character(model)) {
		lavaanParTable <- lavaan::lavaanify(model = model, meanstructure = TRUE,
			int.ov.free = TRUE, int.lv.free = FALSE, orthogonal = FALSE,
			fixed.x = fixed.x, std.lv = std.lv,
			auto.fix.first = ifelse(std.lv, FALSE, TRUE), auto.fix.single = TRUE,
			auto.var = TRUE, auto.cov.lv.x = TRUE, auto.cov.y = TRUE,
			ngroups = ngroups, group.equal = group.equal,
			group.partial = group.partial, debug = debug, warn = warn,
			as.data.frame. = TRUE)
	} else if(is.list(model)) {
		if(!is.null(model$lhs) && !is.null(model$op) && !is.null(model$rhs) && !is.null(model$free)) {
			lavaanParTable <- model
		} else if(is.character(model[[1]])) {
			stop("lavaan ERROR: model is a list, but not a parameterTable?")
		}
	} else {
		cat("model type: ", class(model), "\n")
		stop("lavaan ERROR: model is not of type character or list")
	}
	# Error checking on the varList argument; get the factor name that loads on
	# each element of the list
	facName <- lapply(varList, function(vec, pt) pt$lhs[(pt$op == "=~") & (pt$rhs %in% vec)], pt = lavaanParTable)
	if(any(sapply(facName, function(x) length(unique(x)) > 1))) stop("The factor names of the same element of the 'varList' are not the same.")
	if(length(unique(sapply(facName, function(x) length(x)))) > 1) stop("The numbers of variables in each element are not equal.")
	facName <- unlist(lapply(facName, unique))
	# Impose the autocorrelation structure among repeated indicators
	if(auto != 0) {
		if(is.numeric(auto) && auto >= length(varList)) stop("The number of lag in auto-correlation is not possible in the current number of timepoints.")
		if(auto == "all") auto <- length(varList) - 1
		for(k in 1:ngroups) {
			for(i in 1:length(varList[[1]])) {
				name <- sapply(varList, function(x, element) x[element], element = i)
				for(j in 1:auto) {
					vec <- 1:(length(varList) - j)
					lavaanParTable <- freeParTable(lavaanParTable, name[vec], "~~", name[vec + j], k, ustart = NA)
					if(constrainAuto & (length(vec) > 1)) lavaanParTable <- constrainParTable(lavaanParTable, name[vec], "~~", name[vec + j], k)
				}
			}
		}
	}
	# Fit configural invariance
	fitConfigural <- lavaan::lavaan(lavaanParTable, ..., group = group,
		group.equal = group.equal, group.partial = group.partial,
		warn = TRUE, debug = FALSE)
	# Create the parameter table for metric (weak) invariance
	ptMetric <- lavaanParTable
	if(std.lv) {
		for(k in 1:ngroups) {
			# Free variances of factor 2, 3, ...
			ptMetric <- freeParTable(ptMetric, facName[-1], "~~", facName[-1], k, ustart = NA)
			# Constrain factor loadings
			for(i in 1:length(varList[[1]])) {
				ptMetric <- constrainParTable(ptMetric, facName, "=~", sapply(varList, function(x, element) x[element], element = i), k)
			}
		}
		ptMetric$ustart[(ptMetric$op == "=~") & (ptMetric$rhs %in% sapply(varList, function(x, element) x[element], element = 1))] <- 1
	} else {
		for(k in 1:ngroups) {
			# Constrain factor loadings but keep marker variables
			for(i in 2:length(varList[[1]])) {
				ptMetric <- constrainParTable(ptMetric, facName, "=~", sapply(varList, function(x, element) x[element], element = i), k)
			}
		}
	}
	fitMetric <- lavaan::lavaan(ptMetric, ..., group = group,
		group.equal = group.equal, group.partial = group.partial,
		warn = TRUE, debug = FALSE)
	# Create the parameter table for scalar (strong) invariance
	ptScalar <- ptMetric
	for(k in 1:ngroups) {
		# Free means of factors 2, 3, ...
		ptScalar <- freeParTable(ptScalar, facName[-1], "~1", "", k, ustart = NA)
		# Constrain measurement intercepts
		for(i in 1:length(varList[[1]])) {
			ptScalar <- constrainParTable(ptScalar, sapply(varList, function(x, element) x[element], element = i), "~1", "", k)
		}
	}
	# NOTE(review): the masks below are computed from ptMetric but applied to
	# ptScalar (which has extra rows), and "~1" rows carry the variable in lhs
	# (rhs == ""), so this line likely selects nothing -- confirm intent.
	ptScalar$ustart[(ptMetric$op == "~1") & (ptMetric$rhs %in% facName)] <- 0
	fitScalar <- lavaan::lavaan(ptScalar, ..., group = group,
		group.equal = group.equal, group.partial = group.partial,
		warn = TRUE, debug = FALSE)
	ptMeans <- ptScalar
	# Create the parameter table for strict invariance if specified
	ptStrict <- ptScalar
	fitStrict <- NULL
	if(strict) {
		ptStrict <- ptScalar
		for(k in 1:ngroups) {
			# Constrain measurement error variances
			for(i in 1:length(varList[[1]])) {
				name <- sapply(varList, function(x, element) x[element], element = i)
				ptStrict <- constrainParTable(ptStrict, name, "~~", name, k)
			}
		}
		fitStrict <- lavaan::lavaan(ptStrict, ..., group = group,
			group.equal = group.equal, group.partial = group.partial,
			warn = TRUE, debug = FALSE)
		ptMeans <- ptStrict
	}
	# Create the parameter table for mean equality: fix factor means to 0
	for(k in 1:ngroups) {
		ptMeans <- fixParTable(ptMeans, facName[-1], "~1", "", k, ustart = 0)
	}
	fitMeans <- lavaan::lavaan(ptMeans, ..., group = group,
		group.equal = group.equal, group.partial = group.partial,
		warn = TRUE, debug = FALSE)
	FIT <- invisible(list(fit.configural = fitConfigural, fit.loadings = fitMetric,
		fit.thresholds = fitScalar, fit.residuals = fitStrict,
		fit.means = fitMeans))
	FIT <- FIT[!sapply(FIT, is.null)]
	if(!quiet) {
		printInvarianceResult(FIT, fit.measures, method)
	}
	# (A large block of commented-out legacy printing code, superseded by
	# printInvarianceResult(), was removed here.)
	invisible(FIT)
}

## freeParTable: free the (lhs, op, rhs, group) elements in a parameter table,
## adding rows for elements that do not yet exist and dropping equality
## constraints that referenced a newly freed parameter.
freeParTable <- function(parTable, lhs, op, rhs, group, ustart = NA) {
	parTable$start <- parTable$est <- parTable$se <- NULL
	target <- cbind(lhs, op, rhs, group)
	for(i in 1:nrow(target)) {
		targetElem <- matchElement(parTable = parTable, vec = target[i,])
		ptargetElem <- parTable$plabel[targetElem]
		if((length(targetElem) == 0) || is.na(targetElem)) {
			# Parameter not in the table yet: append it as a new free row
			newline <- list(lhs = as.character(target[i, 1]),
				op = as.character(target[i, 2]),
				rhs = as.character(target[i, 3]),
				group = as.integer(target[i, 4]),
				free = as.integer(max(parTable$free) + 1),
				ustart = as.numeric(NA))
			parTable <- patMerge(pt1 = parTable, pt2 = newline)
		} else {
			if(parTable$free[targetElem] == 0) {
				parTable$ustart[targetElem] <- ustart
				parTable$user[targetElem] <- 1
				parTable$free[targetElem] <- max(parTable$free) + 1
			}
			# Remove any "==" rows that reference the freed parameter's plabel
			equalelement <- which(parTable$op == "==")
			rmelem <- intersect(union(match(ptargetElem, parTable$lhs), match(ptargetElem, parTable$rhs)), equalelement)
			if(length(rmelem) > 0) parTable <- removeEqCon(parTable, rmelem)
		}
	}
	parTable <- rearrangept(parTable)
	return(parTable)
}

## removeEqCon: drop the given equality-constraint rows and renumber ids.
removeEqCon <- function(pt, element) {
	pt <- lapply(pt, "[", -element)
	pt$id <- seq_along(pt$id)
	pt
}

## fixParTable: fix the (lhs, op, rhs, group) elements in a parameter table to
## 'ustart'. FIX: the original called warnings(), which only prints previously
## stored warnings and ignores its argument; warning() actually signals one.
fixParTable <- function(parTable, lhs, op, rhs, group, ustart = NA) {
	parTable$start <- parTable$est <- parTable$se <- NULL
	target <- cbind(lhs, op, rhs, group)
	element <- apply(target, 1, matchElement, parTable = parTable)
	for(i in 1:nrow(target)) {
		if(parTable$free[element[i]] == 0) warning(paste("The", lhs, op, rhs, group, "is fixed already."))
		parTable$ustart[element[i]] <- ustart
		parTable$user[element[i]] <- 1
		parTable$free[element[i]] <- 0
	}
	# rearrangept also renumbers plabels referenced by equality constraints
	parTable <- rearrangept(parTable)
	return(parTable)
}

## constrainParTable: impose equality constraints ("==" rows on plabels) among
## the matched elements. Guard added: with fewer than two matches there is
## nothing to constrain (the original 2:length(element) loop would have
## appended a corrupt row via the 2:1 descending sequence).
constrainParTable <- function(parTable, lhs, op, rhs, group) {
	parTable$start <- parTable$est <- parTable$se <- NULL
	target <- cbind(lhs, op, rhs, group)
	element <- apply(target, 1, matchElement, parTable = parTable)
	if(length(element) < 2) return(parTable)
	# id lhs op rhs user group free ustart exo label plabel start
	for(i in 2:length(element)) {
		len <- length(parTable$id)
		newline <- list(lhs = parTable$plabel[element[1]], op = "==", rhs = parTable$plabel[element[i]])
		if(!any(parTable$lhs == newline$lhs & parTable$op == newline$op & parTable$rhs == newline$rhs)) parTable <- patMerge(pt1 = parTable, pt2 = newline)
	}
	return(parTable)
}

## matchElement: row index in the parameter table matching vec = (lhs, op, rhs, group).
matchElement <- function(parTable, vec) {
	if(is.null(parTable$group)) {
		return(which((parTable$lhs == vec[1]) & (parTable$op == vec[2]) & (parTable$rhs == vec[3])))
	} else {
		return(which((parTable$lhs == vec[1]) & (parTable$op == vec[2]) & (parTable$rhs == vec[3]) & (parTable$group == vec[4])))
	}
}

## rearrangeFreeElement: renumber the nonzero entries of a 'free' vector to
## consecutive integers 1, 2, ... preserving first-appearance order.
rearrangeFreeElement <- function(vec) {
	vec2 <- vec
	vec <- vec[vec != 0]
	uvec <- unique(vec)
	newvec <- 1:length(unique(vec))
	vec2[vec2 != 0] <- newvec[match(vec, uvec)]
	class(vec2) <- "integer"
	return(vec2)
}

## createplabel: lavaan-style plabels ".pK." from row numbers; 0 maps to "".
createplabel <- function(num) {
	result <- paste0(".p", num, ".")
	result[num == 0] <- ""
	result
}

## rearrangept: renumber 'free' and regenerate plabels, rewriting the lhs/rhs
## of "==" rows so they keep pointing at the same parameters.
rearrangept <- function(pt) {
	oldfree <- pt$free
	newfree <- rearrangeFreeElement(oldfree)
	oldplabel <- pt$plabel
	newplabel <- createplabel(seq_along(pt$op))
	eqpos <- which(pt$op == "==")
	newplabel[eqpos] <- ""
	if(length(eqpos) > 0) {
		eqlhs <- pt$lhs[eqpos]
		eqrhs <- pt$rhs[eqpos]
		matchlhs <- match(eqlhs, oldplabel)
		matchrhs <- match(eqrhs, oldplabel)
		neweqlhs <- newplabel[matchlhs]
		neweqrhs <- newplabel[matchrhs]
		# Constraint sides that were not plabels (e.g. user labels) stay as-is
		neweqlhs[is.na(matchlhs)] <- eqlhs[is.na(matchlhs)]
		neweqrhs[is.na(matchrhs)] <- eqrhs[is.na(matchrhs)]
		pt$lhs[eqpos] <- neweqlhs
		pt$rhs[eqpos] <- neweqrhs
	}
	pt$free <- newfree
	pt$plabel <- newplabel
	pt
}

## getValue: current value of each matched parameter -- the estimate for free
## parameters, the user-specified start value for fixed ones.
getValue <- function(parTable, est, lhs, op, rhs, group) {
	target <- cbind(lhs, op, rhs, group)
	element <- apply(target, 1, matchElement, parTable = parTable)
	free <- parTable$free[element]
	out <- parTable$ustart[element]
	out[free != 0] <- est[free[free != 0]]
	out
}

## patMerge: merge two parameter tables, filling in any columns present in only
## one of them; optionally drop duplicated (lhs, op, rhs) rows.
patMerge <- function (pt1 = NULL, pt2 = NULL, remove.duplicated = FALSE,
                      fromLast = FALSE, warn = TRUE) {
	pt1 <- as.data.frame(pt1, stringsAsFactors = FALSE)
	pt2 <- as.data.frame(pt2, stringsAsFactors = FALSE)
	stopifnot(!is.null(pt1$lhs), !is.null(pt1$op), !is.null(pt1$rhs),
		!is.null(pt2$lhs), !is.null(pt2$op), !is.null(pt2$rhs))
	if (is.null(pt1$group) && is.null(pt2$group)) {
		TMP <- rbind(pt1[, c("lhs", "op", "rhs", "group")], pt2[, c("lhs", "op", "rhs", "group")])
	} else {
		if (is.null(pt1$group) && !is.null(pt2$group)) {
			pt1$group <- rep(1L, length(pt1$lhs))
		} else if (is.null(pt2$group) && !is.null(pt1$group)) {
			pt2$group <- rep(1L, length(pt2$lhs))
		}
		TMP <- rbind(pt1[, c("lhs", "op", "rhs", "group")], pt2[, c("lhs", "op", "rhs", "group")])
	}
	# For each optional column, default-fill the table that lacks it
	if (is.null(pt1$user) && !is.null(pt2$user)) {
		pt1$user <- rep(0L, length(pt1$lhs))
	} else if (is.null(pt2$user) && !is.null(pt1$user)) {
		pt2$user <- rep(0L, length(pt2$lhs))
	}
	if (is.null(pt1$free) && !is.null(pt2$free)) {
		pt1$free <- rep(0L, length(pt1$lhs))
	} else if (is.null(pt2$free) && !is.null(pt1$free)) {
		pt2$free <- rep(0L, length(pt2$lhs))
	}
	if (is.null(pt1$ustart) && !is.null(pt2$ustart)) {
		pt1$ustart <- rep(0, length(pt1$lhs))
	} else if (is.null(pt2$ustart) && !is.null(pt1$ustart)) {
		pt2$ustart <- rep(0, length(pt2$lhs))
	}
	if (is.null(pt1$exo) && !is.null(pt2$exo)) {
		pt1$exo <- rep(0L, length(pt1$lhs))
	} else if (is.null(pt2$exo) && !is.null(pt1$exo)) {
		pt2$exo <- rep(0L, length(pt2$lhs))
	}
	if (is.null(pt1$label) && !is.null(pt2$label)) {
		pt1$label <- rep("", length(pt1$lhs))
	} else if (is.null(pt2$label) && !is.null(pt1$label)) {
		pt2$label <- rep("", length(pt2$lhs))
	}
	if (is.null(pt1$plabel) && !is.null(pt2$plabel)) {
		pt1$plabel <- rep("", length(pt1$lhs))
	} else if (is.null(pt2$plabel) && !is.null(pt1$plabel)) {
		pt2$plabel <- rep("", length(pt2$lhs))
	}
	if (is.null(pt1$start) && !is.null(pt2$start)) {
		pt1$start <- rep(as.numeric(NA), length(pt1$lhs))
	} else if (is.null(pt2$start) && !is.null(pt1$start)) {
		pt2$start <- rep(as.numeric(NA), length(pt2$lhs))
	}
	# Stale estimates never survive a merge
	if(!is.null(pt1$est)) pt1$est <- NULL
	if(!is.null(pt2$est)) pt2$est <- NULL
	if(!is.null(pt1$se)) pt1$se <- NULL
	if(!is.null(pt2$se)) pt2$se <- NULL
	if (remove.duplicated) {
		idx <- which(duplicated(TMP, fromLast = fromLast))
		if (length(idx)) {
			if (warn) {
				warning("lavaan WARNING: duplicated parameters are ignored:\n",
					paste(apply(pt1[idx, c("lhs", "op", "rhs")], 1, paste, collapse = " "), collapse = "\n"))
			}
			if (fromLast) {
				pt1 <- pt1[-idx, ]
			} else {
				idx <- idx - nrow(pt1)
				pt2 <- pt2[-idx, ]
			}
		}
	} else if (!is.null(pt1$start) && !is.null(pt2$start)) {
		# Carry pt1's start values over to matching rows of pt2
		for (i in 1:length(pt1$lhs)) {
			idx <- which(pt2$lhs == pt1$lhs[i] & pt2$op == pt1$op[i] & pt2$rhs == pt1$rhs[i] & pt2$group == pt1$group[i])
			pt2$start[idx] <- pt1$start[i]
		}
	}
	if (is.null(pt1$id) && !is.null(pt2$id)) {
		nid <- max(pt2$id)
		pt1$id <- (nid + 1L):(nid + nrow(pt1))
	} else if (is.null(pt2$id) && !is.null(pt1$id)) {
		nid <- max(pt1$id)
		pt2$id <- (nid + 1L):(nid + nrow(pt2))
	}
	NEW <- base::merge(pt1, pt2, all = TRUE, sort = FALSE)
	NEW
}

## ---- file: semTools/R/imposeStart.R ----------------------------------------
## imposeStart: re-evaluate the lavaan call in 'expr' using the estimates from
## the fitted 'out' object as starting values.
imposeStart <- function(out, expr, silent = TRUE) {
	if(!is(out, "lavaan")) stop("The first argument of the function must be a lavaan output.")
	# Evaluate the call once with do.fit = FALSE just to obtain its parameter table
	template2 <- template <- substitute(expr)
	template2$do.fit <- FALSE
	model <- eval(expr = template2, enclos = parent.frame())
	ptmodel <- lavaan::parTable(model)
	coefmodel <- lavaan::coef(model)
	coefout <- lavaan::coef(out)
	# Map 'out' estimates onto the free parameters of the new model by name
	start <- coefout[match(names(coefmodel), names(coefout))]
	ptmodel$start[ptmodel$free != 0] <- start[ptmodel$free[ptmodel$free != 0]]
	ptmodel$est <- NULL
	ptmodel$se <- NULL
	if(!silent) {
		cat("########## Model with imposed starting values #########\n")
		print(ptmodel)
	}
	# Substitute the parameter table for the model argument and refit
	if("model" %in% names(template)) {
		template$model <- ptmodel
	} else {
		template[[2]] <- ptmodel
	}
	eval(expr = template, enclos = parent.frame())
}

## ---- file: semTools/R/auxiliary.R ------------------------------------------
### Title: Automatically accounts for auxiliary variable in full information
###        maximum likelihood
### Author: Sunthud Pornprasertmanit
### Description: Automatically accounts for auxiliary variable in full
###              information maximum likelihood

# lavaanStar extends lavaan with the corrected baseline fit, multiple-imputation
# results, and the names of the auxiliary variables.
setClass("lavaanStar", contains = "lavaan",
	representation(nullfit = "vector", imputed = "list",
		imputedResults = "list", auxNames = "vector"),
	prototype(nullfit = c(chi = 0, df = 0), imputed = list(),
		imputedResults = list(), auxNames = ""))

setMethod("inspect", "lavaanStar", ## FIXME: get rid of lavaanStar object
function(object, what = "free") {
	what <- tolower(what)
	if(what == "fit" || what == "fitmeasures" || what == "fit.measures" || what == "fit.indices") {
		# Fit measures recomputed against the auxiliary-corrected baseline model
		fitMeasuresLavaanStar(object)
	} else if(what == "imputed" || what == "impute") {
		result <- object@imputed
		if(length(result) > 0) {
			return(result)
		} else {
			stop("This method did not made by multiple imputation.")
		}
	} else if(what == "aux" || what == "auxiliary") {
		print(object@auxNames)
	} else {
		getMethod("inspect", "lavaan")(object, what = what) ## FIXME: don't set a new inspect method
	}
})

setMethod("summary", "lavaanStar", function(object, fit.measures = FALSE, ...) {
	# Never forward fit.measures: lavaan's own baseline is wrong for this object
	getMethod("summary", "lavaan")(object, fit.measures = FALSE, ...)
	if(fit.measures) {
		cat("Because the original method to find the baseline model does not work, \nplease do not use any fit measures relying on baseline model, including CFI and TLI. \nTo find the correct one, please use the function: lavInspect(object, what='fit').\n")
	}
})

## anova for lavaanStar: for multiply-imputed results, pool the chi-square
## difference across imputations (naive, Li-Meng-Raghunathan-Rubin,
## Meng-Rubin, and Mplus-style poolings); otherwise defer to lavaan.
## FIX: the second-object check originally tested length(imputed) (the first
## object, already known nonempty) instead of length(imputed2).
setMethod("anova", signature(object = "lavaanStar"), function(object, ...) {
	imputed <- object@imputed
	if(length(imputed) > 0) {
		dots <- list(...)
		if(length(dots) > 1) stop("Multiple Imputed Results: Cannot compare more than two objects")
		object2 <- dots[[1]]
		imputed2 <- object2@imputed
		if(length(imputed2) == 0) stop("The second object must come from multiple imputation.")
		listlogl1 <- imputed[["indivlogl"]]
		listlogl2 <- imputed2[["indivlogl"]]
		df1 <- lavaan::lavInspect(object, "fit")["df"]
		df2 <- lavaan::lavInspect(object2, "fit")["df"]
		# Ensure model 1 is the less-restricted (lower-df) model
		if(df2 < df1) {
			templogl <- listlogl1
			listlogl1 <- listlogl2
			listlogl2 <- templogl
		}
		dfdiff <- df2 - df1
		anovaout <- mapply(lavaan::anova, object@imputedResults, object2@imputedResults, SIMPLIFY = FALSE)
		chidiff <- sapply(anovaout, function(u) u[2, "Chisq diff"])
		dfdiff2 <- mean(sapply(anovaout, function(u) u[2, "Df diff"]))
		fit.altcc <- mean(chidiff)
		naive <- c(fit.altcc, dfdiff2, 1 - pchisq(fit.altcc, dfdiff2))
		names(naive) <- c("chisq", "df", "pvalue")
		lmrr <- lmrrPooledChi(chidiff, dfdiff2)
		mr <- NULL
		mplus <- NULL
		if(!is.null(listlogl1[["loglmod"]]) | !is.null(listlogl2[["loglmod"]])) {
			logl1 <- listlogl1[["loglmod"]]
			logl2 <- listlogl2[["loglmod"]]
			chimean <- mean((logl1 - logl2) * 2)
			m <- length(logl1)
			# ariv: average relative increase in variance across imputations
			ariv <- ((m + 1)/((m - 1) * dfdiff)) * (fit.altcc - chimean)
			mplus <- mplusPooledChi(chimean, dfdiff, ariv)
			mr <- mrPooledChi(chimean, m, dfdiff, ariv)
		}
		result <- list(naive = naive, lmrr = lmrr, mr = mr, mplus = mplus)
		return(result)
	} else {
		return(getMethod("anova", "lavaan")(object, ...))
	}
})

setMethod("vcov", "lavaanStar", function(object, ...) {
	result <- object@imputed
	if(length(result) == 0) {
		return(getMethod("vcov", "lavaan")(object, ...))
	} else {
		out <- object@vcov$vcov
		rownames(out) <- colnames(out) <- lavaan::lav_partable_labels(lavaan::partable(object), type = "free")
		return(out)
	}
})

# Convenience wrappers: auxiliary() with the matching lavaan front-end.
cfa.auxiliary <- function(model, aux, ...) {
	auxiliary(model = model, aux = aux, fun = "cfa", ...)
}
sem.auxiliary <- function(model, aux, ...) {
	auxiliary(model = model, aux = aux, fun = "sem", ...)
}
growth.auxiliary <- function(model, aux, ...) {
	auxiliary(model = model, aux = aux, fun = "growth", ...)
}
lavaan.auxiliary <- function(model, aux, ...) {
	auxiliary(model = model, aux = aux, fun = "lavaan", ...)
}

## auxiliary: refit 'model' via the lavaan front-end 'fun' with the auxiliary
## variables 'aux' saturated into the model (FIML, fixed.x = FALSE), and attach
## the corrected baseline fit as a lavaanStar object.
auxiliary <- function(model, aux, fun, ...) {
	args <- list(...)
	args$fixed.x <- FALSE
	args$missing <- "fiml"
	if(is(model, "lavaan")) {
		if(!lavaan::lavInspect(model, "options")$meanstructure) stop("The lavaan fitted model must evaluate the meanstructure. Please re-fit the lavaan object again with 'meanstructure=TRUE'")
		model <- lavaan::parTable(model)
	} else if(!(is.list(model) && ("lhs" %in% names(model)))) {
		# Syntax input: dry-run the front-end to obtain a parameter table
		fit <- do.call(fun, c(list(model = model, do.fit = FALSE), args))
		model <- lavaan::parTable(fit)
	}
	model <- model[setdiff(1:length(model), which(names(model) == "start"))]
	if(any(model$exo == 1)) {
		stop("All exogenous variables (covariates) must be treated as endogenous variables by the 'auxiliary' function (fixed.x = FALSE).")
	}
	auxResult <- craftAuxParTable(model = model, aux = aux, ...)
	if(checkOrdered(args$data, auxResult$indName, ...)) {
		stop("The analysis model or the analysis data have ordered categorical variables. The auxiliary variable feature is not available for the models for categorical variables with the weighted least square approach.")
	}
	args$model <- auxResult$model
	result <- do.call(fun, args)
	# Fit the auxiliary-corrected baseline (null) model and store its fit
	codeNull <- nullAuxiliary(aux, auxResult$indName, NULL, any(model$op == "~1"), max(model$group))
	resultNull <- lavaan::lavaan(codeNull, ...)
	result <- as(result, "lavaanStar")
	fit <- lavaan::fitMeasures(resultNull)
	name <- names(fit)
	fit <- as.vector(fit)
	names(fit) <- name
	result@nullfit <- fit
	result@auxNames <- aux
	return(result)
}

## checkOrdered: TRUE when 'ordered' was passed via ... or any of 'varnames'
## in 'dat' is an ordered factor.
checkOrdered <- function(dat, varnames, ...) {
	ord <- list(...)$ordered
	if(is.null(ord)) {
		ord <- FALSE
	} else {
		ord <- TRUE
	}
	if(is.null(dat)) {
		orderedVar <- FALSE
	} else {
		orderedVar <- sapply(dat[,varnames], function(x) "ordered" %in% is(x))
	}
	any(c(ord, orderedVar))
}

## craftAuxParTable: augment the analysis parameter table so the auxiliary
## variables are saturated with all indicators/single indicators.
## Returns list(model = augmented table, indName = all indicator names).
craftAuxParTable <- function(model, aux, ...) {
	# Set aside constraint rows; they are re-attached at the end
	constraintLine <- model$op %in% c("==", ":=", ">", "<")
	modelConstraint <- lapply(model, "[", constraintLine)
	model <- lapply(model, "[", !constraintLine)
	facName <- NULL
	indName <- NULL
	singleIndicator <- NULL
	facName <- unique(model$lhs[model$op == "=~"])
	indName <- setdiff(unique(model$rhs[model$op == "=~"]), facName)
	# Observed variables not already serving as indicators become
	# single-indicator factors ("f" prefix)
	singleIndicator <- setdiff(unique(c(model$lhs, model$rhs)), c(facName, indName, ""))
	facSingleIndicator <- paste0("f", singleIndicator)
	for(i in seq_along(singleIndicator)) {
		# NOTE(review): gsub treats the variable name as a regex and may match
		# substrings of other names -- confirm names are regex-safe here.
		model$lhs <- gsub(singleIndicator[i], facSingleIndicator[i], model$lhs)
		model$rhs <- gsub(singleIndicator[i], facSingleIndicator[i], model$rhs)
	}
	ngroups <- max(model$group)
	if(!is.null(singleIndicator) && length(singleIndicator) != 0) model <- attachPT(model, facSingleIndicator, "=~", singleIndicator, ngroups, fixed = TRUE, ustart = 1, expand = FALSE)
	if(!is.null(singleIndicator) && length(singleIndicator) != 0) model <- attachPT(model, singleIndicator, "~~", singleIndicator, ngroups, fixed = TRUE, ustart = 0, expand = FALSE)
	if(!is.null(singleIndicator) && length(singleIndicator) != 0) model <- attachPT(model, singleIndicator, "~1", "", ngroups, fixed = TRUE, ustart = 0, expand = FALSE)
	if(is.null(indName) || length(indName) == 0) {
		# No latent factors with multiple indicators: model auxiliaries as
		# single-indicator factors correlated with everything
		faux <- paste0("f", aux)
		model <- attachPT(model, faux, "=~", aux, ngroups, fixed = TRUE, ustart = 1, expand = FALSE)
		model <- attachPT(model, aux, "~~", aux, ngroups, fixed = TRUE, ustart = 0, expand = FALSE)
		model <- attachPT(model, facSingleIndicator, "~~", faux, ngroups)
		model <- attachPT(model, faux, "~~", faux, ngroups, symmetric = TRUE)
		if(any(model$op == "~1")) {
			model <- attachPT(model, faux, "~1", "", ngroups)
			model <- attachPT(model, aux, "~1", "", ngroups, fixed = TRUE, ustart = 0, expand = FALSE)
		}
	} else {
		# Saturated-correlates approach: auxiliaries covary with all indicators
		if(!is.null(indName) && length(indName) != 0) model <- attachPT(model, indName, "~~", aux, ngroups)
		model <- attachPT(model, aux, "~~", aux, ngroups, symmetric = TRUE, useUpper = TRUE)
		if(!is.null(singleIndicator) && length(singleIndicator) != 0) model <- attachPT(model, facSingleIndicator, "=~", aux, ngroups)
		if(any(model$op == "~1")) model <- attachPT(model, aux, "~1", "", ngroups)
	}
	model <- attachConstraint(model, modelConstraint)
	list(model = model, indName = union(indName, singleIndicator))
}

## attachConstraint: append previously removed constraint rows back onto pt.
attachConstraint <- function(pt, con) {
	len <- length(con$id)
	if(len > 0) {
		pt$id <- c(pt$id, (max(pt$id) + 1):(max(pt$id) + len))
		pt$lhs <- c(pt$lhs, con$lhs)
		pt$op <- c(pt$op, con$op)
		pt$rhs <- c(pt$rhs, con$rhs)
		pt$user <- c(pt$user, con$user)
		pt$group <- c(pt$group, con$group)
		pt$free <- c(pt$free, con$free)
		pt$ustart <- c(pt$ustart, con$ustart)
		pt$exo <- c(pt$exo, con$exo)
		pt$label <- c(pt$label, con$label)
		pt$plabel <- c(pt$plabel, con$plabel)
		pt$start <- c(pt$start, con$start)
		pt$est <- c(pt$est, con$est)
		pt$se <- c(pt$se, con$se)
	}
	pt
}

## attachPT: append parameter rows (lhs op rhs) to pt, replicated over groups.
## expand: full lhs-x-rhs grid vs element-wise pairing; symmetric/useUpper:
## keep only one triangle of the lhs-x-lhs grid; fixed: rows are fixed at ustart.
attachPT <- function(pt, lhs, op, rhs, ngroups, symmetric = FALSE, exo = FALSE,
                     fixed = FALSE, useUpper = FALSE, ustart = NA,
                     expand = TRUE, diag = TRUE) {
	pt$start <- pt$est <- pt$se <- NULL
	if(expand) {
		element <- expand.grid(lhs, rhs, stringsAsFactors = FALSE)
	} else {
		element <- cbind(lhs, rhs)
	}
	if(symmetric) {
		if(useUpper) {
			element <- element[as.vector(upper.tri(diag(length(lhs)), diag = diag)),]
		} else {
			element <- element[as.vector(lower.tri(diag(length(lhs)), diag = diag)),]
		}
	}
	num <- nrow(element) * ngroups
	pt$id <- c(pt$id, (max(pt$id) + 1):(max(pt$id) + num))
	pt$lhs <- c(pt$lhs, rep(element[,1], ngroups))
	pt$op <- c(pt$op, rep(op, num))
	pt$rhs <- c(pt$rhs, rep(element[,2], ngroups))
	pt$user <- c(pt$user, rep(1, num))
	pt$group <- c(pt$group, rep(1:ngroups, each = nrow(element)))
	free <- (max(pt$free) + 1):(max(pt$free) + num)
	if(fixed) free <- rep(0L, num)
	pt$free <- c(pt$free, free)
	pt$ustart <- c(pt$ustart, rep(ustart, num))
	pt$exo <- c(pt$exo, rep(as.numeric(exo), num))
	pt$label <- c(pt$label, rep("", num))
	pt$plabel <- c(pt$plabel, rep("", num))
	return(pt)
}

## nullAuxiliary: parameter table for the auxiliary-corrected baseline (null)
## model: indicator variances (and means), plus a saturated block for the
## auxiliary variables (and optional covariates).
nullAuxiliary <- function(aux, indName, covName = NULL, meanstructure, ngroups) {
	covName <- rev(covName)
	pt <- list()
	num <- length(indName) * ngroups
	if(meanstructure) num <- num * 2
	pt$id <- 1:num
	pt$lhs <- rep(indName, ngroups)
	pt$op <- rep("~~", num)
	pt$rhs <- rep(indName, ngroups)
	pt$user <- rep(1, num)
	pt$group <- rep(1:ngroups, each = length(indName))
	pt$free <- 1:num
	pt$ustart <- rep(NA, num)
	pt$exo <- rep(0, num)
	pt$label <- rep("", num)
	pt$plabel <- rep("", num)
	if(meanstructure) {
		# First half: variances ("~~"); second half: intercepts ("~1")
		pt$lhs <- rep(rep(indName, ngroups), 2)
		pt$op <- rep(c("~~", "~1"), each = num/2)
		pt$rhs <- c(rep(indName, ngroups), rep("", num/2))
		pt$group <- rep(rep(1:ngroups, each = length(indName)), 2)
	}
	pt <- attachPT(pt, aux, "~~", aux, ngroups, symmetric = TRUE)
	pt <- attachPT(pt, indName, "~~", aux, ngroups)
	if(meanstructure) pt <- attachPT(pt, aux, "~1", "", ngroups)
	if(!is.null(covName) && length(covName) != 0) {
		pt <- attachPT(pt, aux, "~~", covName, ngroups)
		pt <- attachPT(pt, covName, "~~", covName, ngroups, symmetric = TRUE, useUpper = TRUE)
		if(meanstructure) pt <- attachPT(pt, covName, "~1", "", ngroups)
	}
	return(pt)
}
## fitMeasures() analogue for lavaanStar objects: take lavaan's fit indices
## and recompute the incremental-fit indices against the (possibly
## user-supplied) null model stored in object@nullfit, instead of lavaan's
## default baseline model. Returns the named numeric vector of fit measures.
fitMeasuresLavaanStar <- function(object) {
	## capture.output() swallows anything the inspect method prints
	notused <- capture.output(result <- suppressWarnings(getMethod("inspect", "lavaan")(object, what="fit")))
	## FIXME: don't set a new inspect method
	## overwrite baseline statistics with the stored null model's values
	result[c("baseline.chisq", "baseline.df", "baseline.pvalue")] <- object@nullfit[c("chisq", "df", "pvalue")]
	## did the estimator produce scaled test statistics?
	scaled <- lavaan::lavInspect(object, "options")$test %in%
		c("satorra.bentler", "yuan.bentler", "mean.var.adjusted", "scaled.shifted")
	if(scaled) {
		result[c("baseline.chisq.scaled", "baseline.df.scaled",
		         "baseline.pvalue.scaled", "baseline.chisq.scaling.factor")] <-
			object@nullfit[c("chisq.scaled", "df.scaled", "pvalue.scaled",
			                 "chisq.scaling.factor")]
	}
	X2.null <- object@nullfit["chisq"]
	df.null <- object@nullfit["df"]
	X2 <- result["chisq"]
	df <- result["df"]
	if(df.null == 0) {
		## incremental fit indices are undefined for a saturated null model
		result["cfi"] <- NA
		result["tli"] <- NA
		result["nnfi"] <- NA
		result["rfi"] <- NA
		result["nfi"] <- NA
		result["pnfi"] <- NA
		result["ifi"] <- NA
		result["rni"] <- NA
	} else {
		# CFI
		if("cfi" %in% names(result)) {
			t1 <- max( c(X2 - df, 0) )
			t2 <- max( c(X2 - df, X2.null - df.null, 0) )
			if(t1 == 0 && t2 == 0) {
				result["cfi"] <- 1
			} else {
				result["cfi"] <- 1 - t1/t2
			}
		}
		# TLI (= NNFI)
		if("tli" %in% names(result)) {
			if(df > 0) {
				t1 <- X2.null/df.null - X2/df
				t2 <- X2.null/df.null - 1
				# note: TLI original formula was in terms of fx/df, not X2/df
				# then, t1 <- fx_0/df.null - fx/df
				#       t2 <- fx_0/df.null - 1/N (or N-1 for wishart)
				if(t1 < 0 && t2 < 0) {
					TLI <- 1
				} else {
					TLI <- t1/t2
				}
			} else {
				TLI <- 1
			}
			result["tli"] <- result["nnfi"] <- TLI
		}
		# RFI
		if("rfi" %in% names(result)) {
			if(df > 0) {
				t1 <- X2.null/df.null - X2/df
				t2 <- X2.null/df.null
				if(t1 < 0 || t2 < 0) {
					RLI <- 1
				} else {
					RLI <- t1/t2
				}
			} else {
				RLI <- 1
			}
			result["rfi"] <- RLI
		}
		# NFI
		if("nfi" %in% names(result)) {
			t1 <- X2.null - X2
			t2 <- X2.null
			NFI <- t1/t2
			result["nfi"] <- NFI
		}
		# PNFI
		if("pnfi" %in% names(result)) {
			t1 <- X2.null - X2
			t2 <- X2.null
			PNFI <- (df/df.null) * t1/t2
			result["pnfi"] <- PNFI
		}
		# IFI
		if("ifi" %in% names(result)) {
			t1 <- X2.null - X2
			t2 <- X2.null - df
			if(t2 < 0) {
				IFI <- 1
			} else {
				IFI <- t1/t2
			}
			result["ifi"] <- IFI
		}
		# RNI (the earlier df.null == 0 branch is unreachable here, guarded above)
		if("rni" %in% names(result)) {
			t1 <- X2 - df
			t2 <- X2.null - df.null
			if(t1 < 0 || t2 < 0) {
				RNI <- 1
			} else {
				RNI <- 1 - t1/t2
			}
			result["rni"] <- RNI
		}
	}
	if(scaled) {
		X2.scaled <- result["chisq.scaled"]
		df.scaled <- result["df.scaled"]
		X2.null.scaled <- object@nullfit["chisq.scaled"]
		df.null.scaled <- object@nullfit["df.scaled"]
		if(df.null.scaled == 0) {
			result["cfi.scaled"] <- NA
			result["tli.scaled"] <- result["nnfi.scaled"] <- NA
			result["rfi.scaled"] <- NA
			result["nfi.scaled"] <- NA
			result["pnfi.scaled"] <- NA
			result["ifi.scaled"] <- NA
			result["rni.scaled"] <- NA
		} else {
			if("cfi.scaled" %in% names(result)) {
				t1 <- max( c(X2.scaled - df.scaled, 0) )
				t2 <- max( c(X2.scaled - df.scaled, X2.null.scaled - df.null.scaled, 0) )
				if(t1 == 0 && t2 == 0) {
					result["cfi.scaled"] <- 1
				} else {
					result["cfi.scaled"] <- 1 - t1/t2
				}
			}
			if("tli.scaled" %in% names(result)) {
				if(df > 0) {
					t1 <- X2.null.scaled/df.null.scaled - X2.scaled/df.scaled
					t2 <- X2.null.scaled/df.null.scaled - 1
					if(t1 < 0 && t2 < 0) {
						TLI <- 1
					} else {
						TLI <- t1/t2
					}
				} else {
					TLI <- 1
				}
				result["tli.scaled"] <- result["nnfi.scaled"] <- TLI
			}
			if("rfi.scaled" %in% names(result)) {
				if(df > 0) {
					t1 <- X2.null.scaled/df.null.scaled - X2.scaled/df.scaled
					t2 <- X2.null.scaled/df.null.scaled
					if(t1 < 0 || t2 < 0) {
						RLI <- 1
					} else {
						RLI <- t1/t2
					}
				} else {
					RLI <- 1
				}
				result["rfi.scaled"] <- RLI
			}
			if("nfi.scaled" %in% names(result)) {
				t1 <- X2.null.scaled - X2.scaled
				t2 <- X2.null.scaled
				NFI <- t1/t2
				result["nfi.scaled"] <- NFI
			}
			if("pnfi.scaled" %in% names(result)) {
				t1 <- X2.null.scaled - X2.scaled
				t2 <- X2.null.scaled
				## NOTE(review): parsimony ratio uses unscaled df/df.null here,
				## as in the original -- confirm against lavaan's fitMeasures
				PNFI <- (df/df.null) * t1/t2
				result["pnfi.scaled"] <- PNFI
			}
			if("ifi.scaled" %in% names(result)) {
				t1 <- X2.null.scaled - X2.scaled
				t2 <- X2.null.scaled
				if(t2 < 0) {
					IFI <- 1
				} else {
					IFI <- t1/t2
				}
				result["ifi.scaled"] <- IFI
			}
			if("rni.scaled" %in% names(result)) {
				t1 <- X2.scaled - df.scaled
				## BUG FIX: t2 was overwritten with the UNSCALED baseline
				## (t2 <- X2.null - df.null), so rni.scaled was computed from a
				## mix of scaled and unscaled statistics; use the scaled baseline.
				t2 <- X2.null.scaled - df.null.scaled
				if(t1 < 0 || t2 < 0) {
					RNI <- 1
				} else {
					RNI <- 1 - t1/t2
				}
				result["rni.scaled"] <- RNI
			}
		}
	}
	# log-likelihood-based measures from multiply-imputed results, if present
	imputed <- object@imputed
	if(length(imputed) > 0) {
		loglikval <- unlist(imputed[["logl"]])
		npar <- result["npar"]
		result["unrestricted.logl"] <- loglikval["unrestricted.logl"]
		result["logl"] <- loglikval["logl"]
		result["aic"] <- -2*loglikval["logl"] + 2*npar
		result["bic"] <- -2*loglikval["logl"] + npar*log(result["ntotal"])
		N.star <- (result["ntotal"] + 2) / 24
		result["bic2"] <- -2*loglikval["logl"] + npar*log(N.star)
		## fmin is not meaningful for pooled imputed results
		result <- result[-which("fmin" == names(result))]
	}
	result
}
semTools/R/splitSample.R0000644000175100001440000000411113000201061014702 0ustar hornikuserssplitSample<-function(dataset,path="default",div=2,type="default",name="splitSample"){
	## Randomly shuffle 'dataset' (a file path, matrix, or data frame) and split
	## it into 'div' roughly equal subsamples. Depending on 'path', the pieces
	## are returned as a list or written out as .dat/.csv files.
	type1 <- type
	hea = FALSE
	file <- dataset
	if(is.character(file)){
		## 'dataset' is a file path: infer directory, base name, and extension
		temp <- strsplit(file,'/',fixed=TRUE)
		if(path=="default"){
			path <- paste(temp[[1]][1:(length(temp[[1]])-1)],"/",sep='',collapse="")
		}
		fileN <- temp[[1]][length(temp[[1]])]
		temp <- strsplit(fileN,'.',fixed=TRUE)
		type <- temp[[1]][2]
		name <- temp[[1]][1]
		if(type=='dat'){
			## a non-numeric first row is taken as a header
			if(is.numeric(as.matrix(read.table(file, nrows=1)))==FALSE){
				data <- as.matrix(read.table(file,header=TRUE))
				hea=TRUE
			} else {
				data <- as.matrix(read.table(file))
			}
		}
		if(type=='csv'){
			if(is.numeric(as.matrix(read.table(file, nrows=1)))==FALSE){
				data <- as.matrix(read.csv(file,header=TRUE))
				hea=TRUE
			} else {
				data <- as.matrix(read.csv(file))
			}
		}
	} else {
		if(is.matrix(file) | is.data.frame(file)){
			data <- as.matrix(file)
		} else {
			stop("PROVIDE DATA IN .DAT OR .CSV FORMAT")
		}
	}
	if(type1!="default"){
		type <- type1
	}
	if(is.character(colnames(data))){
		hea=TRUE
	}
	## shuffle the rows before splitting
	random <- runif(nrow(data),1,nrow(data))
	data <- cbind(random, data)
	data <- data[order(random),]
	data <- data[,2:ncol(data)]
	## chunk sizes of 'div' consecutive pieces of the shuffled rows
	size <- split((1:nrow(data)),cut((1:nrow(data)),div,labels=FALSE))
	size <- as.matrix(as.data.frame(lapply(size,length)))
	dataL <- list()
	dataL[[1]] <- data[1:size[1,1],]
	## BUG FIX: with div = 1, 2:div evaluates to c(2, 1) and the loop indexed
	## size[1, 0], an out-of-bounds error; guard the loop instead
	if(div > 1){
		for(i in 2:div){
			size[1,i] <- size[1,(i-1)]+size[1,i]
			dataL[[i]] <- data[(size[1,(i-1)]+1):size[1,i],]
		}
	}
	if(path=='default'){
		return(dataL)
	} else {
		if(path=="object"){
			return(dataL)
		} else {
			## write each subsample next to the source, suffixed _s1, _s2, ...
			for(i in 1:div){
				if(type=="dat"){
					write.table(dataL[[i]],paste(path,name,"_s",i,".dat",sep=''),sep=' ',row.names=FALSE,col.names=hea)
				}
				if(type=="csv"){
					write.table(dataL[[i]],paste(path,name,"_s",i,".csv",sep=''),sep=",",row.names=FALSE,col.names=hea)
				}
				if(type=="default"){
					write.table(dataL[[i]],paste(path,name,"_s",i,".dat",sep=''),sep=' ',row.names=FALSE,col.names=hea)
				}
			}
		}
	}
}semTools/R/wald.R0000644000175100001440000000413713000201061013334 0ustar hornikusers wald <- function(object, syntax) {
	model <- unlist( strsplit(syntax, "\n") )
	# remove comments starting with '#' or '!'
	model <- gsub("#.*","", model); model <- gsub("!.*","", model)
	# replace semicolons by newlines and split in lines again
	model <- gsub(";","\n", model); model <- unlist( strsplit(model, "\n") )
	# strip all white space
	model <- gsub("[[:space:]]+", "", model)
	# keep non-empty lines only
	idx <- which(nzchar(model))
	model <- model[idx]
	beta <- lavaan::coef(object)
	contrast <- matrix(0, length(model), length(beta))
	for(i in 1:length(model)) {
		rhs <- model[i]
		out <- NULL
		sign <- NULL
		if(substr(rhs, 1, 1) == "-") {
			sign <- "-"
			rhs <- substr(rhs, 2, nchar(rhs))
		} else {
			sign <- "+"
		}
		cont <- TRUE
		while(cont) {
			pos <- regexpr("[+-]", rhs)
			if(pos == -1) {
				out <- c(out, rhs)
				cont <- FALSE
			} else {
				out <- c(out, substr(rhs, 1, pos - 1))
				sign <- c(sign, substr(rhs, pos, pos))
				rhs <- substr(rhs, pos + 1, nchar(rhs))
			}
		}
		num <- rep(NA, length(out))
		vname <- rep(NA, length(out))
		for(j in seq_along(out)) {
			pos <- regexpr("[*]", out[j])
			tmp <- 1
			if(pos == -1) {
				vname[j] <- out[j]
			} else {
				tmp <- substr(out[j], 1, pos-1)
				vname[j] <- substr(out[j], pos + 1, nchar(out[j]))
			}
			if(is.character(tmp) && regexpr("[/^]", tmp) != -1)
tmp <- eval(parse(text = tmp)) if(is.character(tmp)) tmp <- as.numeric(tmp) num[j] <- tmp if(sign[j] == "-") num[j] <- -num[j] } posmatch <- match(vname, names(beta)) if(any(is.na(posmatch))) { stop(paste("Unknown parameters:", paste(vname[is.na(posmatch)], collapse = ", "))) } contrast[i,posmatch] <- num } result <- waldContrast(object, contrast) print(round(result, 6)) invisible(result) } waldContrast <- function(object, contrast) { beta <- lavaan::coef(object) acov <- lavaan::vcov(object) chisq <- t(contrast %*% beta) %*% solve(contrast %*% as.matrix(acov) %*% t(contrast)) %*% (contrast %*% beta) df <- nrow(contrast) p <- pchisq(chisq, df, lower.tail=FALSE) c(chisq = chisq, df = df, p = p) }semTools/R/probeInteraction.R0000644000175100001440000010010013000201061015677 0ustar hornikusers## Title: Probing Interaction ## Author: Sunthud Pornprasertmanit ## Description: Probing Interaction with Residual Centering ##----------------------------------------------------------------------------## probe2WayMC <- function(fit, nameX, nameY, modVar, valProbe) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if(is.na(modVar) || !(modVar %in% 1:2)) stop("The moderator name is not in the name of independent factors or not 1 or 2.") # Check whether the fit object does not use mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed estSpec <- lavaan::lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") # Get the parameter estimate values from the lavaan object est <- lavaan::lavInspect(fit, "coef") # Compute the intercept of no-centering betaNC <- as.matrix(est$beta[nameY, nameX]); colnames(betaNC) <- nameY # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether 
intercept are estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- rbind(est$alpha[nameY,], betaNC) # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 1) { usedVar <- usedVar[c(1, 3, 2, 4), c(1, 3, 2, 4)] usedBeta <- usedBeta[c(1, 3, 2, 4)] } # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * valProbe varIntcept <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- round(pvalue(zIntcept),6) #JG: rounded values to make them more readable resultIntcept <- cbind(valProbe, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[4] * valProbe varSlope <- usedVar[2, 2] + 2 * valProbe * usedVar[2, 4] + (valProbe^2) * usedVar[4, 4] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- pvalue(zSlope) resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- betaNC # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 2) { usedVar <- usedVar[c(2, 1, 3), c(2, 1, 3)] # usedBeta <- usedBeta[c(2, 1, 3)] } # Find simple slope simpleSlope <- usedBeta[1] + usedBeta[3] * valProbe varSlope <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- 
round(pvalue(zSlope),6) #JG: rounded values to make them more readable resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope)) } probe2WayRC <- function(fit, nameX, nameY, modVar, valProbe) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if(is.na(modVar) || !(modVar %in% 1:2)) stop("The moderator name is not in the name of independent factors or not 1 or 2.") # Check whether the fit object does not use mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed estSpec <- lavaan::lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") # Get the parameter estimate values from the lavaan object est <- lavaan::lavInspect(fit, "coef") # Find the mean and covariance matrix of independent factors varX <- est$psi[nameX, nameX] meanX <- as.matrix(est$alpha[nameX,]); colnames(meanX) <- "intcept" # Find the intercept, regression coefficients, and residual variance of residual-centered regression intceptRC <- est$alpha[nameY,] resVarRC <- est$psi[nameY, nameY] betaRC <- as.matrix(est$beta[nameY, nameX]); colnames(betaRC) <- nameY # Find the number of observations numobs <- lavaan::lavInspect(fit, "nobs") # Compute SSRC meanXwith1 <- rbind(1, meanX) varXwith0 <- cbind(0, rbind(0, varX)) SSRC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute Mean(Y) and Var(Y) betaRCWithIntcept <- rbind(intceptRC, betaRC) meanY <- t(meanXwith1) %*% betaRCWithIntcept varY <- (t(betaRCWithIntcept) %*% SSRC %*% betaRCWithIntcept)/numobs - meanY^2 + resVarRC # Compute Cov(Y, X) covY <- as.matrix((varX %*% betaRC)[1:2,]) # 
Compute E(XZ) meanX[3] <- meanX[1] * meanX[2] + varX[1, 2] # Compute Var(XZ) varX[3, 3] <- meanX[1]^2 * varX[2, 2] + meanX[2]^2 * varX[1, 1] + 2 * meanX[1] * meanX[2] * varX[1, 2] + varX[1, 1] * varX[2, 2] + varX[1, 2]^2 # Compute Cov(X, XZ), Cov(Z, XZ) varX[1, 3] <- varX[3, 1] <- meanX[1] * varX[1, 2] + meanX[2] * varX[1, 1] varX[2, 3] <- varX[3, 2] <- meanX[1] * varX[2, 2] + meanX[2] * varX[1, 2] # Compute Cov(Y, XZ) and regression coefficients of no-centering betaNC <- solve(varX[1:2,1:2], covY - rbind(varX[1,3] * betaRC[3,1], varX[2, 3] * betaRC[3,1])) betaNC <- rbind(betaNC, betaRC[3, 1]) covY <- rbind(covY, (varX %*% betaNC)[3, 1]) # Aggregate the non-centering sufficient statistics (Just show how to do but not necessary) fullCov <- rbind(cbind(varX, covY), c(covY, varY)) fullMean <- rbind(meanX, meanY) # Compute the intercept of no-centering intceptNC <- meanY - t(betaNC) %*% meanX # Compute SSNC betaNCWithIntcept <- rbind(intceptNC, betaNC) meanXwith1 <- rbind(1, meanX) varXwith0 <- rbind(0, cbind(0, varX)) SSNC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute residual variance on non-centering resVarNC <- varY - (t(betaNCWithIntcept) %*% SSNC %*% betaNCWithIntcept)/numobs + meanY^2 # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether intercept are estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC %*% solve(SSNC)) usedBeta <- betaNCWithIntcept # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 1) { usedVar <- usedVar[c(1, 3, 2, 4), c(1, 3, 2, 4)] usedBeta <- usedBeta[c(1, 3, 2, 4)] 
} # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * valProbe varIntcept <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- round(pvalue(zIntcept),6) #JG: rounded values to make them more readable resultIntcept <- cbind(valProbe, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[4] * valProbe varSlope <- usedVar[2, 2] + 2 * valProbe * usedVar[2, 4] + (valProbe^2) * usedVar[4, 4] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- pvalue(zSlope) resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC[2:4, 2:4] %*% solve(SSNC[2:4, 2:4])) usedBeta <- betaNC # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 2) { usedVar <- usedVar[c(2, 1, 3), c(2, 1, 3)] # usedBeta <- usedBeta[c(2, 1, 3)] } # Find simple slope simpleSlope <- usedBeta[1] + usedBeta[3] * valProbe varSlope <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) #JG: rounded values to make them more readable resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope)) } probe3WayMC <- function(fit, nameX, nameY, modVar, valProbe1, valProbe2) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if((NA %in% modVar) || !(do.call("&", as.list(modVar %in% 
1:3)))) stop("The moderator name is not in the list of independent factors and is not 1, 2 or 3.") # Check whether the fit object does not use mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed estSpec <- lavaan::lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") # Get the parameter estimate values from the lavaan object est <- lavaan::lavInspect(fit, "coef") # Compute the intercept of no-centering betaNC <- as.matrix(est$beta[nameY, nameX]); colnames(betaNC) <- nameY # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether intercept are estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 # Find the order to rearrange ord <- c(setdiff(1:3, modVar), modVar) ord <- c(ord, 7 - rev(ord)) resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- rbind(est$alpha[nameY,], betaNC) if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. 
The resulting calculation provided negative standard errors.") # JG: edited this error # Change the order of usedVar and usedBeta if the moderator variable is listed first usedVar <- usedVar[c(1, ord+1, 8), c(1, ord+1, 8)] usedBeta <- usedBeta[c(1, ord+1, 8)] # Find probe value val <- expand.grid(valProbe1, valProbe2) # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * val[,1] + usedBeta[4] * val[,2] + usedBeta[7] * val[,1] * val[,2] varIntcept <- usedVar[1, 1] + val[,1]^2 * usedVar[3, 3] + val[,2]^2 * usedVar[4, 4] + val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 3] + 2 * val[,2] * usedVar[1, 4] + 2 * val[,1] * val[,2] * usedVar[1, 7] + 2 * val[,1] * val[,2] * usedVar[3, 4] + 2 * val[,1]^2 * val[,2] * usedVar[3, 7] + 2* val[,1] * val[,2]^2 * usedVar[4, 7] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- pvalue(zIntcept) resultIntcept <- cbind(val, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[5] * val[,1] + usedBeta[6] * val[,2] + usedBeta[8] * val[,1] * val[,2] varSlope <- usedVar[2, 2] + val[,1]^2 * usedVar[5, 5] + val[,2]^2 * usedVar[6, 6] + val[,1]^2 * val[,2]^2 * usedVar[8, 8] + 2 * val[,1] * usedVar[2, 5] + 2 * val[,2] * usedVar[2, 6] + 2 * val[,1] * val[,2] * usedVar[2, 8] + 2 * val[,1] * val[,2] * usedVar[5, 6] + 2 * val[,1]^2 * val[,2] * usedVar[5, 8] + 2 * val[,1] * val[,2]^2 * usedVar[6, 8] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) # JG: rounded values resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- betaNC if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. 
The resulting calculation provided negative standard errors.") # JG: edited this error # Change the order of usedVar and usedBeta if the moderator variable is listed first usedVar <- usedVar[c(ord, 7), c(ord, 7)] usedBeta <- usedBeta[c(ord, 7)] # Find probe value val <- expand.grid(valProbe1, valProbe2) # Find simple slope simpleSlope <- usedBeta[1] + usedBeta[4] * val[,1] + usedBeta[5] * val[,2] + usedBeta[7] * val[,1] * val[,2] varSlope <- usedVar[1, 1] + val[,1]^2 * usedVar[4, 4] + val[,2]^2 * usedVar[5, 5] + val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 4] + 2 * val[,2] * usedVar[1, 5] + 2 * val[,1] * val[,2] * usedVar[1, 7] + 2 * val[,1] * val[,2] * usedVar[4, 5] + 2 * val[,1]^2 * val[,2] * usedVar[4, 7] + 2 * val[,1] * val[,2]^2 * usedVar[5, 7] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) # JG: rounded values resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope)) } probe3WayRC <- function(fit, nameX, nameY, modVar, valProbe1, valProbe2) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if((NA %in% modVar) || !(do.call("&", as.list(modVar %in% 1:3)))) stop("The moderator name is not in the list of independent factors and is not 1, 2 or 3.") # JG: Changed error # Check whether the fit object does not use mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed estSpec <- lavaan::lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") # Get the parameter estimate values from the lavaan object est <- lavaan::lavInspect(fit, "coef") # Find the mean and covariance 
matrix of independent factors varX <- est$psi[nameX, nameX] meanX <- as.matrix(est$alpha[nameX,]); colnames(meanX) <- "intcept" # Find the intercept, regression coefficients, and residual variance of residual-centered regression intceptRC <- est$alpha[nameY,] resVarRC <- est$psi[nameY, nameY] if(resVarRC < 0) stop("The residual variance is negative. The model did not converge!") # JG: Changed error betaRC <- as.matrix(est$beta[nameY, nameX]); colnames(betaRC) <- nameY # Find the number of observations numobs <- lavaan::lavInspect(fit, "nobs") # Compute SSRC meanXwith1 <- rbind(1, meanX) varXwith0 <- cbind(0, rbind(0, varX)) SSRC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute Mean(Y) and Var(Y) betaRCWithIntcept <- rbind(intceptRC, betaRC) meanY <- t(meanXwith1) %*% betaRCWithIntcept varY <- (t(betaRCWithIntcept) %*% SSRC %*% betaRCWithIntcept)/numobs - meanY^2 + resVarRC # Compute Cov(Y, X) covY <- as.matrix((varX %*% betaRC)[1:3,]) # Compute E(XZ), E(XW), E(ZW), E(XZW) meanX[4] <- expect2NormProd(meanX[c(1,2)], varX[c(1,2), c(1,2)]) meanX[5] <- expect2NormProd(meanX[c(1,3)], varX[c(1,3), c(1,3)]) meanX[6] <- expect2NormProd(meanX[c(2,3)], varX[c(2,3), c(2,3)]) meanX[7] <- expect3NormProd(meanX[1:3], varX[1:3, 1:3]) # Compute Var(XZ), Var(XW), Var(ZW), Var(XZW) varX[4, 4] <- var2NormProd(meanX[c(1,2)], varX[c(1,2), c(1,2)]) varX[5, 5] <- var2NormProd(meanX[c(1,3)], varX[c(1,3), c(1,3)]) varX[6, 6] <- var2NormProd(meanX[c(2,3)], varX[c(2,3), c(2,3)]) varX[7, 7] <- var3NormProd(meanX[1:3], varX[1:3, 1:3]) # Compute All covariances varX[4, 1] <- varX[1, 4] <- expect3NormProd(meanX[c(1, 2, 1)], varX[c(1, 2, 1),c(1, 2, 1)]) - expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) * meanX[1] varX[5, 1] <- varX[1, 5] <- expect3NormProd(meanX[c(1, 3, 1)], varX[c(1, 3, 1),c(1, 3, 1)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * meanX[1] varX[6, 1] <- varX[1, 6] <- expect3NormProd(meanX[c(2, 3, 1)], varX[c(2, 3, 1),c(2, 3, 1)]) - 
expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * meanX[1] varX[7, 1] <- varX[1, 7] <- expect4NormProd(meanX[c(1,2,3,1)], varX[c(1,2,3,1),c(1,2,3,1)]) - expect3NormProd(meanX[c(1,2,3)], varX[c(1,2,3),c(1,2,3)]) * meanX[1] varX[4, 2] <- varX[2, 4] <- expect3NormProd(meanX[c(1, 2, 2)], varX[c(1, 2, 2),c(1, 2, 2)]) - expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) * meanX[2] varX[5, 2] <- varX[2, 5] <- expect3NormProd(meanX[c(1, 3, 2)], varX[c(1, 3, 2),c(1, 3, 2)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * meanX[2] varX[6, 2] <- varX[2, 6] <- expect3NormProd(meanX[c(2, 3, 2)], varX[c(2, 3, 2),c(2, 3, 2)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * meanX[2] varX[7, 2] <- varX[2, 7] <- expect4NormProd(meanX[c(1,2,3,2)], varX[c(1,2,3,2),c(1,2,3,2)]) - expect3NormProd(meanX[c(1,2,3)], varX[c(1,2,3),c(1,2,3)]) * meanX[2] varX[4, 3] <- varX[3, 4] <- expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) - expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) * meanX[3] varX[5, 3] <- varX[3, 5] <- expect3NormProd(meanX[c(1, 3, 3)], varX[c(1, 3, 3),c(1, 3, 3)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * meanX[3] varX[6, 3] <- varX[3, 6] <- expect3NormProd(meanX[c(2, 3, 3)], varX[c(2, 3, 3),c(2, 3, 3)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * meanX[3] varX[7, 3] <- varX[3, 7] <- expect4NormProd(meanX[c(1,2,3,3)], varX[c(1,2,3,3),c(1,2,3,3)]) - expect3NormProd(meanX[c(1,2,3)], varX[c(1,2,3),c(1,2,3)]) * meanX[3] varX[5, 4] <- varX[4, 5] <- expect4NormProd(meanX[c(1,3,1,2)], varX[c(1,3,1,2),c(1,3,1,2)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) varX[6, 4] <- varX[4, 6] <- expect4NormProd(meanX[c(2,3,1,2)], varX[c(2,3,1,2),c(2,3,1,2)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) varX[7, 4] <- varX[4, 7] <- expect5NormProd(meanX[c(1,2,3,1,2)], varX[c(1,2,3,1,2),c(1,2,3,1,2)]) - 
expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) * expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) varX[6, 5] <- varX[5, 6] <- expect4NormProd(meanX[c(2,3,1,3)], varX[c(2,3,1,3),c(2,3,1,3)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) varX[7, 5] <- varX[5, 7] <- expect5NormProd(meanX[c(1,2,3,1,3)], varX[c(1,2,3,1,3),c(1,2,3,1,3)]) - expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) * expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) varX[7, 6] <- varX[6, 7] <- expect5NormProd(meanX[c(1,2,3,2,3)], varX[c(1,2,3,2,3),c(1,2,3,2,3)]) - expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) * expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) # Find the meanX and varX without XZW meanXReducedWith1 <- rbind(1, as.matrix(meanX[1:6])) varXReducedWith0 <- cbind(0, rbind(0, varX[1:6, 1:6])) SSMCReduced <- numobs * (varXReducedWith0 + (meanXReducedWith1 %*% t(meanXReducedWith1))) # Find product of main and two-way onto three-way covXZWwith0 <- rbind(0, as.matrix(varX[7, 1:6])) meanXZWwith1 <- meanX[7] * meanXReducedWith1 SSXZW <- numobs * (covXZWwith0 + meanXZWwith1) # should the mean vector be squared (postmultiplied by its transpose)? 
# Compute a vector and b4, b5, b6 a <- solve(SSMCReduced) %*% as.matrix(SSXZW) betaTemp <- betaRC[4:6] - (as.numeric(betaRC[7]) * a[5:7]) betaTemp <- c(betaTemp, betaRC[7]) # Compute Cov(Y, XZ) and regression coefficients of no-centering betaNC <- solve(varX[1:3,1:3], as.matrix(covY) - (t(varX[4:7, 1:3]) %*% as.matrix(betaTemp))) betaNC <- rbind(as.matrix(betaNC), as.matrix(betaTemp)) covY <- rbind(covY, as.matrix((varX %*% betaNC)[4:7, 1])) # Aggregate the non-centering sufficient statistics (Just show how to do but not necessary) fullCov <- rbind(cbind(varX, covY), c(covY, varY)) fullMean <- rbind(meanX, meanY) # Compute the intercept of no-centering intceptNC <- meanY - t(betaNC) %*% meanX # Compute SSNC betaNCWithIntcept <- rbind(intceptNC, betaNC) meanXwith1 <- rbind(1, meanX) #JG: redundant varXwith0 <- rbind(0, cbind(0, varX)) #JG: redundant SSNC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute residual variance on non-centering resVarNC <- varY - (t(betaNCWithIntcept) %*% SSNC %*% betaNCWithIntcept)/numobs + meanY^2 # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether intercept are estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 # Find the order to rearrange ord <- c(setdiff(1:3, modVar), modVar) ord <- c(ord, 7 - rev(ord)) resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC %*% solve(SSNC)) usedBeta <- betaNCWithIntcept if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. 
The resulting calculation provided negative standard errors.") # JG: edited this error # Change the order of usedVar and usedBeta if the moderator variable is listed first usedVar <- usedVar[c(1, ord+1, 8), c(1, ord+1, 8)] usedBeta <- usedBeta[c(1, ord+1, 8)] # Find probe value val <- expand.grid(valProbe1, valProbe2) # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * val[,1] + usedBeta[4] * val[,2] + usedBeta[7] * val[,1] * val[,2] varIntcept <- usedVar[1, 1] + val[,1]^2 * usedVar[3, 3] + val[,2]^2 * usedVar[4, 4] + val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 3] + 2 * val[,2] * usedVar[1, 4] + 2 * val[,1] * val[,2] * usedVar[1, 7] + 2 * val[,1] * val[,2] * usedVar[3, 4] + 2 * val[,1]^2 * val[,2] * usedVar[3, 7] + 2* val[,1] * val[,2]^2 * usedVar[4, 7] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- pvalue(zIntcept) resultIntcept <- cbind(val, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[5] * val[,1] + usedBeta[6] * val[,2] + usedBeta[8] * val[,1] * val[,2] varSlope <- usedVar[2, 2] + val[,1]^2 * usedVar[5, 5] + val[,2]^2 * usedVar[6, 6] + val[,1]^2 * val[,2]^2 * usedVar[8, 8] + 2 * val[,1] * usedVar[2, 5] + 2 * val[,2] * usedVar[2, 6] + 2 * val[,1] * val[,2] * usedVar[2, 8] + 2 * val[,1] * val[,2] * usedVar[5, 6] + 2 * val[,1]^2 * val[,2] * usedVar[5, 8] + 2 * val[,1] * val[,2]^2 * usedVar[6, 8] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) # JG: rounded values resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC[2:8, 2:8] %*% solve(SSNC[2:8, 2:8])) usedBeta <- betaNC 
# NOTE(review): the lines below up to the first closing brace are the tail of
# the simple-slope probing function defined earlier in the file (the branch in
# which no intercepts were estimated, so only simple slopes are probed).
if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. The resulting calculation provided negative standard errors.") # JG: edited this error
# Change the order of usedVar and usedBeta if the moderator variable is listed first
usedVar <- usedVar[c(ord, 7), c(ord, 7)]
usedBeta <- usedBeta[c(ord, 7)]
# Find probe values: every combination of the two sets of moderator values
val <- expand.grid(valProbe1, valProbe2)
# Find the simple slope at each probed combination of moderator values
simpleSlope <- usedBeta[1] + usedBeta[4] * val[,1] + usedBeta[5] * val[,2] + usedBeta[7] * val[,1] * val[,2]
# Sampling variance of each simple slope (quadratic form in the probed values)
varSlope <- usedVar[1, 1] + val[,1]^2 * usedVar[4, 4] + val[,2]^2 * usedVar[5, 5] +
	val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 4] +
	2 * val[,2] * usedVar[1, 5] + 2 * val[,1] * val[,2] * usedVar[1, 7] +
	2 * val[,1] * val[,2] * usedVar[4, 5] + 2 * val[,1]^2 * val[,2] * usedVar[4, 7] +
	2 * val[,1] * val[,2]^2 * usedVar[5, 7]
# Wald z statistic and two-sided p value for each simple slope
zSlope <- simpleSlope/sqrt(varSlope)
pSlope <- round(pvalue(zSlope),6) # JG: rounded values
resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope)
colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p")
}
return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope))
}

# Find the expected value of the product of two normal variates:
# E[X1*X2] = mu1*mu2 + Cov(X1, X2)
# m = the mean of each normal variate
# s = the covariance matrix of all variates
expect2NormProd <- function(m, s) {
	return(prod(m) + s[1, 2])
}

# Find the expected value of the product of three normal variates:
# E[X1*X2*X3] = mu1*mu2*mu3 plus each mean times the covariance of the
# other two variates
# m = the mean of each normal variate
# s = the covariance matrix of all variates
expect3NormProd <- function(m, s) {
	return(prod(m) + m[3] * s[1, 2] + m[2] * s[1, 3] + m[1] * s[2, 3])
}

# Find the expected value of the product of four normal variates
# m = the mean of each normal variate
# s = the covariance matrix of all variates
expect4NormProd <- function(m, s) {
	# product of the four means
	first <- prod(m)
	# all 6 unordered pairs of indices
	com <- combn(1:4, 2)
	# for one pair: covariance of the pair times the product of the
	# remaining two means
	forSecond <- function(draw, meanval, covval, index) {
		draw2 <- setdiff(index, draw)
		prod(meanval[draw2]) * covval[draw[1], draw[2]]
	}
	second <- sum(apply(com, 2, forSecond, meanval=m, covval=s, index=1:4))
	com2 <- com[,1:3] #select only first three terms containing the first element only
	# for one pair: product of its covariance with the covariance of the
	# complementary pair (the three ways to split four indices into two pairs)
	forThird <- function(draw, covval, index) {
		draw2 <- setdiff(index, draw)
		covval[draw[1], draw[2]] * covval[draw2[1], draw2[2]]
	}
	third <- sum(apply(com2, 2, forThird, covval=s, index=1:4))
	return(first + second + third)
}

# Find the expected value of the product of five normal variates
# m = the mean of each normal variate
# s = the covariance matrix of all variates
expect5NormProd <- function(m, s) {
	# product of the five means
	first <- prod(m)
	# all unordered pairs of indices
	com <- combn(1:5, 2)
	# covariance of a pair times the product of the remaining three means
	forSecond <- function(draw, meanval, covval, index) {
		draw2 <- setdiff(index, draw)
		prod(meanval[draw2]) * covval[draw[1], draw[2]]
	}
	second <- sum(apply(com, 2, forSecond, meanval=m, covval=s, index=1:5))
	# all subsets of four indices; the left-out variate contributes its mean
	com2 <- combn(1:5, 4)
	forThirdOuter <- function(index, m, s, indexall) {
		targetMean <- m[setdiff(indexall, index)]
		cominner <- combn(index, 2)[,1:3] #select only first three terms containing the first element only
		# product of the covariances of two complementary pairs within the subset
		forThirdInner <- function(draw, covval, index) {
			draw2 <- setdiff(index, draw)
			covval[draw[1], draw[2]] * covval[draw2[1], draw2[2]]
		}
		thirdInner <- targetMean * sum(apply(cominner, 2, forThirdInner, covval=s, index=index))
		return(thirdInner)
	}
	third <- sum(apply(com2, 2, forThirdOuter, m=m, s=s, indexall=1:5))
	return(first + second + third)
}

# Find the variance of the product of two normal variates
# m = the mean of each normal variate
# s = the covariance matrix of all variates
var2NormProd <- function(m, s) {
	# mean-weighted variances of each variate
	first <- m[2]^2 * s[1, 1] + m[1]^2 * s[2, 2]
	# cross term involving both means and the covariance
	second <- 2 * m[1] * m[2] * s[1, 2]
	# product of the variances
	third <- s[1, 1] * s[2, 2]
	# squared covariance
	fourth <- s[1, 2]^2
	return(first + second + third + fourth)
}

# Find the variance of the product of three normal variates
# m = the mean of each normal variate
# s = the covariance matrix of all variates
var3NormProd <- function(m, s) {
	# all unordered pairs of the three indices
	com <- combn(1:3, 2)
	forFirst <- function(draw, meanval, covval, index) {
		# draw = 2, 3; draw2 = 1
		draw2 <- setdiff(index, draw)
		term1 <- meanval[draw[1]]^2 * meanval[draw[2]]^2 * covval[draw2, draw2]
		term2 <- 2 * meanval[draw2]^2 * meanval[draw[1]] * meanval[draw[2]] * covval[draw[1], draw[2]]
		term3 <- (meanval[draw2]^2 * covval[draw[1], draw[1]] * covval[draw[2], draw[2]]) + (meanval[draw2]^2 * covval[draw[1], draw[2]]^2)
		term4 <- 4 * meanval[draw[1]] * meanval[draw[2]] * covval[draw2, draw2] * covval[draw[1], draw[2]]
		term5 <- 6 * meanval[draw[1]] * meanval[draw[2]] * covval[draw2, draw[1]] * covval[draw2, draw[2]]
		term1 + term2 + term3 + term4 + term5
	}
	first <- sum(apply(com, 2, forFirst, meanval=m, covval=s, index=1:3))
	second <- prod(diag(s))
	third <- 2 * s[3, 3] * s[1, 2]^2 + 2 * s[2, 2] * s[1, 3]^2 + 2 * s[1, 1] * s[2, 3]^2
	fourth <- 8 * s[1, 2] * s[1, 3] * s[2, 3]
	return(first + second + third + fourth)
}

# plotProbe: plot the probing interaction result returned by the
# probe2way*/probe3way* functions (an object with SimpleSlope and,
# optionally, SimpleIntcept data frames)
# NOTE(review): "Indepedent" is misspelled in the default xlab below; fixing
# it would change the user-visible axis label, so it is only flagged here.
plotProbe <- function(object, xlim, xlab="Indepedent Variable", ylab="Dependent Variable", ...) {
	if(length(xlim) != 2) stop("The x-limit should be specified as a numeric vector with the length of 2.")
	# Extract simple slope
	slope <- object$SimpleSlope
	# Check whether the object is the two-way or three-way interaction result
	# (a three-way result carries two moderator columns, i.e. six columns total)
	numInt <- 2
	if(ncol(slope) == 6) numInt <- 3
	estSlope <- slope[,ncol(slope) - 3]
	# Get whether the simple slope is significant (p < .05). If so, the
	# resulting line is drawn in color 2 (red); if not, color 1 (black).
	estSlopeSig <- (slope[,ncol(slope)] < 0.05) + 1
	# Extract simple intercept. If the simple intercept is not provided, the intercept will be fixed as 0.
	estIntercept <- NULL
	if(!is.null(object$SimpleIntcept)) estIntercept <- object$SimpleIntcept[,ncol(slope) - 3]
	if(numInt == 2) {
		plotSingleProbe(estSlope, estIntercept, xlim=xlim, xlab=xlab, ylab=ylab, colLine=estSlopeSig, legendMain=colnames(slope)[1], legendVal=slope[,1], ...)
	} else if (numInt == 3) {
		# Three-way interaction; separate lines for the first moderator, separate graphs for the second moderator
		mod2 <- unique(slope[,2])
		mod1 <- unique(slope[,1])
		# Use multiple graphs in a figure: one panel per value of the second moderator
		if (length(mod2) == 2) {
			obj <- par(mfrow = c(1, 2))
		} else if (length(mod2) == 3) {
			obj <- par(mfrow = c(1, 3))
		} else if (length(mod2) > 3) {
			obj <- par(mfrow = c(2, ceiling(length(mod2)/2)))
		} else if (length(mod2) == 1) {
			# Intentionally leaving as blank
		} else {
			stop("Some errors occur")
		}
		for(i in 1:length(mod2)) {
			select <- slope[,2] == mod2[i]
			plotSingleProbe(estSlope[select], estIntercept[select], xlim=xlim, xlab=xlab, ylab=ylab, colLine=estSlopeSig[select], main=paste(colnames(slope)[2], "=", mod2[i]), legendMain=colnames(slope)[1], legendVal=mod1, ...)
		}
		# restore the previous par() settings when more than one panel was used
		if (length(mod2) > 1) par(obj)
	} else {
		stop("Please make sure that the object argument is obtained from 'probe2wayMC', 'probe2wayRC', 'probe3wayMC', or 'probe3wayRC'.")
	}
}

# plotSingleProbe : plot the probing interaction result specific for only one moderator
# estSlope = slope of each line
# estIntercept = intercept of each line
# xlim = the minimum and maximum values of the independent variable (x-axis)
# xlab = the label for the independent variable
# ylab = the label for the dependent variable
# main = the title of the graph
# colLine = the color of each line
# legendMain = the title of the legend
# legendVal = the description of each line representing in the plot
plotSingleProbe <- function(estSlope, estIntercept=NULL, xlim, xlab="Indepedent Variable", ylab="Dependent Variable", main=NULL, colLine="black", legendMain=NULL, legendVal=NULL, ...)
{ if(is.null(estIntercept)) estIntercept <- rep(0, length(estSlope)) if(length(colLine) == 1) colLine <- rep(colLine, length(estSlope)) lower <- estIntercept + (xlim[1] * estSlope) upper <- estIntercept + (xlim[2] * estSlope) ylim <- c(min(c(lower, upper)), max(c(lower, upper))) plot(cbind(xlim, ylim), xlim=xlim, ylim=ylim, type="n", xlab=xlab, ylab=ylab, main=main, ...) for(i in 1:length(estSlope)) { lines(cbind(xlim, c(lower[i], upper[i])), col = colLine[i], lwd=1.5, lty=i) } if(!is.null(legendVal)) { positionX <- 0.25 if(all(estSlope > 0)) positionX <- 0.01 if(all(estSlope < 0)) positionX <- 0.50 legend(positionX * (xlim[2] - xlim[1]) + xlim[1], 0.99 * (ylim[2] - ylim[1]) + ylim[1], legendVal, col=colLine, lty=1:length(estSlope), title=legendMain) } } semTools/R/lisrel2lavaan.R0000644000175100001440000013212413000250017015147 0ustar hornikusers##lisrel2lavaan ##Corbin Quick ##02/12/13 ##file path/name of LS8 LISREL syntax file lisrel2lavaan <- function(filename=NULL, analyze=TRUE, silent=FALSE, ...){ ## if filename == null, prompt user with file browser if(is.null(filename)){ reverseSlash <- function (x, pat = "\\", rep = "/") { x <- gsub(pat, rep, x, fixed = T) x <- gsub("'", "", x, fixed = T) x <- gsub('"', "", x, fixed = T) paste(x, collapse = " ") } filename <- reverseSlash(file.choose()) } ## if a file path is included in 'filename', set the working directory ## to that path so that data files will be searched for in the same ## directory as the syntax file regardless of the current directory. ## working directory is restored at the end of the function. 
temp <- unlist(strsplit(filename,'/',fixed=T)) restore.wd <- getwd() if(length(temp)>1){ path <- paste(temp[1:(length(temp)-1)],"/",sep='',collapse="") filename <- temp[length(temp)] setwd(path) } lisrel<-function(filename, analyze, ...){ ## "find" function for manipulating syntax find <- function(pat = 0, sou, n = 1) { flag <- function(vec, pat){ vec <- unlist(vec) if(is.null(vec[1])){ FALSE }else{ if(is.na(vec[1])){ FALSE } else { if(vec[1]==pat){TRUE}else{FALSE} } } } if (is.data.frame(sou) | is.matrix(sou)) { out <- 1:nrow(sou) out <- out[unlist(apply(sou, 1, flag, pat = pat))] out <- out[length(out)] } else if (is.list(sou) | is.vector(sou)) { if(is.vector(sou)){ sou <- as.list(sou) } out <- 1:length(sou) out <- out[unlist(lapply(sou, flag, pat = pat))] if(n!=0) { out <- out[n] } else { out <- out[length(out)] } } else { out <- NULL } if(!is.null(out)){ out <- out[!is.na(out)] if(length(out)<1){ out <- NULL } else if(is.na(out)){ out <- NULL } } out } as.numeric.s <- function(x){ suppressWarnings(as.numeric(x)) } ## function to evaluate MO matrix commands; creates pseudo-class for matrices modMat <- function(name, line) { ## obtain row/col numbers using ref table (external) row <- eval(parse(text=paste(ref[find(name,ref),2]))) col <- eval(parse(text=paste(ref[find(name,ref),3]))) ## constraint and misc are blank by default constraint <- matrix(0, row, col) misc <- matrix("", row, col) ## if mode specified then obtain mode, else mode='de' (default) if(length(unlist(strsplit(line,",")))>1){ form <- unlist(strsplit(line,","))[1] mode <- unlist(strsplit(line,","))[2] } else { if(any(line==c("fi","fr"))){ mode <- line if(any(name==c("lx","ly","ga"))){ form <- "fu" }else if(any(name==c("ps","te","td"))){ form <- "di" }else if(any(name==c("be","th"))){ form <- "ze" }else if(any(name==c("ph"))){ form <- "sy" }else { form <- "fu" } }else{ form <- line mode <- "de" } } ## determine matrix type (properties differ) if(any(name== c("lx","ly") )){ 
if(any(form==c("fu","ze"))){ if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) }else{ start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(any(form==c("sd","sy","st","iz","zi"))){ if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) }else{ start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(form=="di"){ if(name=="ly"){ if(ny==ne){ if(mode=="fr"){ start <- as.matrix(diag(NA, row)) free <- as.matrix(diag(1, row)) }else{ start <- as.matrix(diag(1, row)) free <- as.matrix(diag(0, row)) } }else { stop("syntax error: LY matrix cannot be form DI when NY is not equal to NE") } } if(name=="lx"){ if(nx==nk){ if(mode=="fr"){ start <- as.matrix(diag(NA, row)) free <- as.matrix(diag(1, row)) }else { start <- as.matrix(diag(1, row)) free <- as.matrix(diag(0, row)) } }else { stop("syntax error: LX matrix cannot be form DI when NX is not equal to NK") } } }else if(any(form==c("id"))){ start <- matrix(0, row, col) diag(start) <- 1 free <- matrix(0, row, col) } }else if(name=="ga") { if(form=="fu"){ if(mode=="fr" | mode=="de"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) } else { start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(form=="ze"){ if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) } else { start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(any(form==c("sd","sy","st","iz","zi"))){ if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) } else { start <- matrix(0, row, col) free <- matrix(0, row, col) } } if(form=="di"){ if(ny==nx){ if(mode=="fr"){ start <- as.matrix(diag(NA, row)) free <- as.matrix(diag(1, row)) } else { start <- as.matrix(diag(1, row)) free <- as.matrix(diag(0, row)) } } else { stop("syntax error: GA matrix cannot be form DI when NY is not equal to NX") } } if(form=="id"){ start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(name=="be") { if(any(form==c("fu","ze"))){ 
if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) }else { start <- matrix(0, row, col) free <- matrix(0, row, col) } } else if(form=="sy"){ if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) }else if(mode=="de") { start <- as.matrix(diag(NA, row)) free <- as.matrix(diag(1, row)) }else { start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(any(form==c("sd","st","iz","zi","id", "di"))){ if(mode=="fi"){ start <- as.matrix(diag(1, row)) free <- as.matrix(diag(0, row)) }else { start <- as.matrix(diag(NA, row)) free <- as.matrix(diag(1, row)) } } }else if(any(name==c("td", "te", "th", "ph", "ps"))) { if(any(form==c("fu","ze"))){ if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) }else { start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(form=="sy"){ if(mode=="fr"){ start <- matrix(NA, row, col) free <- matrix(1, row, col) }else if(mode=="de") { start <- as.matrix(diag(NA, row)) free <- as.matrix(diag(1, row)) }else { start <- matrix(0, row, col) free <- matrix(0, row, col) } }else if(any(form==c("sd","st","iz","zi","id","di"))){ if(mode=="fi"){ start <- as.matrix(diag(1, row)) free <- as.matrix(diag(0, row)) }else { start <- as.matrix(diag(NA, row)) free <- as.matrix(diag(1, row)) } } }else if(any(name==c("ty","tx","ka","kl","al"))){ if(any(mode==c("fi","ze")) | any(form==c("fi","ze"))){ start <- matrix(0, row, col) free <- matrix(0, row, col) }else{ start <- matrix(NA, row, col) free <- matrix(1, row, col) } } list(start=start,free=free,constraint=constraint,misc=misc) } ## function to format LISREL syntax doc <- scan(filename, "", sep="\n") format <- function(doc) { doc <- gsub("\t"," ",doc) doc <- gsub("(^ +)|( +$)", "", doc) doc <- gsub("\\(","[",doc) doc <- gsub("\\)","]",doc) doc <- gsub("]","] ",doc) doc <- gsub(" \\[","\\[",doc) doc <- gsub("/","",doc) doc <- lapply(doc, function(x){gsub("!","`!",x)}) if(length(grep("!",doc))>0) { doc <- 
lapply(lapply(doc,strsplit,split="`",fixed=TRUE),unlist) doc <- lapply(doc, function(x){if(length(grep("!",x))>0){x[1:((grep("!",x))[1]-1)]}else{x}}) } del <- lapply(doc,function(x){if(is.null(find("",x))){NULL}else{find("",x)+1}}) for(i in seq_along(del)){ if(!is.null(del[[i]])){ doc[[i]] <- doc[[i]][doc[[i]]!=doc[[i]][del[[i]]]] } } doc<-unlist(doc) doc<-lapply(doc,gsub,pattern="\t",replacement="") doc<-lapply(doc,gsub,pattern="(^ +)|( +$)",replacement="") doc<-lapply(lapply(doc,strsplit,split=" ",fixed=TRUE),unlist) doc<-doc[doc!=""] doc<-lapply(doc,function(x){x[x!=""]}) doc<-doc[!unlist(lapply(doc,is.null))] doc<-doc[unlist(lapply(doc,function(x){if(length(x)==0){FALSE}else{TRUE}}))] doc } doc0 <- format(doc) doc <- format(tolower(doc)) ## OU output commands ... if(!is.null(find("ou",doc))){ ou <- unlist(doc[[find("ou",doc)]]) ou <- ou[ou!="ou"] }else{ ou <- NULL } if(length(grep("me",ou))>0){ estimator <- unlist(strsplit(ou[grep("me",ou)],"="))[2] if(estimator=="gl"){ estimator <- "GLS" }else if(estimator=="wl"){ estimator <- "WLS" }else if(estimator=="ul"){ estimator <- "ULS" }else if(estimator=="dw"){ estimator <- "DWLS" } }else{ estimator <- "default" } # if(length(grep("se",ou))>0){ # me <- # }else{ # me <- "default" # } ## Multiple-Group Models groupN <- 1 da <- doc[[find("da",doc,1)]] da <- t(as.data.frame(strsplit(da[2:length(da)],"="))) if(!is.null(find("ng",da))){ ng <- as.numeric.s(da[find("ng",(da)),2]) if(ng>1){ for(i in 2:ng){ if(i==ng){ tx <- ")):length(doc)]" }else{ tx <- paste(")):(find('da',doc,",(i+1),")-1)]",sep="") } eval(parse(text=paste("doc",i,"<-doc[(find('da',doc,",i,tx,sep=""))) eval(parse(text=paste("doc0",i,"<-doc0[(find('da',doc,",i,tx,sep=""))) } doc0 <- doc0[1:(find("da",doc,2)-1)] doc <- doc[1:(find("da",doc,2)-1)] } }else{ ng <- 1 } ## FUNCTION TO EXTRACT DATA ## get # variables ## must be global environment ni <- doc[[find("da",doc)]][[grep("ni",doc[[find("da",doc)]])]] ni <- as.numeric.s(gsub("ni=","",ni)) ## the 'makeSym' 
function is primarily used in 'getData'; ## however, it has uses elsewhere (e.g. PA commands), and ## therefore must be left out of 'getData' itself. makeSym <- function(dat, ni){ dat <- unlist(dat) lapply(1:ni,function(x,dat){ if(x==1){ return(dat[1]) }else{ dat[(sum(1:(x-1))+1):sum(1:x)] } },dat=dat) } getData <- function(doc, doc0, ngroup = 1) { ## below is an unfortunate work-around .. if(length(grep("cm=", doc))>0){ doc[[grep("cm=", doc)]] <- unlist(strsplit(unlist(doc[grep("cm=", doc)]),"=")) doc0[[grep("cm=", doc0, ignore.case=T)]] <- unlist(strsplit(unlist(doc0[grep("cm=", doc0, ignore.case=T)]),"=")) } if(length(grep("km=", doc))>0){ doc[[grep("km=", doc)]] <- unlist(strsplit(unlist(doc[grep("km=", doc)]),"=")) doc0[[grep("km=", doc0, ignore.case=T)]] <- unlist(strsplit(unlist(doc0[grep("km=", doc0, ignore.case=T)]),"=")) } if(length(grep("me=", doc))>0){ doc[[grep("me=", doc)]] <- unlist(strsplit(unlist(doc[grep("me=", doc)]),"=")) doc0[[grep("me=", doc0, ignore.case=T)]] <- unlist(strsplit(unlist(doc0[grep("me=", doc0, ignore.case=T)]),"=")) } if(length(grep("pm=", doc))>0){ doc[[grep("pm=", doc)]] <- unlist(strsplit(unlist(doc[grep("pm=", doc)]),"=")) doc0[[grep("pm=", doc0, ignore.case=T)]] <- unlist(strsplit(unlist(doc0[grep("pm=", doc0, ignore.case=T)]),"=")) } if(length(grep("sd=", doc))>0){ doc[[grep("sd=", doc)]] <- unlist(strsplit(unlist(doc[grep("sd=", doc)]),"=")) doc0[[grep("sd=", doc0, ignore.case=T)]] <- unlist(strsplit(unlist(doc0[grep("sd=", doc0, ignore.case=T)]),"=")) } if(length(grep("ra=", doc))>0){ doc[[grep("ra=", doc)]] <- unlist(strsplit(unlist(doc[grep("ra=", doc)]),"=")) doc0[[grep("ra=", doc0, ignore.case=T)]] <- unlist(strsplit(unlist(doc0[grep("ra=", doc0, ignore.case=T)]),"=")) } ##paragraphs of interest ... 
paragraphs <- c("cm","km","me","pm","sd","ra") pValues <- 1:length(paragraphs) cm <- NULL km <- NULL me <- NULL pm <- NULL sd <- NULL ra <- NULL fLength <- c(((ni^2+ni)/2), ((ni^2+ni)/2), ni, ni, ni, NA) fExist <- rep(FALSE, length(paragraphs)) fLocate <- rep(0, length(paragraphs)) fNames <- rep(NA, length(paragraphs)) fData <- list() dataList <- list() dN <- 1 existList <- list() orderList <- list() charTest <-function(line){ line <- unlist(line) if(is.na(as.numeric.s(line[1]))) {TRUE} else {FALSE} } ## read in data for individual paragraphs for(i in seq_along(paragraphs)){ p <- paragraphs[[i]] if(!is.null(find(p,doc))){ line1a <- length(unlist(doc[find(p,doc)]))>1 line1b <- if(line1a){charTest(doc[[find(p,doc)]][2])}else{FALSE} line2 <- charTest(unlist(doc[find(p,doc)+1])) if(line1a && line1b && line2){ fExist[i] <- TRUE if(length(grep("=",doc0[[find(p,doc)]][2]))>0){ if(doc0[[find(p,doc)]][2]=="="){ fname <- doc0[[find(p,doc)]][3] }else{ fname <- unlist(strsplit(doc0[[find(p,doc)]][2],"="))[2] } }else{ fname <- doc0[[find(p,doc)]][2] } if(length(find(fname, fNames))==0){ if(paragraphs[i]=="ra"){ type <- tolower(substr(fname, (nchar(fname)-2), nchar(fname))) if(type=='dat'){ if(is.numeric(as.matrix(read.table(fname, nrows=1)))==FALSE){ dataList[[dN]] <- as.matrix(read.table(fname,header=TRUE)) } else{dataList[[dN]] <- as.matrix(read.table(fname))} } else if(type=='csv'){ if(is.numeric(as.matrix(read.table(fname, nrows=1)))==FALSE){ dataList[[dN]] <- as.matrix(read.csv(fname,header=TRUE)) }else{dataList[[dN]] <- as.matrix(read.csv(fname))} } else if (type=='psf'){ stop("Please use a different data format: .PSF files are compatible only with PRELIS.") } else { if(is.numeric(as.matrix(read.table(fname, nrows=1)))==FALSE){ dataList[[dN]] <- as.matrix(read.table(fname,header=TRUE)) } else{dataList[[dN]] <- as.matrix(read.table(fname))} } }else{ dataList[[dN]] <- unlist(format(scan(fname,"",sep="\n"))) } fLocate[i] <- dN existList[[dN]] <- rep(FALSE, 
length(paragraphs)) existList[[dN]][i] <- TRUE dN <- dN + 1 } else { existList[[(dN-1)]][i] <- TRUE fLocate[i] <- fLocate[find(fname, fNames)] } fNames[[i]] <- fname } } } ## determine order: which paragraphs are found in data files first? if(length(dataList)>0){ for(x in 1:length(dataList)){ tempFrame <- matrix(NA,2,length(pValues[existList[[x]]])) tempFrame[1,] <- pValues[existList[[x]]] for(i in pValues[existList[[x]]]){ tempFrame[2,tempFrame[1,]==i] <- find(paragraphs[i], doc) } orderList[[x]] <- tempFrame[1,order(tempFrame[2,])] } ## assign appropriate data to paragraph list for(x in 1:length(dataList)){ for(i in orderList[[x]]){ ## TEST: IS FULL (NON-SYMMETRIC) MATRIX?? if(paragraphs[[i]]=="ra"){ fData[[i]] <- dataList[[x]] }else{ if(fLength[i]==((ni^2+ni)/2)){ if(dataList[[x]][2]==dataList[[x]][(ni+1)] && dataList[[x]][3]==dataList[[x]][(2*ni+1)]){ fLength[i] <- ni^2 fData[[i]] <- dataList[[x]][1:fLength[i]] }else{ fData[[i]] <- makeSym(dataList[[x]][1:fLength[i]], ni=ni) } } else { fData[[i]] <- dataList[[x]][1:fLength[i]] } dataList[[x]] <- dataList[[x]][(fLength[i]+1):length(dataList[[x]])] } } } } excerpt <- function(para, doc){ ## determine whether or not paragraph is specified if(!is.null(find(para, doc))){ if(fExist[[grep(para, paragraphs)]]){ fData[[grep(para, paragraphs)]] } else { out <- find(para, doc):length(doc) out <- out[unlist(lapply(doc0[find(para, doc):length(doc)], charTest))] doc[(find(para, doc)+1):(out[2]-1)] } } else { return(NULL) } } makeMatrix <- function(x) { if(is.null(x)){ NULL } else { if(length(x)>1 && length(x[[1]])!=length(x[[2]])){ for(i in 1:(length(x)-1)){ d <- unlist(lapply(x[(i+1):length(x)],function(z,i){z[i]}, i=i)) x[[i]] <- c(x[[i]],d) } do.call(rbind,lapply(x, as.numeric.s)) } else { if(!is.matrix(x) && !is.data.frame(x)){ sapply(x, as.numeric.s,simplify="vector") } else{ apply(x, 2, as.numeric.s) } } } } for(i in paragraphs){ if(i=="me"|i=="sd"){ assign(i,makeMatrix(unlist(excerpt(i, doc)))) }else{ 
assign(i,makeMatrix(excerpt(i, doc))) } } if(!is.null(cm)){ if(length(var)>ncol(cm)){ var <- var[1:ncol(cm)] } } if(!is.null(km)){ if(length(var)>ncol(km)){ var <- var[1:ncol(km)] } } output <- list(cm=cm,km=km,me=me,pm=pm,sd=sd,ra=ra) rows <- list(var,var,NULL,var,NULL,NULL) for(i in paragraphs[!sapply(output,is.null)]){ if(i=="ra"){ if(length(var)0){ w <- grep("-",x) if(nchar(x[w])>1){ x <- gsub("-","`-`",x) x <- unlist(strsplit(x, "`")) w <- grep("-",x) } start <- x[(w-1)] end <- x[(w+1)] startV <- as.numeric(gsub("[^0-9]","",start)) endV <- as.numeric(gsub("[^0-9]","",end)) name <- gsub("[0-9]","",start) out <- paste(name, startV:endV, sep="") if(length(x)>(w+2)){ if(w==2){ c(out,x[(w+2):length(x)]) }else{ c(x[1:(w-2)],out,x[(w+2):length(x)]) } }else{ if(w==2){ out }else{ c(x[1:(w-2)],out) } } }else{ x } } ##pullNames function to simplify obtaining names pullNames <- function(x){ y <- find(x, doc) if(is.null(y)){ NULL } else { coms <- c("mo","km","cm","se","la","lk","le","ou","pd","ra","fr","va","fi","eq","co") coms <- coms[coms!=x] is.com <- function(line){any(unlist(line)[1]==coms)} com.l <- c(1:length(doc))[sapply(doc,is.com)] names <- unlist(doc0[(y+1):(c(com.l[com.l>y])[1]-1)]) extrapNames(names[names!=""]) } } use <- pullNames("se") var <- pullNames("la") if(is.null(use)){use <- var} use.t <- use if(!is.null(var)){ name.def <- FALSE } else { name.def <- TRUE } if(!is.null(ny)){ NY <- use.t[1:ny] use.t <- use.t[(ny+1):length(use.t)] }else{ NY <- NULL } if(!is.null(nx)){ NX <- use.t[1:nx] }else{ NX <- NULL } if(name.def){ ## names not specified if(is.numeric(nx)){ NX <- paste("ksi",1:nx,sep="") if(is.null(ny)){ NY <- NX var <- NX } } if(is.numeric(ny)){ NY <- paste("eta",1:ny,sep="") if(is.null(nx)){ NX <- NY var <- NY } } use <- var } NK <- pullNames("lk") NE <- pullNames("le") ## for path analysis models... 
if(!is.null(nx)){ if(nx>length(NX)){ NX <- paste("ksi", 1:nx, sep="") } } if(!is.null(ny)){ if(ny>length(NY)){ NY <- paste("eta", 1:ny, sep="") } } if(is.null(NK)){ NK<-NX } if(!is.null(nk)){ if(nk>length(NK)){ NK <- paste("KSI", 1:nk, sep="") } } if(is.null(NE)){ NE<-NY } if(!is.null(ne)){ if(ne>length(NE)){ NE <- paste("ETA", 1:ne, sep="") } } if(is.null(nk)){nk<-nx} if(is.null(ne)){ne<-ny} ## generate model matrices for(i in 1:nrow(mo)){ assign(mo[i,1],modMat(mo[i,1], mo[i,2])) } if((!is.null(ph) && !is.null(td))|(length(grep("lx",doc))>0)){ if(is.null(find("lx",mo))){ mo <- rbind(mo,c("lx","fu,fi")) lx <- modMat("lx", "fu,fi") } } if((!is.null(ps) && !is.null(te))|(length(grep("ly",doc))>0)){ if(is.null(find("ly",mo))){ mo <- rbind(mo,c("ly","fu,fi")) lx <- modMat("ly", "fu,fi") } } ## PA paragraph commands while(!is.null(find("pa",doc))){ if(!is.null(find("pa",doc))){ loc.n <- find("pa",doc) nam.n <- unlist(doc[[loc.n]])[2] row.n <- (eval(parse(text=paste(ref[find(nam.n,ref),2])))) lis.n <- doc[(loc.n+1):(loc.n+row.n)] if(length(lis.n[[1]])!=length(lis.n[[length(lis.n)]])){ lis.n <- lavaan::lower2full(lavaan::char2num(paste(lapply(lis.n,paste,collapse=", "),collapse="\n"))) }else{ lis.n <- do.call(rbind,lapply(lis.n,as.numeric.s)) } eval(parse(text=paste(nam.n,"$free<-lis.n"))) tex.n<-paste(nam.n,"$start[(is.na(",nam.n,"$start)|",nam.n,"$start=='NA')&(",nam.n,"$free==0)]<-0") eval(parse(text=tex.n)) doc[(loc.n):(loc.n+row.n)] <- NULL doc <- doc[!is.null(doc)] } } ## MA paragraph commands while(!is.null(find("ma",doc))){ if(!is.null(find("ma",doc))){ if(length(grep("fi",doc[[find("ma",doc)]]))>0){ loc.n <- find("ma",doc) nam.n <- unlist(doc[[loc.n]])[2] file <- unlist(strsplit(doc[[find("ma",doc)]],'=')) file <- file[length(file)] lis.n <- read.table(file) if(length(grep('D',unlist(lis.n)))>0){ s2n <- function(x){ s2n.i <- function(x){ x <- as.numeric(unlist(strsplit(x,'D'))) x[1]*10^(x[2]) } sapply(x, s2n.i) } lis.n <- apply(lis.n, 2, s2n) } 
if(is.list(lis.n)){ if(length(lis.n[[1]])!=length(lis.n[[length(lis.n)]])){ lis.n <- lavaan::lower2full(lavaan::char2num(paste(lapply(lis.n,paste,collapse=", "),collapse="\n"))) }else{ lis.n <- do.call(rbind,lapply(lis.n,as.numeric.s)) } } eval(parse(text=paste(nam.n,"$start<-lis.n"))) doc[(loc.n)] <- NULL doc <- doc[!is.null(doc)] }else{ loc.n <- find("ma",doc) nam.n <- unlist(doc[[loc.n]])[2] row.n <- (eval(parse(text=paste(ref[find(nam.n,ref),2])))) lis.n <- doc[(loc.n+1):(loc.n+row.n)] if(length(lis.n[[1]])!=length(lis.n[[length(lis.n)]])){ lis.n <- lavaan::lower2full(lavaan::char2num(paste(lapply(lis.n,paste,collapse=", "),collapse="\n"))) }else{ lis.n <- do.call(rbind,lapply(lis.n,as.numeric.s)) } eval(parse(text=paste(nam.n,"$start<-lis.n"))) doc[(loc.n):(loc.n+row.n)] <- NULL doc <- doc[!is.null(doc)] } } } ## ensure that command lines have brackets fixLazilyWrittenSyntax <- function(line){ commands <- c("fr", "fi", "eq", "co", "va", "st", "pa") line <- unlist(line) if(any(line[1]==commands) && length(grep("\\[",line))==0){ if(!is.na(as.numeric.s(line[2]))){ l <- 2 } else {l <- 1} line[(l+1):length(line)]<-sapply(line[(l+1):length(line)], function(x){if(!is.na(as.numeric.s(x))){paste('[',x,']',sep='')}else{x}}) temp <- paste(line[(l+1):length(line)],collapse="") temp <- strsplit(gsub("\\]","\\]:",gsub("\\]\\[",",",temp)),":") unlist(c(line[1:l],gsub(",,",",",unlist(temp)))) } else { line } } doc<-lapply(doc,fixLazilyWrittenSyntax) ## function: process model commands eqN <- 1 processCommands <- function(doc){ fr1 <- doc[(find("mo",doc)+1):length(doc)] is.pertinent <- function(doc.l){ commands <- c("fr", "fi", "eq", "co", "va", "st", "pa") if(any(doc.l[[1]][1]==commands)){ return(TRUE) } else { return(FALSE) } } fr1 <- fr1[unlist(lapply(fr1,is.pertinent))] if(length(fr1)>0){ comm <- c() commN <- 1 eq <- list() co <- c() coN <- 1 for(i in 1:length(fr1)){ fr1[[i]] <- fr1[[i]][fr1[[i]] != ""] if(length(fr1[[i]])>1){ for(z in 2:length(fr1[[i]])){ 
if(length(unlist(strsplit(fr1[[i]][z],",")))>2){ temp <- unlist(strsplit(fr1[[i]][z],",")) fr1[[i]][z] <- paste(substr(temp[1],1,3),temp[2],",",temp[3],sep="") } } } if(fr1[[i]][1]=="fr"){ comm[[commN]] <- c(1,NA,fr1[[i]][2:length(fr1[[i]])]) commN <- commN+1 } if(any(fr1[[i]][1]==c("va","st"))){ comm[[commN]] <- c("X",fr1[[i]][2:length(fr1[[i]])]) commN <- commN+1 } if(fr1[[i]][1]=="fi"){ comm[[commN]] <- c(0,0,fr1[[i]][2:length(fr1[[i]])]) commN <- commN+1 } if(fr1[[i]][1]=="eq"){ eq[[eqN]] <- fr1[[i]][2:length(fr1[[i]])] eqN <- eqN+1 } if(fr1[[i]][1]=="co"){ tempc <- fr1[[i]][2:length(fr1[[i]])] if(length(tempc>1)){ tempc <- paste(tempc,sep="",collapse="") tempc <- strsplit(tempc,"=") } co[[coN]] <- tempc[[1]] coN <- coN+1 } if(fr1[[i]][1]=="pa"){ a <- (i+1) b <- eval(parse(text=paste(ref[find(fr1[[i]][2],ref),2]))) + a - 1 c <- 1 matr <- list() for(d in a:b){ matr[[c]] <- fr1[[d]] c <- c + 1 } matr <- lapply(matr, as.numeric.s) matr <- do.call(rbind,matr) eval(parse(text=paste(fr1[[i]][2],"<-matr"))) } } simpl <- function(x) { x <- lapply(x,unlist) x <- x[unlist(lapply(x, length) != 0)] x <- x[x != ""] } comm <- simpl(comm) eq <- simpl(eq) co <- simpl(co) outp <- list(comm,eq,co) return(list(outp,eqN)) } else { return(list(list(NULL,NULL,NULL,NULL,NULL),eqN)) } } ## function: apply model commands to matrices eqID <- 1 applyCommands <- function(commList){ comm <- commList[[1]] eq <- commList[[2]] co <- commList[[3]] outList <- list() t0 <- 1 ## apply fixed parameter values if(length(comm)>0){ for(i in 1:length(comm)){ for(z in 3:length(comm[[i]])){ if(comm[[i]][1]=="X"){ outList[[t0]] <- paste(gsub("\\[","$start[",comm[[i]][z]),"<-",comm[[i]][2],sep="") t0 <- t0 + 1 }else{ outList[[t0]] <- paste(gsub("\\[","$free[",comm[[i]][z]),"<-",comm[[i]][1],sep="") outList[[t0+1]] <- paste(gsub("\\[","$start[",comm[[i]][z]),"<-",comm[[i]][2],sep="") t0 <- t0 + 2 } } } } ## apply equality constraints to matrices if(length(eq)>0){ for(i in 1:length(eq)){ for(z in 
1:length(eq[[i]])){ qtest <- nrow(eval(parse(text=gsub("\\[","$start[",eq[[i]][z])))) if(is.null(qtest)){ qtest <- 1 } if(qtest > 1){ subN <- gsub("\\D", "", eq[[i]][z]) eq[[i]][z] <- gsub(subN, paste(subN,",",subN), eq[[i]][z]) } if(z==1){ outList[[t0]] <- paste(gsub("\\[","$constraint[",eq[[i]][z]),"<-",eqID,sep="") outList[[(t0+1)]] <- paste(gsub("\\[","$misc[",eq[[i]][z]),"<-",1,sep="") t0 <- t0 + 2 }else{ outList[[t0]] <- paste(gsub("\\[","$constraint[",eq[[i]][z]),"<-",eqID,sep="") t0 <- t0 + 1 } } eqID <- eqID + 1 } } ## apply CO parameter constraints to matrices if(length(co)>0){ for(i in 1:length(co)){ temp <- gsub('\\+','?',co[[i]][2]) temp <- gsub('\\-','?',temp) temp <- gsub('\\*','?',temp) temp <- gsub('/','?',temp) temp <- strsplit(temp,'\\?') temp <- unlist(c(co[[i]][1],temp)) is.mat <- function(input){ if(nchar(input)<4){ return(FALSE) } else{ return(TRUE) } } temp <- temp[unlist(lapply(temp,is.mat))] } } outList } ## execute processCommands, applyCommands commList1 <- processCommands(doc) eqN<-commList1[[2]] commList1<-commList1[[1]] commands1 <- applyCommands(commList1) if(length(commands1)>0){ for(i in 1:length(commands1)){ eval(parse(text=commands1[[i]])) } } ## matrix-to-parameter-table function toPara <- function(name) { ob <- eval(parse(text=name)) if(nrow(ob$free)!=0 && ncol(ob$free)!=0){ ID <- find(name, ref) ROW <- eval(parse(text=toupper(ref[ID,2]))) COL <- eval(parse(text=toupper(ref[ID,3]))) if(length(ROW)==1){ if(ROW==1){ ROW <- rep("",length(COL)) } } if(paste(ref[ID,2])==paste(ref[ID,3]) && name!="be"){ ob$start[upper.tri(ob$start)] <- 0 } if(name=="ps"){ if(is.null(be) && all(ob$free==diag(1,nrow(ob$free),ncol(ob$free)))){ diag(ob$start) <- diag(ob$start) + 99 ob$start[is.na(ob$start)] <- "NA" ob$start[ob$start[lower.tri(ob$start)]!="0"]<-(as.numeric.s(ob$start[ob$start[lower.tri(ob$start)]!="0"])+99) } else { ob$start[lower.tri(ob$start,diag=T)] <- ob$start[lower.tri(ob$start,diag=T)]+99 if(!is.null(be)){ test <- 
(be$free+be$start+t(be$free)+t(be$start))[lower.tri(ob$start,diag=T)] test[is.na(test)] <- 100 tmpPS <- ob$start[lower.tri(ob$start,diag=T)] tmpPS[test!=0] <- tmpPS[test!=0] - 99 ob$start[lower.tri(ob$start,diag=T)] <- tmpPS } } } if(name=="al" | name=="ka" | name=="ty"){ ob$start <- ob$start + 99 ob$start[is.na(ob$start)]<-"NA" ob$start <- t(as.matrix(as.character(ob$start))) }else { ob$start <- apply(ob$start,2,function(x){ x[is.na(x)]<-"NA" as.matrix(as.character(x)) }) } OP <- paste(ref[ID,4]) lhs <- c() op <- c() rhs <- c() user <- c() group <- c() free <- c() ustart <- c() exo <- c() label <- c() eq.id <- c() unco <- c() ob <- lapply(ob, as.matrix) correctPosition <- function(x){ if(length(ROW)!=nrow(x) && length(ROW)==ncol(x)){ t(x) }else{ x } } ob <- lapply(ob, correctPosition) if(any(name==c("al","ka","tx","ty"))){ ROW <- COL COL <- rep("", length(ROW)) } for(i in 1:ncol(ob$start)){ non <- 1:nrow(ob$start) non <- non[unlist(ob$start[,i]!=0)] if(length(non)!=0){ for(z in non){ lhs <- c(lhs, COL[i]) op <- c(op, OP) rhs <- c(rhs, paste(ROW[z])) user <- c(user, 1) group <- c(group, groupN) free <- c(free, ob$free[z,i]) ustart <- c(ustart, ob$start[z,i]) exo <- c(exo, 0) label <- c(label, paste(name,"_",z,"_",i,sep="")) eq.id <- c(eq.id, ob$constraint[z,i]) unco <- c(unco, ob$free[z,i]) } } } if(name=="al" | name=="ka" | name=="ty"){ ustart<-as.character(as.numeric.s(ustart)-99) ustart[is.na(ustart)]<-"NA" } if(name=="ps"){ ustart<-as.character(as.numeric.s(ustart)-99) ustart[is.na(ustart)]<-"NA" } if(any(name==c("al","ka","tx","ty"))){ lhs <- rhs rhs <- rep("", length(lhs)) } data.frame(lhs,op,rhs,user,group,free,ustart,exo,label,eq.id,unco) }else{ NULL } } ## CHECK FOR EQUALITY CONSTRAINTS BEFORE GROUP 1 PARAMETER TABLE if(ng>1){ moEQ <- mo moEQ[,2] <- 0 for(gN in 2:ng){ eval(parse(text=paste("docN<-doc",gN,sep=""))) eval(parse(text=paste("docN0<-doc0",gN,sep=""))) moN <- docN[[find("mo",docN,1)]] moN <- t(as.data.frame(strsplit(moN[2:length(moN)],"="))) ## 
check each matrix for constraints for(i in 1:nrow(moEQ)){ if(!is.null(find(moEQ[i,1],moN))){ if(moN[find(moEQ[i,1],moN),2]=="in"){ moEQ[i,2]<-1 } } } } multi.grp.eq <- function(x){ if(x[1]=="eq"){ if(any(sapply(x,function(y) {if(length(unlist(strsplit(y,",")))>1){TRUE}else{FALSE}} ))){ TRUE }else{ FALSE } } else { FALSE } } eq <- docN[sapply(docN,multi.grp.eq)] if(length(eq)>0){ t0G1 <- 1 t0GN <- 1 listG1 <- list() listGN <- list() if(is.list(eq)){ eq <- lapply(eq, function(x){x[2:length(x)]}) }else{ eq <- list(eq[2:length(eq)]) } for(i in seq_along(eq)){ for(z in 1:length(eq[[i]])){ if(length(unlist(strsplit(eq[[i]][[z]],",")))>1){ tmp <- unlist(strsplit(eq[[i]][[z]],",")) wh.gr <- as.numeric(strsplit(tmp[1],'\\[')[[1]][2]) eq[[i]][[z]] <- paste(strsplit(tmp,"\\[")[[1]][1],"[",tmp[2],",",tmp[3]) }else{ wh.gr <- 2 } qtest <- nrow(eval(parse(text=gsub("\\[","$start[",eq[[i]][z])))) if(is.null(qtest)){ qtest <- 1 } if(qtest > 1){ subN <- gsub("\\D", "", eq[[i]][z]) eq[[i]][z] <- gsub(subN, paste(subN,",",subN), eq[[i]][z]) } if(z==1){ if(wh.gr==1){ listG1[[t0G1]] <- paste(gsub("\\[","$constraint[",eq[[i]][z]),"<-",eqID,sep="") listG1[[(t0G1+1)]] <- paste(gsub("\\[","$misc[",eq[[i]][z]),"<-",1,sep="") t0G1 <- t0G1 + 2 } else { listGN[[t0GN]] <- paste(gsub("\\[","$constraint[",eq[[i]][z]),"<-",eqID,sep="") listG1[[(t0GN+1)]] <- paste(gsub("\\[","$misc[",eq[[i]][z]),"<-",1,sep="") t0GN <- t0GN + 2 } }else{ if(wh.gr==1){ listG1[[t0G1]] <- paste(gsub("\\[","$constraint[",eq[[i]][z]),"<-",eqID,sep="") t0G1 <- t0G1 + 1 } else { listGN[[t0GN]] <- paste(gsub("\\[","$constraint[",eq[[i]][z]),"<-",eqID,sep="") t0GN <- t0GN + 1 } } } eqID <- eqID + 1 } listG1<-unlist(listG1) listGN<-unlist(listGN) ## apply these constraints to group 1 if(length(listG1)>0){ for(i in listG1){eval(parse(text=i))} } }else{ listGN <- NULL } ## enter constraint requests into group 1 matrices if(sum(as.numeric.s(moEQ[,2]))>0){ g1.eq <- list() gn.eq <- list() moEQ <- unlist(moEQ[moEQ[,2]!=0,1]) liEQ <- 
list() t0 <- 1 for(i in seq_along(moEQ)){ # matN is the current matrix for which multiple group # invariance constraints are being processed matN <- eval(parse(text=moEQ[i]))$start matV <- 1:nrow(matN) for(z in 1:ncol(matN)){ for(y in matV[is.na(matN[,z]) | matN[,z]!="0"]){ g1.eq[[t0]] <- list(paste(moEQ[i],"$constraint[",y,",",z,"]<-",eqID,sep="")) g1.eq[[t0]][[2]] <- paste(moEQ[i],"$misc[", y, ",", z, "]<-", 1, sep="") gn.eq[[t0]] <- paste(moEQ[i],"$constraint[",y,",",z,"]<-",eqID,sep="") t0 <- t0 + 1 eqID <- eqID + 1 } } } g1.eq <- unlist(g1.eq) } else { g1.eq <- NULL gn.eq <- NULL } ## APPLY CONSTRAINTS TO GROUP 1 MATRICES ## create global matrices so that constraints ## are not carried to other groups for(i in 1:nrow(mo)){ eval(parse(text=paste(mo[i,1],"G<-",mo[i,1],sep=""))) } if(!is.null(g1.eq) && length(g1.eq)>1){ for(i in 1:length(g1.eq)){ eval(parse(text=g1.eq[i])) } } } ## PROCESS MATRICES TO PARAMETER TABLE, GROUP 1 endL <- c() allL <- c("lx","ly","td","te","al","ka","tx","ty","be","ps","ph","th","ga") for(i in 1:13){ if(is.null(find(allL[i],mo))==FALSE){ endL <- c(endL, allL[i]) } } tableList <- lapply(endL, toPara) parTable <- do.call(rbind,tableList) ## multiple group models, parameter table if(ng>1){ for(groupN in 2:ng){ eval(parse(text=paste("docN<-doc",groupN,sep=""))) eval(parse(text=paste("docN0<-doc0",groupN,sep=""))) docN <- docN[!sapply(docN,multi.grp.eq)] mo <- docN[[find("mo",docN,1)]] mo <- t(as.data.frame(strsplit(mo[2:length(mo)],"="))) if(macs==TRUE){ if(length(find("ty",mo))==0){ mo <- rbind(mo,c("ty","fr")) } if(length(find("al",mo))==0){ mo <- rbind(mo,c("al","fi")) } } for(i in 1:nrow(mo)){ m.typ <- unlist(strsplit(mo[i,2],",")) if(length(m.typ)>1){ m.form <- m.typ[2] m.typ <- m.typ[1] }else{ m.form <- "de" } if(m.typ=="fi"){ ## fixed eval(parse(text=(paste(mo[i,1],"$start[]","<- 0")))) eval(parse(text=(paste(mo[i,1],"$free[]","<- 0")))) eval(parse(text=(paste(mo[i,1],"$misc[]","<- '' ")))) } if(m.typ=="fr"){ ## free 
eval(parse(text=(paste(mo[i,1],"$start[]","<-NA")))) eval(parse(text=(paste(mo[i,1],"$free[]","<-1")))) eval(parse(text=(paste(mo[i,1],"$misc[]","<- '' ")))) } if(m.typ=="ps"){ ## same pattern & starting values eval(parse(text=(paste(mo[i,1],"$misc[]","<- '' ")))) } if(m.typ=="sp"){ ## same pattern eval(parse(text=(paste(mo[i,1],"$start[",mo[i,1],"$start!=0]","<-NA")))) eval(parse(text=(paste(mo[i,1],"$misc[]","<- '' ")))) } if(m.typ=="ss"){ ## same starting values ... ? eval(parse(text=(paste(mo[i,1],"$free[]","<- 0")))) eval(parse(text=(paste(mo[i,1],"$misc[]","<- '' ")))) } if(m.typ=="in"){ eval(parse(text=(paste(mo[i,1],"<-",mo[i,1],"G",sep="")))) } if(any(m.typ==c("fu","sy","ze"))){ if(any(m.form==c("fi","de"))){ eval(parse(text=(paste(mo[i,1],"$start[]","<- 0")))) eval(parse(text=(paste(mo[i,1],"$free[]","<- 0")))) eval(parse(text=(paste(mo[i,1],"$misc[]","<- '' ")))) }else{ eval(parse(text=(paste(mo[i,1],"$start[]","<-NA")))) eval(parse(text=(paste(mo[i,1],"$free[]","<-1")))) eval(parse(text=(paste(mo[i,1],"$misc[]","<- '' ")))) } } } if(!is.null(gn.eq) && length(gn.eq)>1){ for(i in unlist(gn.eq)){ eval(parse(text=i)) } } if(length(listGN)>0){ for(i in listGN){eval(parse(text=i))} } commListN <- processCommands(docN) commandsN <- applyCommands(commListN[[1]]) if(length(commandsN>0)){ for(i in 1:length(commandsN)){ eval(parse(text=commandsN[[i]])) } } ## PROCESS MATRICES TO PARAMETER TABLE, MULTIPLE GROUPS tableListN <- lapply(as.vector(mo[,1]), toPara) parTableN <- do.call(rbind,tableListN) colnames(parTableN) <- colnames(parTable) row.names(parTableN) <- NULL parTable <- rbind(parTable, parTableN) } } ## CO command constraints if(length(commList1[[3]])>1){ for(i in 1:length(commList1[[3]])){ exp <- gsub("\\[","_",commList1[[3]][[i]]) exp <- gsub(",","_",exp) exp <- gsub("\\]","",exp) exp <- data.frame(exp[1], "==",exp[2],1,0,0,"NA",0,"",0,0) colnames(exp) <- colnames(parTable) parTable <- rbind(parTable,exp) } } ## format final parameter table 
if(sum(parTable[,6])!=0){ if(sum(parTable$eq.id!=0)>0){ j <- 1 for(i in unique(parTable$eq.id[parTable$eq.id!=0])){ test <- parTable$free[((parTable$eq.id==i)+(parTable$free!=0))==2] if(length(test)>0){ parTable$free[((parTable$eq.id==i)+(parTable$free!=0))==2] <- j j <- j + 1 } } }else{ j <- 1 } parTable[(((parTable$free==1)+(parTable$eq.id==0))==2),6] <- j:(j-1+sum(((parTable$free==1)+(parTable$eq.id==0))==2)) } parTable$unco[parTable$unco!=0] <- 1:length(parTable$unco[parTable$unco!=0]) id <- 1:nrow(parTable) parTable <- data.frame(id,parTable) row.names(parTable) <- NULL parTable$id <- as.integer(as.numeric.s(parTable$id)) parTable$lhs <- as.character(parTable$lhs) parTable$op <- as.character(parTable$op) parTable$rhs <- as.character(parTable$rhs) parTable$user <- as.integer(as.numeric.s(parTable$user)) parTable$group <- as.integer(as.numeric.s(parTable$group)) parTable$free <- as.integer(as.numeric.s(parTable$free)) parTable$ustart <- as.numeric.s(as.character(parTable$ustart)) parTable$exo <- as.integer(as.numeric.s(parTable$exo)) parTable$label <- as.character(parTable$label) parTable$eq.id <- as.integer(as.numeric.s(as.character(parTable$eq.id))) parTable$unco <- as.integer(as.numeric.s(parTable$unco)) if(analyze){ for(i in 1:ng){ if(i==1){ data <- getData(doc, doc0) }else{ data <- mapply("list",data,getData( doc=eval(parse(text=paste("doc",i,sep=""))), doc0=eval(parse(text=paste("doc0",i,sep=""))) ),SIMPLIFY=FALSE) } } if(ng>1){ for(i in names(data)){ if(is.null(data[[i]][[1]])){ data[[i]] <- NULL } } } if(is.null(data$ra) | is.null(data$ra[[1]])){ for(i in 1:ng){ if(i==1){ if(ng==1){ n <- doc[[find("da",doc)]][[grep("no",doc[[find("da",doc)]])]] n <- as.numeric.s(gsub("no=","",n)) } else { n2 <- doc[[find("da",doc)]][[grep("no",doc[[find("da",doc)]])]] n <- list() n[[i]] <- as.numeric.s(gsub("no=","",n2)) } }else{ n2 <- eval(parse(text=paste("doc",i,"[[find('da',doc",i,")]][[grep('no',doc",i,"[[find('da',doc",i,")]])]]",sep=""))) n[[i]] <- 
as.numeric.s(gsub("no=","",n2)) } } } else{ n <- NULL } if(!is.null(data$sd) && is.null(data$ra)){ cr2cv <- function(x,sd){ na <- colnames(x) sd <- diag(as.vector(sd)) ou <- sd%*%x%*%t(sd) colnames(ou) <- na rownames(ou) <- na ou } for(i in 1:ng){ if(i==1){ if(ng==1){ data$cm <- cr2cv(data$km, data$sd) }else{ data$cm <- list() data$cm[[i]] <- cr2cv(data$km[[i]], data$sd[[i]]) } }else{ data$cm[[i]] <- cr2cv(data$km[[i]], data$sd[[i]]) } } } if(!is.null(data$ra)){ if(ng==1){ data$ra <- as.data.frame(data$ra) }else{ data$ra <- lapply(data$ra, as.data.frame) } } if(is.null(data$cm) && is.null(data$ra)){ invisible(suppressWarnings(lavaan::lavaan(model=parTable))) stop("lisrel2lavaan requires either 1) raw data (specified in the RA paragraph in LISREL syntax), 2) a variance-covariance matrix (the CM paragraph in LISREL syntax), or 3) a correlation matrix AND standard deviation vector (the KM and SD paragraphs respectively) in order to fit models.") } else { if(!is.null(data$me)){ macs <- T } else { macs <- F } # return(parTable) fit <- lavaan::lavaan(model=parTable,data=data$ra,sample.cov=data$cm,sample.mean=data$me,estimator=estimator,sample.nobs=n,...) 
if(silent==F){ lavaan::summary(fit, standardized=TRUE, fit.measures=TRUE) invisible(fit) }else{ return(fit) } } }else{ invisible(parTable) } } return(suppressWarnings(lisrel(filename=filename, analyze=analyze, ...=...))) setwd(restore.wd) } semTools/R/miPowerFit.R0000644000175100001440000001464613000201061014500 0ustar hornikusers# miPowerFit: Evaluate model fit by Satorra, Saris, & van der Weld (2009) method miPowerFit <- function(lavaanObj, stdLoad=0.4, cor=0.1, stdBeta=0.1, intcept=0.2, stdDelta=NULL, delta=NULL, cilevel=0.90) { mi <- lavaan::lavInspect(lavaanObj, "mi") mi <- mi[mi$op != "==",] sigma <- mi[,"epc"] / sqrt(mi[,"mi"]) if(is.null(delta)) { if(is.null(stdDelta)) stdDelta <- getTrivialEpc(mi, stdLoad=stdLoad, cor=cor, stdBeta=stdBeta, intcept=intcept) if(length(stdDelta) == 1) stdDelta <- rep(stdDelta, nrow(mi)) delta <- unstandardizeEpc(mi, stdDelta, findTotalVar(lavaanObj)) } if(length(delta) == 1) delta <- rep(delta, nrow(mi)) ncp <- (delta / sigma)^2 alpha <- 0.05 desiredPow <- 0.80 cutoff <- qchisq(1 - alpha, df = 1) pow <- 1 - pchisq(cutoff, df = 1, ncp=ncp) sigMI <- mi[,"mi"] > cutoff highPow <- pow > desiredPow group <- rep(1, nrow(mi)) if("group" %in% colnames(mi)) group <- mi[,"group"] decision <- mapply(decisionMIPow, sigMI=sigMI, highPow=highPow, epc=mi[,"epc"], trivialEpc=delta) if(is.null(stdDelta)) stdDelta <- standardizeEpc(mi, findTotalVar(lavaanObj), delta=delta) result <- cbind(mi[,1:3], group, as.numeric(mi[,"mi"]), mi[,"epc"], delta, standardizeEpc(mi, findTotalVar(lavaanObj)), stdDelta, sigMI, highPow, decision) # New method crit <- abs(qnorm((1 - cilevel)/2)) seepc <- abs(result[,6]) / sqrt(abs(result[,5])) lowerepc <- result[,6] - crit * seepc upperepc <- result[,6] + crit * seepc stdlowerepc <- standardizeEpc(mi, findTotalVar(lavaanObj), delta = lowerepc) stdupperepc <- standardizeEpc(mi, findTotalVar(lavaanObj), delta = upperepc) isVar <- mi[,"op"] == "~~" & mi[,"lhs"] == mi[,"rhs"] decisionci <- mapply(decisionCIEpc, 
targetval=as.numeric(stdDelta), lower=stdlowerepc, upper=stdupperepc, positiveonly=isVar) result <- cbind(result, seepc, lowerepc, upperepc, stdlowerepc, stdupperepc, decisionci) result <- result[!is.na(decision),] colnames(result) <- c("lhs", "op", "rhs", "group", "mi", "epc", "target.epc", "std.epc", "std.target.epc", "significant.mi", "high.power", "decision.pow", "se.epc", "lower.epc", "upper.epc", "lower.std.epc", "upper.std.epc", "decision.ci") result <- format(result, scientific=FALSE, digits=4) return(result) } # totalFacVar: Find total factor variances when regression coeffient matrix and factor residual covariance matrix are specified totalFacVar <- function(beta, psi) { ID <- diag(nrow(psi)) total <- solve(ID - beta) %*% psi %*% t(solve(ID - beta)) return(diag(total)) } # findTotalVar: find the total indicator and factor variances findTotalVar <- function(lavaanObj) { result <- list() nGroups <- lavaan::lavInspect(lavaanObj, "ngroups") cov.all <- lavaan::lavInspect(lavaanObj, "cov.all") if(nGroups == 1) cov.all <- list(cov.all) for(i in 1:nGroups) { temp <- diag(cov.all[[i]]) names(temp) <- rownames(cov.all[[i]]) result[[i]] <- temp } return(result) } # getTrivialEpc: find the trivial misspecified expected parameter changes given the type of parameters in each row of modification indices getTrivialEpc <- function(mi, stdLoad=0.4, cor=0.1, stdBeta=0.1, intcept=0.2) { op <- mi[,"op"] result <- gsub("=~", stdLoad, op) result <- gsub("~~", cor, result) result <- gsub("~1", intcept, result) result <- gsub("~", stdBeta, result) return(result) } # unstandardizeEpc: Transform from standardized EPC to unstandardized EPC unstandardizeEpc <- function(mi, delta, totalVar) { name <- names(totalVar[[1]]) lhsPos <- match(mi[,"lhs"], name) rhsPos <- match(mi[,"rhs"], name) group <- rep(1, nrow(mi)) if("group" %in% colnames(mi)) group <- mi[,"group"] getVar <- function(pos, group) totalVar[[group]][pos] lhsVar <- mapply(getVar, pos=lhsPos, group=group) rhsVar <- 
mapply(getVar, pos=rhsPos, group=group) FUN <- function(op, lhsVar, rhsVar, delta) { if(op == "|") return(NA) lhsSD <- sqrt(lhsVar) rhsSD <- sqrt(rhsVar) if(!is.numeric(delta)) delta <- as.numeric(delta) if(op == "=~") { return((rhsSD * delta) / lhsSD) } else if (op == "~~") { return(lhsSD * delta * rhsSD) } else if (op == "~1") { return(lhsSD * delta) } else if (op == "~") { return((lhsSD * delta) / rhsSD) } else { return(NA) } } unstdDelta <- mapply(FUN, op=mi[,"op"], lhsVar=lhsVar, rhsVar=rhsVar, delta=delta) return(unstdDelta) } # unstandardizeEpc: Transform from unstandardized EPC to standardized EPC. If delta is null, the unstandardized epc from the modification indices data.frame are used standardizeEpc <- function(mi, totalVar, delta=NULL) { if(is.null(delta)) delta <- mi[,"epc"] name <- names(totalVar[[1]]) lhsPos <- match(mi[,"lhs"], name) rhsPos <- match(mi[,"rhs"], name) group <- rep(1, nrow(mi)) if("group" %in% colnames(mi)) group <- mi[,"group"] getVar <- function(pos, group) totalVar[[group]][pos] lhsVar <- mapply(getVar, pos=lhsPos, group=group) rhsVar <- mapply(getVar, pos=rhsPos, group=group) FUN <- function(op, lhsVar, rhsVar, delta) { lhsSD <- sqrt(lhsVar) rhsSD <- sqrt(rhsVar) if(!is.numeric(delta)) delta <- as.numeric(delta) if(op == "=~") { #stdload = beta * sdlatent / sdindicator = beta * lhs / rhs return((delta / rhsSD) * lhsSD) } else if (op == "~~") { #r = cov / (sd1 * sd2) return(delta / (lhsSD * rhsSD)) } else if (op == "~1") { #d = meanDiff/sd return(delta / lhsSD) } else if (op == "~") { #beta = b * sdX / sdY = b * rhs / lhs return((delta / lhsSD) * rhsSD) } else { return(NA) } } stdDelta <- mapply(FUN, op=mi[,"op"], lhsVar=lhsVar, rhsVar=rhsVar, delta=delta) return(stdDelta) } # decisionMIPow: provide the decision given the significance of modification indices and power to detect trivial misspecification decisionMIPow <- function(sigMI, highPow, epc, trivialEpc) { if(is.na(sigMI) | is.na(highPow)) return(NA) if(sigMI & highPow) { 
if(abs(epc) > abs(trivialEpc)) { return("EPC:M") } else { return("EPC:NM") } } else if (sigMI & !highPow) { return("M") } else if (!sigMI & highPow) { return("NM") } else if (!sigMI & !highPow) { return("I") } else { return(NA) } } decisionCIEpc <- function(targetval, lower, upper, positiveonly = FALSE) { if(is.na(lower) | is.na(upper)) return(NA) if(positiveonly) { if(lower > targetval) { return("M") } else if (upper < targetval) { return("NM") } else { return("I") } } else { negtargetval <- -targetval if(lower > targetval | upper < negtargetval) { return("M") } else if (upper < targetval & negtargetval < lower) { return("NM") } else { return("I") } } } semTools/R/compareFit.R0000644000175100001440000001301313000250017014474 0ustar hornikusers### Sunthud Pornprasertmanit ### Last updated: 14 October 2016 setClass("FitDiff", representation(name = "vector", nested = "data.frame", ordernested = "vector", fit="data.frame")) isNested <- function(object) length(object@ordernested) > 1 || !is.na(object@ordernested) noLeadingZero <- function(vec, fmt) { out <- sprintf(fmt, vec) used <- vec < 1 & vec >= 0 used[is.na(used)] <- FALSE out[used] <- substring(out[used], 2) out } setMethod("show", signature(object = "FitDiff"), function(object) { summary(object) }) setMethod("summary", signature(object = "FitDiff"), function(object, fit.measures = "default") { if(isNested(object)) { cat("################### Nested Model Comparison #########################\n") print(getNestedTable(object)) cat("\n") } cat("#################### Fit Indices Summaries ##########################\n") print(getFitSummary(object, fit.measures)) }) getNestedTable <- function(object) { ord <- object@ordernested nameDiff <- paste(object@name[ord[-1]], "-", object@name[ord[-length(ord)]]) pprint <- noLeadingZero(object@nested$p, "%4.3f") pprint[object@nested$p < 0.001] <- " <.001" nestedTab <- object@nested nestedTab$chi <- sprintf("%.2f", object@nested$chi) nestedTab$p <- pprint nestedTab$delta.cfi <- 
sprintf("%.4f", object@nested$delta.cfi) nestedTab <- data.frame(nestedTab) rownames(nestedTab) <- nameDiff nestedTab[nrow(nestedTab):1,] } getFitSummary <- function(object, fit.measures = "default") { if(is.null(fit.measures)) fit.measures <- "all" if(length(fit.measures) == 1) { if(fit.measures == "default") { fit.measures <- c("chisq", "df", "pvalue", "cfi", "tli", "rmsea", "srmr", "aic", "bic") } else if (fit.measures == "all") { fit.measures <- colnames(object@fit) } } fitTab <- object@fit orderThing <- rep(NA, ncol(fitTab)) orderThing[colnames(fitTab) %in% c("rmsea", "aic", "bic", "bic2", "srmr", "srmr_nomean", "rmr", "rmr_nomean", "ecvi")] <- TRUE orderThing[colnames(fitTab) %in% c("pvalue", "cfi", "tli", "nnfi", "rfi", "nfi", "pnfi", "ifi", "rni", "cn_05", "cn_01", "gfi", "agfi", "pgfi", "mfi")] <- FALSE isDF <- rep(FALSE, ncol(fitTab)) isDF[grep("df", colnames(fitTab))] <- TRUE suppressWarnings(fitTab <- as.data.frame(mapply(tagDagger, fitTab, orderThing, is.df=isDF))) rownames(fitTab) <- object@name fitTab[,colnames(fitTab) %in% fit.measures] } saveFileFitDiff <- function(object, filewrite, what="summary", tableFormat=FALSE, fit.measures = "default") { if(tableFormat) { filetemplate <- file(filewrite, 'w') if(isNested(object)) { cat("Nested Model Comparison\n\n", file=filetemplate) out <- getNestedTable(object) out <- data.frame(model.diff = rownames(out), out) write.table(out, file=filetemplate, sep="\t", quote=FALSE, row.names=FALSE) cat("\n\n", file=filetemplate) } out2 <- getFitSummary(object, fit.measures) out2 <- data.frame(model = object@name, out2) cat("Fit Indices Summaries\n\n", file=filetemplate) write.table(out2, file=filetemplate, sep="\t", quote=FALSE, row.names=FALSE) close(filetemplate) } else { write(paste(capture.output(lavaan::summary(object)), collapse="\n"), file=filewrite) } } tagDagger <- function(vec, minvalue = NA, is.df = FALSE) { if(is.na(minvalue)) { if(is.df) { vec <- noLeadingZero(vec, fmt="%.0f") } else { vec <- 
noLeadingZero(vec, fmt="%.3f") } } else { target <- max(vec, na.rm=TRUE) if (minvalue) { target <- min(vec, na.rm=TRUE) } tag <- rep(" ", length(vec)) tag[vec == target] <- "\u2020" vec <- noLeadingZero(vec, fmt="%.3f") vec <- paste0(vec, tag) } vec } compareFit <- function(..., nested = TRUE) { arg <- match.call() mods <- input <- list(...) if(any(sapply(mods, is, "list"))) { temp <- list() for(i in seq_along(mods)) { if(!is(mods[[i]], "list")) { temp <- c(temp, list(mods[[i]])) } else { temp <- c(temp, mods[[i]]) } } mods <- temp } if(any(!sapply(mods, is, "lavaan"))) stop("Some models specified here are not lavaan outputs or list of lavaan outputs") nameMods <- NULL tempname <- as.list(arg)[-1] if(!is.null(names(tempname))) tempname <- tempname[!(names(tempname) %in% "nested")] tempname <- lapply(tempname, as.character) for(i in seq_along(input)) { if(is(input[[i]], "list")) { if(length(tempname[[i]]) == 1) { temp2 <- paste0(tempname[[i]], "[[", seq_along(input[[i]]), "]]") if(!is.null(names(input[[i]]))) temp2 <- names(input[[i]]) nameMods <- c(nameMods, temp2) } else { temp2 <- tempname[[i]][tempname[[i]] != "list"] nameMods <- c(nameMods, temp2) } } else { nameMods <- c(nameMods, tempname[[i]]) } } nestedout <- data.frame() ord <- NA if(nested) { dfs <- sapply(mods, function(x) lavaan::fitMeasures(x)["df"]) ord <- order(dfs, decreasing = TRUE) modsTemp <- mods[ord] modsA <- modsTemp[-1] modsB <- modsTemp[-length(mods)] chisqdiff <- NULL dfdiff <- NULL pdiff <- NULL cfidiff <- NULL # Need the for loop because the mapply function does not work. 
for(i in seq_along(modsA)) { fitA <- modsA[[i]] fitB <- modsB[[i]] fitDiff <- lavaan::anova(fitA, fitB) cfidiff <- c(cfidiff, lavaan::fitMeasures(fitA)["cfi"] - lavaan::fitMeasures(fitB)["cfi"]) chisqdiff <- c(chisqdiff, fitDiff["Chisq diff"][2, 1]) dfdiff <- c(dfdiff, fitDiff["Df diff"][2, 1]) pdiff <- c(pdiff, fitDiff["Pr(>Chisq)"][2, 1]) } nestedout <- data.frame(chi = chisqdiff, df = dfdiff, p = pdiff, delta.cfi = cfidiff) } fit <- as.data.frame(t(sapply(mods, lavaan::fitMeasures))) new("FitDiff", name = nameMods, nested = nestedout, ordernested = ord, fit = fit) } semTools/R/kd.R0000644000175100001440000000335113000201061013000 0ustar hornikusers"kd" <- function(covmat, n, type=c("exact","sample")) { ## Kaiser-Dickman (1962) algorithm for generating sample data ## based on the input covmat, which is a covariance matrix. ## ## n is desired sample size ## type="exact" returns data matrix that yields the exact covmat; ## type="sample" returns sample data, treating covmat as population matrix ## ## Returns the sample data matrix, dat ## Code written by Edgar Merkle, University of Missouri type <- match.arg(type) ## Check to ensure that covmat is a valid covariance matrix. 
if(nrow(covmat) != ncol(covmat)) stop("non-square matrix supplied") symmetric <- isSymmetric.matrix(covmat) if(!symmetric) stop("non-symmetric matrix supplied") pd <- all(eigen(covmat, only.values=TRUE)$values > 0) if(!pd) stop("covariance matrix is not positive definite") p <- nrow(covmat) ## Algorithm works on a correlation matrix mv.vars <- matrix(0, nrow(covmat), nrow(covmat)) diag(mv.vars) <- sqrt(diag(covmat)) cormat <- cov2cor(covmat) ## Generate standard normal data and mean center each variable Xscore <- matrix(rnorm(p*n), p, n) Xsub0 <- t(apply(Xscore, 1, scale, scale=FALSE)) ## Correlation matrix factored via Cholesky decomposition: Fcomp <- t(chol(cormat)) ## Equation 2 from K&D: Zhat <- Fcomp %*% Xscore ## Equation 3 from K&D: Xsub0.prod <- Xsub0 %*% t(Xsub0) ## Get singular value decomp of Xsub0.prod Xsub0.svd <- svd(Xsub0.prod) M.sqrt <- matrix(0,p,p) diag(M.sqrt) <- 1/sqrt(Xsub0.svd$d) ## Equation 5 from K&D: Z <- Fcomp %*% M.sqrt %*% t(Xsub0.svd$u) %*% Xsub0 Z <- Z*sqrt(n) dat <- Z if (type=="sample"){dat <- Zhat} ## Scale data to correspond to covmat dat <- t(dat) %*% mv.vars dat } semTools/R/monteCarloMed.R0000644000175100001440000000420313000201061015130 0ustar hornikusers## Monte Carlo test of mediation for complex mediation cases ## Corbin Quick, Alex Schoemann, James Selig ## Function that takes an expression for an indirect effect, related parameter estimates and SEs and outputs a Monte Carlo SE ##Output: matrix of LL and UL, optional plot of indirect effect, or values of indirect effect. monteCarloMed<-function(expression, ..., ACM=NULL, object = NULL, rep=20000, CI=95, plot=FALSE, outputValues=FALSE){ input<- c(...) 
#Get names and the number of unique variables in the expression uniquepar<-function(var){ var<-gsub(" ","",var) var<-strsplit(var,'+',fixed=TRUE) var<-strsplit(var[[1]],'*',fixed=TRUE) varb<-var[[1]] if(length(var)>1){ for(i in 2:length(var)){ varb<-c(varb,var[[i]])} var<-unique(varb)} if(is.list(var)){var<-var[[1]]} return(var)} paramnames<-uniquepar(expression) #If input is a lavaan object pull out coefs and ACM if(class(object)=="lavaan"){ input <- lavaan::coef(object)[paramnames] ACM <- lavaan::vcov(object)[paramnames,paramnames] } vecs<-list() #Matrix of values, need to be converted to a list dat <- MASS::mvrnorm(n=rep, mu=input, Sigma=ACM) #Add parameters as the first row dat <-rbind(input, dat) #Convert to a list, vecs<-as.list(as.data.frame(dat)) #Give names to it works with assign for(i in 1:length(vecs)){assign(paramnames[i],vecs[[i]])} #Apply the expression to compute the indirect effect indirect<-eval(parse(text=expression)) #Get the CI low=(1-CI/100)/2 upp=((1-CI/100)/2)+(CI/100) LL=round(quantile(indirect[-1],low),digits=4) UL=round(quantile(indirect[-1],upp),digits=4) interval<-list(indirect[1],rbind(LL,UL)) dimnames(interval[[2]]) <- list(c("LL", "UL"),c(" ")) names(interval) <- c("Point Estimate", paste(CI, "% Confidence Interval", sep="")) #Switch for outputting a plot if(plot) { hist(indirect,breaks='FD',col='skyblue',xlab=paste(CI,'% Confidence Interval ','LL',LL,' UL',UL), main='Distribution of Indirect Effect') } #Switch to return simulated values if(outputValues) { interval <- list(interval, indirect) } return(interval) } semTools/R/loadingFromAlpha.R0000644000175100001440000000036013000201061015606 0ustar hornikusers# loadingFromAlpha: Find a standardized factor loading that provide a specified # alpha value loadingFromAlpha <- function(alpha, ni) { denominator <- ni - ((ni - 1) * alpha) result <- sqrt(alpha/denominator) return(result) } semTools/R/fitOpenMx.R0000644000175100001440000010574113000201061014321 0ustar hornikusers saturateMx <- 
function(data, groupLab = NULL) { multipleGroup <- FALSE if(is.data.frame(data) && !is.null(groupLab) && groupLab %in% colnames(data)) multipleGroup <- TRUE if(is.list(data) && !is.data.frame(data)) multipleGroup <- TRUE if(multipleGroup) { if(is.data.frame(data)) { data.l <- split(data, data[,groupLab]) data.l <- lapply(data.l, function(x) x[-ncol(x)]) ngroups <- length(data.l) } else if(is.list(data)) { data.l <- data ngroups <- length(data.l) } else { stop("The data argument must be a data frame or a list of MxData objects") } temp <- mapply(saturateMxSingleGroup, data = data.l, title = paste0("group", 1:ngroups), groupnum = 1:ngroups, SIMPLIFY=FALSE) title <- "Multiple group Saturate Model" asdf <- NULL algebra <- OpenMx::mxAlgebra(asdf, name="allobjective") groupnames <- paste0("group", 1:ngroups) groupnames <- paste0(groupnames, ".objective") groupnames <- lapply(groupnames, as.name) algebra@formula <- as.call(c(list(as.name("sum")), groupnames)) objective <- OpenMx::mxFitFunctionAlgebra("allobjective") Saturate <- OpenMx::mxModel(title, unlist(temp), algebra, objective) } else { Saturate <- saturateMxSingleGroup(data, title = "Saturate Model") } capture.output(fit <- OpenMx::mxRun(Saturate, suppressWarnings = FALSE, silent = TRUE)) fit } saturateMxSingleGroup <- function(data, title = "Saturate Model", groupnum = NULL) { if(!is(data, "MxData")) { data <- OpenMx::mxData( observed=data, type="raw") } p <- ncol(data@observed) if(data@type == "raw") { categorical <- rep(FALSE, p) for(i in seq_len(p)) { categorical[i] <- "ordered" %in% class(data@observed[,i]) } startMeans <- apply(data@observed, 2, function(x) mean(as.numeric(x), na.rm=TRUE)) startVar <- apply(data@observed, 2, var, na.rm=TRUE) } else { categorical <- rep(FALSE, p) if(!all(is.na(data@means))) { startMeans <- data@means } else { startMeans <- rep(0, p) } startVar <- diag(data@observed) } startCor <- diag(p) startVar[categorical] <- 1 startMeans[categorical] <- 0 startCov <- 
lavaan::cor2cov(startCor, sqrt(startVar)) lab <- outer(1:p, 1:p, function(x, y) paste0("cov", x, y, "_", groupnum)) lab2 <- outer(1:p, 1:p, function(x, y) paste0("cov", y, x, "_", groupnum)) lab[upper.tri(lab)] <- lab2[upper.tri(lab2)] freeMean <- !categorical freeCov <- matrix(TRUE, p, p) diag(freeCov) <- !categorical if(any(categorical)) { labCategorical <- colnames(data@observed)[categorical] datCategorical <- data@observed[,categorical, drop=FALSE] numCat <- apply(datCategorical, 2, function(x) length(unique(x))) maxCat <- max(numCat) FUN <- function(x, tot) c(rep(TRUE, x), rep(FALSE, tot-x)) freeThreshold <- sapply(numCat - 1, FUN, maxCat - 1) FUN2 <- function(x, tot) { x <- x[!is.na(x)] f <- table(x)/length(x) f <- cumsum(f)[-length(f)] f <- qnorm(f) c(f, rep(NA, tot - length(f))) } valueThreshold <- sapply(datCategorical, FUN2, maxCat - 1) T <- OpenMx::mxMatrix( type="Full", nrow=maxCat - 1, ncol=length(labCategorical), free=freeThreshold, values=valueThreshold, dimnames=list(c(), labCategorical), byrow=TRUE, name="thresh" ) Saturate <- OpenMx::mxModel(title, data, # means OpenMx::mxMatrix( type="Full", nrow=1, ncol=p, values=startMeans, free=freeMean, labels=paste0("mean", 1:p, "_", groupnum), name="M" ), # symmetric paths OpenMx::mxMatrix( type="Symm", nrow=p, ncol=p, values=startCov, free=freeCov, labels=lab, byrow=TRUE, name="S" ), T, OpenMx::mxExpectationNormal( covariance="S", means="M", dimnames=colnames(data@observed), thresholds = "thresh" ), OpenMx::mxFitFunctionML() ) } else { if(data@type == "raw") { obj <- OpenMx::mxExpectationNormal( covariance="S", means="M", dimnames=colnames(data@observed) ) modelMean <- OpenMx::mxMatrix( type="Full", nrow=1, ncol=p, values=startMeans, free=freeMean, labels=paste0("mean", 1:p, "_", groupnum), name="M" ) } else { if(!all(is.na(data@means))) { modelMean <- OpenMx::mxMatrix( type="Full", nrow=1, ncol=p, values=startMeans, free=freeMean, labels=paste0("mean", 1:p, "_", groupnum), name="M" ) obj <- 
OpenMx::mxExpectationNormal( covariance="S", means="M", dimnames=colnames(data@observed) ) } else { modelMean <- NULL obj <- OpenMx::mxExpectationNormal( covariance="S", dimnames=colnames(data@observed) ) } } Saturate <- OpenMx::mxModel(title, data, # means modelMean, # symmetric paths OpenMx::mxMatrix( type="Symm", nrow=p, ncol=p, values=startCov, free=freeCov, labels=lab, byrow=TRUE, name="S" ), obj, OpenMx::mxFitFunctionML() ) } Saturate } nullMx <- function(data, groupLab = NULL) { multipleGroup <- FALSE if(is.data.frame(data) && !is.null(groupLab) && groupLab %in% colnames(data)) multipleGroup <- TRUE if(is.list(data) && !is.data.frame(data)) multipleGroup <- TRUE if(multipleGroup) { if(is.data.frame(data)) { data.l <- split(data, data[,groupLab]) data.l <- lapply(data.l, function(x) x[-ncol(x)]) ngroups <- length(data.l) } else if(is.list(data)) { data.l <- data ngroups <- length(data.l) } else { stop("The data argument must be a data frame or a list of MxData objects") } temp <- mapply(nullMxSingleGroup, data = data.l, title = paste0("group", 1:ngroups), groupnum = 1:ngroups, SIMPLIFY=FALSE) title <- "Multiple group Null Model" asdf <- NULL algebra <- OpenMx::mxAlgebra(asdf, name="allobjective") groupnames <- paste0("group", 1:ngroups) groupnames <- paste0(groupnames, ".objective") groupnames <- lapply(groupnames, as.name) algebra@formula <- as.call(c(list(as.name("sum")), groupnames)) objective <- OpenMx::mxFitFunctionAlgebra("allobjective") Null <- OpenMx::mxModel(title, unlist(temp), algebra, objective) } else { Null <- nullMxSingleGroup(data, title = "Null Model") } capture.output(fit <- OpenMx::mxRun(Null, suppressWarnings = FALSE, silent = TRUE)) fit } nullMxSingleGroup <- function(data, title = "Null Model", groupnum = NULL) { if(!is(data, "MxData")) { data <- OpenMx::mxData( observed=data, type="raw") } p <- ncol(data@observed) if(data@type == "raw") { categorical <- rep(FALSE, p) for(i in seq_len(p)) { categorical[i] <- "ordered" %in% 
class(data@observed[,i]) } startMeans <- apply(data@observed, 2, function(x) mean(as.numeric(x), na.rm=TRUE)) startVar <- apply(data@observed, 2, var, na.rm=TRUE) } else { categorical <- rep(FALSE, p) if(!all(is.na(data@means))) { startMeans <- data@means } else { startMeans <- rep(0, p) } startVar <- diag(data@observed) } startVar[categorical] <- 1 startMeans[categorical] <- 0 lab <- paste0("var", 1:p, "_", groupnum) freeMean <- !categorical if(any(categorical)) { labCategorical <- colnames(data@observed)[categorical] datCategorical <- data@observed[,categorical, drop=FALSE] numCat <- apply(datCategorical, 2, function(x) length(unique(x))) maxCat <- max(numCat) FUN <- function(x, tot) c(rep(TRUE, x), rep(FALSE, tot-x)) freeThreshold <- sapply(numCat - 1, FUN, maxCat - 1) FUN2 <- function(x, tot) { f <- table(x)/length(x) f <- cumsum(f)[-length(f)] f <- qnorm(f) c(f, rep(NA, tot - length(f))) } valueThreshold <- sapply(datCategorical, FUN2, maxCat - 1) T <- OpenMx::mxMatrix( type="Full", nrow=maxCat - 1, ncol=length(labCategorical), free=freeThreshold, values=valueThreshold, dimnames=list(c(), labCategorical), byrow=TRUE, name="thresh" ) NullModel <- OpenMx::mxModel(title, data, # means OpenMx::mxMatrix( type="Full", nrow=1, ncol=p, values=startMeans, free=freeMean, labels=paste0("mean", 1:p, "_", groupnum), name="M" ), # symmetric paths OpenMx::mxMatrix( type="Diag", nrow=p, ncol=p, values=startVar, free=freeMean, labels=lab, byrow=TRUE, name="S" ), T, OpenMx::mxExpectationNormal( covariance="S", means="M", dimnames=colnames(data@observed), thresholds = "thresh" ), OpenMx::mxFitFunctionML() ) } else { if(data@type == "raw") { obj <- OpenMx::mxExpectationNormal( covariance="S", means="M", dimnames=colnames(data@observed) ) modelMean <- OpenMx::mxMatrix( type="Full", nrow=1, ncol=p, values=startMeans, free=freeMean, labels=paste0("mean", 1:p, "_", groupnum), name="M" ) } else { if(!all(is.na(data@means))) { modelMean <- OpenMx::mxMatrix( type="Full", nrow=1, ncol=p, 
values=startMeans, free=freeMean, labels=paste0("mean", 1:p, "_", groupnum), name="M" ) obj <- OpenMx::mxExpectationNormal( covariance="S", means="M", dimnames=colnames(data@observed) ) } else { modelMean <- NULL obj <- OpenMx::mxExpectationNormal( covariance="S", dimnames=colnames(data@observed) ) } } NullModel <- OpenMx::mxModel(title, data, # means modelMean, # symmetric paths OpenMx::mxMatrix( type="Diag", nrow=p, ncol=p, values=startVar, free=freeMean, labels=lab, byrow=TRUE, name="S" ), obj, OpenMx::mxFitFunctionML() ) } NullModel } checkConvergence <- function(object) { (object@output$status[[1]] %in% c(0,1)) & (object@output$status[[2]] == 0) } fitMeasuresMx <- function(object, fit.measures="all") { mxMixture <- FALSE if(length(object@submodels) > 1) { if(is.null(object@submodels[[1]]@data)) mxMixture <- TRUE } if(length(object@submodels) > 1 & !mxMixture) { varnames <- lapply(object@submodels, function(x) { out <- x@expectation@dims if(any(is.na(out))) out <- x@manifestVars out }) dat <- lapply(object@submodels, slot, "data") FUN <- function(x, var) { if(x@type == "raw") { x@observed <- x@observed[,intersect(var, colnames(x@observed))] } x } dat <- mapply(FUN, x=dat, var=varnames) } else { dat <- object@data if(!mxMixture) { varnames <- object@expectation@dims if(any(is.na(varnames))) varnames <- object@manifestVars dat@observed <- dat@observed[,intersect(varnames, colnames(dat@observed)), drop=FALSE] } } if(length(object@output) == 0) { stop("The target model has not been estimated yet.") } if(!checkConvergence(object)) { warning("The target model may be not convergent.") } if("all" %in% fit.measures) { class.flag <- TRUE } else { class.flag <- FALSE } # reference: Muthen technical Appendix 5 # collect info from the lavaan slots if(length(object@submodels) > 1 & !mxMixture) { N <- sum(sapply(dat, slot, "numObs")) } else { N <- dat@numObs } # Does not account for equality constraints imposed in MxAlgebra npar <- length(object@output$estimate) multigroup <- 
length(object@submodels) > 1 G <- length(object@submodels) # number of groups if(G == 0) G <- 1 # Correct when there is no submodel if(multigroup) { if(is(object@submodels[[1]]@expectation, "MxExpectationRAM")) { meanstructure <- !all(is.na(object@submodels[[1]]@expectation@M)) # Only ML objective categorical <- !all(is.na(object@submodels[[1]]@expectation@thresholds)) # Only ML objective } else { meanstructure <- !all(is.na(object@submodels[[1]]@expectation@means)) # Only ML objective categorical <- !all(is.na(object@submodels[[1]]@expectation@thresholds)) # Only ML objective } } else { if(is(object@expectation, "MxExpectationRAM")) { meanstructure <- !all(is.na(object@expectation@M)) # Only ML objective categorical <- !all(is.na(object@expectation@thresholds)) # Only ML objective } else { meanstructure <- !all(is.na(object@expectation@means)) # Only ML objective categorical <- !all(is.na(object@expectation@thresholds)) # Only ML objective } } # define 'sets' of fit measures: # basic chi-square test fit.chisq <- c("chisq", "df", "pvalue") # basline model fit.baseline <- c("baseline.chisq", "baseline.df", "baseline.pvalue") # cfi/tli fit.cfi.tli <- c("cfi", "tli") # more incremental fit indices fit.incremental <- c("cfi", "tli", "nnfi", "rfi", "nfi", "pnfi", "ifi", "rni") # likelihood based measures fit.logl <- c("logl", "npar", "aic", "bic", "ntotal", "bic2") # rmsea fit.rmsea <- c("rmsea", "rmsea.ci.lower", "rmsea.ci.upper", "rmsea.pvalue") # srmr if(categorical) { fit.srmr <- c("wrmr") fit.srmr2 <- c("wrmr") } else { fit.srmr <- c("srmr") fit.srmr2 <- c("rmr", "rmr_nomean", "srmr", # the default "srmr_bentler", "srmr_bentler_nomean", "srmr_bollen", "srmr_bollen_nomean", "srmr_mplus", "srmr_mplus_nomean") } # various fit.other <- c("cn_05","cn_01","mfi") if(!categorical && G == 1) { fit.other <- c(fit.other, "ecvi") } # lower case fit.measures <- tolower(fit.measures) # select 'default' fit measures # Check ML Categorical in OpenMx if(length(fit.measures) == 1L) 
{ if(fit.measures == "default") { fit.measures <- c(fit.chisq, fit.baseline, fit.cfi.tli, fit.logl, fit.rmsea, fit.srmr, "saturate.status", "null.status") } else if(fit.measures == "all") { fit.measures <- c(fit.chisq, fit.baseline, fit.incremental, fit.logl, fit.rmsea, fit.srmr2, fit.other, "saturate.status", "null.status") } } objectSat <- saturateMx(dat) objectNull <- nullMx(dat) # main container indices <- list() # Number of free parameters if("npar" %in% fit.measures) { indices["npar"] <- npar } if("logl" %in% fit.measures || "aic" %in% fit.measures || "bic" %in% fit.measures) { # Use the definition in OpenMx logl.H0 <- (-1/2) * (object@output$Minus2LogLikelihood - objectSat@output$Minus2LogLikelihood ) if("logl" %in% fit.measures) { indices["logl"] <- logl.H0 } # AIC AIC <- -2*logl.H0 + 2*npar if("aic" %in% fit.measures) { indices["aic"] <- AIC } # BIC if("bic" %in% fit.measures) { BIC <- -2*logl.H0 + npar*log(N) indices["bic"] <- BIC # add sample-size adjusted bic N.star <- (N + 2) / 24 BIC2 <- -2*logl.H0 + npar*log(N.star) indices["bic2"] <- BIC2 } } if(!mxMixture) { if(multigroup) { defVars <- lapply(object@submodels, findDefVars) defVars <- do.call(c, defVars) } else { defVars <- findDefVars(object) } } if(mxMixture || length(defVars) > 0) { out <- unlist(indices[intersect(fit.measures, names(indices))]) return(out) } if(length(objectSat@output) == 0) { stop("The saturated model has not been estimated yet.") } if(length(objectNull@output) == 0) { stop("The null model has not been estimated yet.") } if(length(objectNull@output) == 0) { stop("The null model has not been estimated yet.") } if(length(object@output) == 0) { stop("The model has not been estimated yet.") } # has the model converged? 
if(!checkConvergence(objectSat)) { warning("The saturated model may be not convergent.") } if(!checkConvergence(objectNull)) { warning("The null model may be not convergent.") } X2 <- object@output$Minus2LogLikelihood - objectSat@output$Minus2LogLikelihood df <- length(objectSat@output$estimate) - length(object@output$estimate) indices["saturate.status"] <- objectSat@output$status[[1]] indices["null.status"] <- objectNull@output$status[[1]] if(objectSat@output$status[[2]] != 0) indices["saturate.status"] <- -1 if(objectNull@output$status[[2]] != 0) indices["null.status"] <- -1 # Chi-square value estimated model (H0) if(any("chisq" %in% fit.measures)) { indices["chisq"] <- X2 } if(any("df" %in% fit.measures)) { indices["df"] <- df } if(any(c("pvalue") %in% fit.measures)) { indices["pvalue"] <- pchisq(X2, df, lower.tail = FALSE) } if(any(c("cfi", "tli", "nnfi", "pnfi", "rfi", "nfi", "ifi", "rni", "baseline.chisq", "baseline.pvalue") %in% fit.measures)) { X2.null <- objectNull@output$Minus2LogLikelihood - objectSat@output$Minus2LogLikelihood df.null <- length(objectSat@output$estimate) - length(objectNull@output$estimate) # check for NAs if(is.na(X2) || is.na(df) || is.na(X2.null) || is.na(df.null)) { indices[fit.incremental] <- as.numeric(NA) } else { if("baseline.chisq" %in% fit.measures) { indices["baseline.chisq"] <- X2.null } if("baseline.df" %in% fit.measures) { indices["baseline.df"] <- df.null } if("baseline.pvalue" %in% fit.measures) { indices["baseline.pvalue"] <- pchisq(X2.null, df.null, lower.tail = FALSE) } # CFI - comparative fit index (Bentler, 1990) if("cfi" %in% fit.measures) { t1 <- max( c(X2 - df, 0) ) t2 <- max( c(X2 - df, X2.null - df.null, 0) ) if(t1 == 0 && t2 == 0) { indices["cfi"] <- 1 } else { indices["cfi"] <- 1 - t1/t2 } } # TLI - Tucker-Lewis index (Tucker & Lewis, 1973) # same as # NNFI - nonnormed fit index (NNFI, Bentler & Bonett, 1980) if("tli" %in% fit.measures || "nnfi" %in% fit.measures) { if(df > 0) { t1 <- X2.null/df.null - X2/df 
t2 <- X2.null/df.null - 1 # note: TLI original formula was in terms of fx/df, not X2/df # then, t1 <- fx_0/df.null - fx/df # t2 <- fx_0/df.null - 1/N (or N-1 for wishart) if(t1 < 0 && t2 < 0) { TLI <- 1 } else { TLI <- t1/t2 } } else { TLI <- 1 } indices["tli"] <- indices["nnfi"] <- TLI } # RFI - relative fit index (Bollen, 1986; Joreskog & Sorbom 1993) if("rfi" %in% fit.measures) { if(df > 0) { t1 <- X2.null/df.null - X2/df t2 <- X2.null/df.null if(t1 < 0 || t2 < 0) { RLI <- 1 } else { RLI <- t1/t2 } } else { RLI <- 1 } indices["rfi"] <- RLI } # NFI - normed fit index (Bentler & Bonett, 1980) if("nfi" %in% fit.measures) { t1 <- X2.null - X2 t2 <- X2.null NFI <- t1/t2 indices["nfi"] <- NFI } # PNFI - Parsimony normed fit index (James, Mulaik & Brett, 1982) if("pnfi" %in% fit.measures) { t1 <- X2.null - X2 t2 <- X2.null PNFI <- (df/df.null) * t1/t2 indices["pnfi"] <- PNFI } # IFI - incremental fit index (Bollen, 1989; Joreskog & Sorbom, 1993) if("ifi" %in% fit.measures) { t1 <- X2.null - X2 t2 <- X2.null - df if(t2 < 0) { IFI <- 1 } else { IFI <- t1/t2 } indices["ifi"] <- IFI } # RNI - relative noncentrality index (McDonald & Marsh, 1990) if("rni" %in% fit.measures) { t1 <- X2 - df t2 <- X2.null - df.null if(t1 < 0 || t2 < 0) { RNI <- 1 } else { RNI <- 1 - t1/t2 } indices["rni"] <- RNI } } } N.RMSEA <- max(N, X2*4) # FIXME: good strategy?? 
if(any("rmsea" %in% fit.measures)) { # RMSEA if(is.na(X2) || is.na(df)) { RMSEA <- as.numeric(NA) } else if(df > 0) { GG <- 0 RMSEA <- sqrt( max( c((X2/N)/df - 1/(N-GG), 0) ) ) * sqrt(G) } else { RMSEA <- 0 } indices["rmsea"] <- RMSEA } if("rmsea.ci.lower" %in% fit.measures) { lower.lambda <- function(lambda) { (pchisq(X2, df=df, ncp=lambda) - 0.95) } if(is.na(X2) || is.na(df)) { indices["rmsea.ci.lower"] <- NA } else if(df < 1 || lower.lambda(0) < 0.0) { indices["rmsea.ci.lower"] <- 0 } else { lambda.l <- try(uniroot(f=lower.lambda, lower=0, upper=X2)$root) if(inherits(lambda.l, "try-error")) { lambda.l <- NA } GG <- 0 indices["rmsea.ci.lower"] <- sqrt( lambda.l/((N-GG)*df) ) * sqrt(G) } } if("rmsea.ci.upper" %in% fit.measures) { upper.lambda <- function(lambda) { (pchisq(X2, df=df, ncp=lambda) - 0.05) } if(is.na(X2) || is.na(df)) { indices["rmsea.ci.upper"] <- NA } else if(df < 1 || upper.lambda(N.RMSEA) > 0 || upper.lambda(0) < 0) { indices["rmsea.ci.upper"] <- 0 } else { lambda.u <- try(uniroot(f=upper.lambda, lower=0, upper=N.RMSEA)$root) if(inherits(lambda.u, "try-error")) { lambda.u <- NA } GG <- 0 indices["rmsea.ci.upper"] <- sqrt( lambda.u/((N-GG)*df) ) * sqrt(G) } } if("rmsea.pvalue" %in% fit.measures) { if(is.na(X2) || is.na(df)) { indices["rmsea.pvalue"] <- as.numeric(NA) } else if(df > 0) { GG <- 0 ncp <- (N-GG)*df*0.05^2/G indices["rmsea.pvalue"] <- 1 - pchisq(X2, df=df, ncp=ncp) } else { indices["rmsea.pvalue"] <- 1 } } if(any(c("rmr","srmr") %in% fit.measures)) { # RMR and SRMR rmr.group <- numeric(G) rmr_nomean.group <- numeric(G) # srmr.group <- numeric(G) # srmr_nomean.group <- numeric(G) srmr_bentler.group <- numeric(G) srmr_bentler_nomean.group <- numeric(G) srmr_bollen.group <- numeric(G) srmr_bollen_nomean.group <- numeric(G) srmr_mplus.group <- numeric(G) srmr_mplus_nomean.group <- numeric(G) upperLevelMatrices <- NULL if(G > 1) { upperLevelMatrices <- getInnerObjects(object) if(length(upperLevelMatrices) > 0) { names(upperLevelMatrices) <- 
paste0(object@name, ".", names(upperLevelMatrices)) } } for(g in 1:G) { if(G > 1) { if(is(objectSat@submodels[[g]]@expectation, "MxExpectationRAM")) { impliedSat <- getImpliedStatRAM(objectSat@submodels[[g]]) } else { impliedSat <- getImpliedStatML(objectSat@submodels[[g]]) } } else { if(is(objectSat@expectation, "MxExpectationRAM")) { impliedSat <- getImpliedStatRAM(objectSat) } else { impliedSat <- getImpliedStatML(objectSat) } } S <- impliedSat[[2]] M <- matrix(impliedSat[[1]], ncol=1) nvar <- ncol(S) # estimated if(G > 1) { if(is(object@submodels[[g]]@expectation, "MxExpectationRAM")) { implied <- getImpliedStatRAM(object@submodels[[g]]) } else { implied <- getImpliedStatML(object@submodels[[g]], xxxextraxxx = upperLevelMatrices) } } else { if(is(object@expectation, "MxExpectationRAM")) { implied <- getImpliedStatRAM(object) } else { implied <- getImpliedStatML(object) } } Sigma.hat <- implied[[2]] Mu.hat <- matrix(implied[[1]], ncol=1) # unstandardized residuals RR <- (S - Sigma.hat) # not standardized # standardized residual covariance matrix # this is the Hu and Bentler definition, not the Bollen one! sqrt.d <- 1/sqrt(diag(S)) D <- diag(sqrt.d, ncol=length(sqrt.d)) R <- D %*% (S - Sigma.hat) %*% D # Bollen approach: simply using cov2cor ('residual correlations') S.cor <- cov2cor(S) Sigma.cor <- cov2cor(Sigma.hat) R.cor <- (S.cor - Sigma.cor) if(meanstructure) { # standardized residual mean vector R.mean <- D %*% (M - Mu.hat) # EQS approach! 
RR.mean <- (M - Mu.hat) # not standardized R.cor.mean <- M/sqrt(diag(S)) - Mu.hat/sqrt(diag(Sigma.hat)) e <- nvar*(nvar+1)/2 + nvar srmr_bentler.group[g] <- sqrt( (sum(R[lower.tri(R, diag=TRUE)]^2) + sum(R.mean^2))/ e ) rmr.group[g] <- sqrt( (sum(RR[lower.tri(RR, diag=TRUE)]^2) + sum(RR.mean^2))/ e ) srmr_bollen.group[g] <- sqrt( (sum(R.cor[lower.tri(R.cor, diag=TRUE)]^2) + sum(R.cor.mean^2)) / e ) # see http://www.statmodel.com/download/SRMR.pdf srmr_mplus.group[g] <- sqrt( (sum(R.cor[lower.tri(R.cor, diag=FALSE)]^2) + sum(R.cor.mean^2) + sum(((diag(S) - diag(Sigma.hat))/diag(S))^2)) / e ) e <- nvar*(nvar+1)/2 srmr_bentler_nomean.group[g] <- sqrt( sum( R[lower.tri( R, diag=TRUE)]^2) / e ) rmr_nomean.group[g] <- sqrt( sum(RR[lower.tri(RR, diag=TRUE)]^2) / e ) srmr_bollen_nomean.group[g] <- sqrt( sum(R.cor[lower.tri(R.cor, diag=TRUE)]^2) / e ) srmr_mplus_nomean.group[g] <- sqrt( (sum(R.cor[lower.tri(R.cor, diag=FALSE)]^2) + sum(((diag(S) - diag(Sigma.hat))/diag(S))^2)) / e ) } else { e <- nvar*(nvar+1)/2 srmr_bentler_nomean.group[g] <- srmr_bentler.group[g] <- sqrt( sum(R[lower.tri(R, diag=TRUE)]^2) / e ) rmr_nomean.group[g] <- rmr.group[g] <- sqrt( sum(RR[lower.tri(RR, diag=TRUE)]^2) / e ) srmr_bollen_nomean.group[g] <- srmr_bollen.group[g] <- sqrt( sum(R.cor[lower.tri(R.cor, diag=TRUE)]^2) / e ) srmr_mplus_nomean.group[g] <- srmr_mplus.group[g] <- sqrt( (sum(R.cor[lower.tri(R.cor, diag=FALSE)]^2) + sum(((diag(S) - diag(Sigma.hat))/diag(S))^2)) / e ) } } if(G > 1) { ## FIXME: get the scaling right ngroups <- sapply(dat, slot, "numObs") SRMR_BENTLER <- as.numeric( (ngroups %*% srmr_bentler.group) / N ) SRMR_BENTLER_NOMEAN <- as.numeric( (ngroups %*% srmr_bentler_nomean.group) / N ) SRMR_BOLLEN <- as.numeric( (ngroups %*% srmr_bollen.group) / N ) SRMR_BOLLEN_NOMEAN <- as.numeric( (ngroups %*% srmr_bollen_nomean.group) / N ) SRMR_MPLUS <- as.numeric( (ngroups %*% srmr_mplus.group) / N ) SRMR_MPLUS_NOMEAN <- as.numeric( (ngroups %*% srmr_mplus_nomean.group) / N ) RMR 
<- as.numeric( (ngroups %*% rmr.group) / N ) RMR_NOMEAN <- as.numeric( (ngroups %*% rmr_nomean.group) / N ) } else { SRMR_BENTLER <- srmr_bentler.group[1] SRMR_BENTLER_NOMEAN <- srmr_bentler_nomean.group[1] SRMR_BOLLEN <- srmr_bollen.group[1] SRMR_BOLLEN_NOMEAN <- srmr_bollen_nomean.group[1] SRMR_MPLUS <- srmr_mplus.group[1] SRMR_MPLUS_NOMEAN <- srmr_mplus_nomean.group[1] RMR <- rmr.group[1] RMR_NOMEAN <- rmr_nomean.group[1] } indices["srmr"] <- SRMR_BENTLER indices["srmr_nomean"] <- SRMR_BENTLER_NOMEAN indices["srmr_bentler"] <- SRMR_BENTLER indices["srmr_bentler_nomean"] <- SRMR_BENTLER_NOMEAN indices["srmr_bollen"] <- SRMR_BOLLEN indices["srmr_bollen_nomean"] <- SRMR_BOLLEN_NOMEAN indices["srmr_mplus"] <- SRMR_MPLUS indices["srmr_mplus_nomean"] <- SRMR_MPLUS_NOMEAN indices["rmr"] <- RMR indices["rmr_nomean"] <- RMR_NOMEAN } if(any(c("cn_05", "cn_01") %in% fit.measures)) { CN_05 <- qchisq(p=0.95, df=df)/(X2/N) + 1 CN_01 <- qchisq(p=0.99, df=df)/(X2/N) + 1 indices["cn_05"] <- CN_05 indices["cn_01"] <- CN_01 } if("wrmr" %in% fit.measures) { # we use the definition: wrmr = sqrt ( 2*N*F / e ) e <- npar + df # Modified from lavaan WRMR <- sqrt( X2 / e ) indices["wrmr"] <- WRMR } # Intentionally not report GFI, AGFI, and PGFI because it requires the weight matrix # MFI - McDonald Fit Index (McDonald, 1989) if("mfi" %in% fit.measures) { #MFI <- exp(-0.5 * (X2 - df)/(N-1)) # Hu & Bentler 1998 Table 1 MFI <- exp(-0.5 * (X2 - df)/N) indices["mfi"] <- MFI } # ECVI - cross-validation index (Brown & Cudeck, 1989) # not defined for multiple groups and/or models with meanstructures if("ecvi" %in% fit.measures) { if(G > 1 || meanstructure) { ECVI <- as.numeric(NA) } else { ECVI <- X2/N + (2*npar)/N } indices["ecvi"] <- ECVI } if("ntotal" %in% fit.measures) { indices["ntotal"] <- N } # do we have everything that we requested? 
idx.missing <- which(is.na(match(fit.measures, names(indices)))) if(length(idx.missing) > 0L) { cat("lavaan WARNING: some requested fit measure(s) are not available for this model:\n") print( fit.measures[ idx.missing ] ) cat("\n") } out <- unlist(indices[fit.measures]) if(length(out) > 0L) { return(out) } else { return( invisible(numeric(0)) ) } } findDefVars <- function(object) { mat <- lapply(object@matrices, slot, "labels") defvars <- sapply(mat, function(x) x[apply(x, c(1,2), OpenMx::imxIsDefinitionVariable)]) Reduce("c", defvars) } getImpliedStatML <- function(xxxobjectxxx, xxxcovdatatxxx = NULL, xxxextraxxx = NULL) { if(!is.null(xxxextraxxx)) { xxxmatnamexxx2 <- names(xxxextraxxx) for(i in seq_along(xxxmatnamexxx2)) { assign(xxxmatnamexxx2[i], xxxextraxxx[[i]]) } } xxxmatxxx <- xxxobjectxxx@matrices xxxmatnamexxx <- names(xxxmatxxx) xxxmatvalxxx <- lapply(xxxmatxxx, slot, "values") for(i in seq_along(xxxmatnamexxx)) { assign(xxxmatnamexxx[i], xxxmatvalxxx[[i]]) } if(!is.null(xxxcovdatatxxx)) { xxxmatlabxxx <- lapply(xxxmatxxx, slot, "labels") xxxdefvarsxxx <- lapply(xxxmatlabxxx, function(x) apply(x, c(1,2), OpenMx::imxIsDefinitionVariable)) for(i in seq_along(xxxmatnamexxx)) { if(any(xxxdefvarsxxx[[i]])) { xxxtempxxx <- get(xxxmatnamexxx[i]) for(j in seq_len(length(xxxdefvarsxxx[[i]]))) { if(xxxdefvarsxxx[[i]][j]) { xxxtempnamexxx <- gsub("data.", "", xxxmatlabxxx[[i]][j]) xxxtempxxx[j] <- xxxcovdatatxxx[xxxtempnamexxx] } } assign(xxxmatnamexxx[i], xxxtempxxx) } } } xxxalgebraxxx <- xxxobjectxxx@algebras xxxalgebranamexxx <- names(xxxalgebraxxx) xxxalgebraformulaxxx <- lapply(xxxalgebraxxx, slot, "formula") for(i in seq_along(xxxalgebranamexxx)) { assign(xxxalgebranamexxx[i], eval(xxxalgebraformulaxxx[[i]])) } xxximpliedCovxxx <- get(xxxobjectxxx@expectation@covariance) if(is.na(xxxobjectxxx@expectation@means)) { xxximpliedMeanxxx <- rep(0, nrow(xxximpliedCovxxx)) } else { xxximpliedMeanxxx <- get(xxxobjectxxx@expectation@means) } 
if(is.na(xxxobjectxxx@expectation@thresholds)) { xxximpliedThresholdxxx <- NA } else { xxximpliedThresholdxxx <- get(xxxobjectxxx@expectation@thresholds) } list(xxximpliedMeanxxx, xxximpliedCovxxx, xxximpliedThresholdxxx) } getImpliedStatRAM <- function(object) { A <- object@matrices$A@values I <- diag(nrow(A)) S <- object@matrices$S@values F <- object@matrices$F@values Z <- solve(I - A) impliedCov <- F %*% Z %*% S %*% t(Z) %*% t(F) if(!is.null(object@matrices$M)) { M <- object@matrices$M@values impliedMean <- t(F %*% Z %*% t(M)) } else { impliedMean <- rep(0, nrow(impliedCov)) } list(impliedMean, impliedCov) } standardizeMx <- function(object, free = TRUE) { objectOrig <- object multigroup <- length(object@submodels) > 0 if(multigroup) { defVars <- lapply(object@submodels, findDefVars) defVars <- do.call(c, defVars) } else { defVars <- findDefVars(object) } if(length(defVars) > 0) stop("The standardizeMx is not available for the model with definition variable.") if(multigroup) { object@submodels <- lapply(object@submodels, standardizeMxSingleGroup) } else { object <- standardizeMxSingleGroup(object) } vectorizeMx(object, free=free) } standardizeMxSingleGroup <- function(object) { if(!is(object@expectation, "MxExpectationRAM")) stop("The standardizeMx function is available for the MxExpectationRAM only.") A <- object@matrices$A@values I <- diag(nrow(A)) S <- object@matrices$S@values F <- object@matrices$F@values Z <- solve(I - A) impliedCov <- Z %*% S %*% t(Z) temp <- sqrt(diag(impliedCov)) if(length(temp) == 1) { ImpliedSd <- as.matrix(temp) } else { ImpliedSd <- diag(temp) } ImpliedInvSd <- solve(ImpliedSd) object@matrices$S@values <- ImpliedInvSd %*% S %*% ImpliedInvSd object@matrices$A@values <- ImpliedInvSd %*% A %*% ImpliedSd if(!is.null(object@matrices$M)) { M <- object@matrices$M@values object@matrices$M@values <- M %*% ImpliedInvSd } return(object) } vectorizeMx <- function(object, free = TRUE) { multigroup <- length(object@submodels) > 0 if(multigroup) { 
object <- object@submodels } else { object <- list(object) } result <- NULL for(i in seq_along(object)) { name <- "" if(multigroup) name <- paste0(object[[i]]@name, ".") mat <- object[[i]]@matrices for(j in seq_along(mat)) { tempname <- paste0(name, mat[[j]]@name) lab <- mat[[j]]@labels tempfree <- as.vector(mat[[j]]@free) madeLab <- paste0(tempname, "[", row(lab), ",", col(lab), "]") lab <- as.vector(lab) madeLab[!is.na(lab)] <- lab[!is.na(lab)] if(!free) tempfree <- rep(TRUE, length(tempfree)) temp <- mat[[j]]@values[tempfree] names(temp) <- madeLab[tempfree] result <- c(result, temp) } } result[!duplicated(names(result))] } getInnerObjects <- function(xxxobjectxxx) { xxxmatxxx <- xxxobjectxxx@matrices xxxmatnamexxx <- names(xxxmatxxx) xxxmatvalxxx <- lapply(xxxmatxxx, slot, "values") for(i in seq_along(xxxmatnamexxx)) { assign(xxxmatnamexxx[i], xxxmatvalxxx[[i]]) } xxxalgebraxxx <- xxxobjectxxx@algebras xxxalgebranamexxx <- names(xxxalgebraxxx) xxxalgebraformulaxxx <- lapply(xxxalgebraxxx, slot, "formula") xxxalgebraassignedxxx <- NULL for(i in seq_along(xxxalgebranamexxx)) { temp <- NULL try(temp <- eval(xxxalgebraformulaxxx[[i]]), silent = TRUE) if(!is.null(temp)) { assign(xxxalgebranamexxx[i], temp) xxxalgebraassignedxxx <- c(xxxalgebraassignedxxx, xxxalgebranamexxx[i]) } } xxxusednamexxx <- c(xxxmatnamexxx, xxxalgebraassignedxxx) xxxresultxxx <- list() for(i in seq_along(xxxusednamexxx)) { xxxresultxxx[[i]] <- get(xxxusednamexxx[i]) } names(xxxresultxxx) <- xxxusednamexxx xxxresultxxx } semTools/R/fitIndices.R0000644000175100001440000001611613002104500014470 0ustar hornikusers## Title: Compute more fit indices ## Authors: Terrence Jorgensen ## Sunthud Pornprasertmanit , ## Aaron Boulton , ## Ruben Arslan ## Last updated: 17 October 2016 ## Description: Calculations for promising alternative fit indices ##---------------------------------------------------------------------------- moreFitIndices <- function(object, fit.measures = "all", nPrior = 1) { ## check 
for validity of user-specified "fit.measures" argument fit.choices <- c("gammaHat","adjGammaHat","baseline.rmsea", "gammaHat.scaled","adjGammaHat.scaled","baseline.rmsea.scaled", "aic.smallN","bic.priorN","hqc","sic") flags <- setdiff(fit.measures, c("all", fit.choices)) if (length(flags)) stop(paste("Argument 'fit.measures' includes invalid options:", paste(flags, collapse = ", "), "Please choose 'all' or among the following:", paste(fit.choices, collapse = ", "), sep = "\n")) if("all" %in% fit.measures) fit.measures <- fit.choices # Extract fit indices information from lavaan object fit <- lavaan::lavInspect(object, "fit") # Get the number of variable p <- length(lavaan::lavNames(object, type = "ov", group = 1)) # Get the number of parameters nParam <- fit["npar"] # Get number of observations n <- lavaan::lavInspect(object, "ntotal") # Find the number of groups ngroup <- lavaan::lavInspect(object, "ngroups") # Calculate -2*log(likelihood) f <- -2 * fit["logl"] # Compute fit indices result <- list() if (length(grep("gamma", fit.measures, ignore.case = TRUE))) { gammaHatValue <- p / (p + 2 * ((fit["chisq"] - fit["df"]) / (n - 1))) adjGammaHatValue <- 1 - (((ngroup * p * (p + 1)) / 2) / fit["df"]) * (1 - gammaHatValue) result["gammaHat"] <- gammaHatValue result["adjGammaHat"] <- adjGammaHatValue if(lavaan::lavInspect(object, "options")$test %in% c("satorra.bentler", "yuan.bentler")) { gammaHatScaledValue <- p / (p + 2 * ((fit["chisq.scaled"] - fit["df.scaled"]) / (n - 1))) adjGammaHatScaledValue <- 1 - (((ngroup * p * (p + 1)) / 2) / fit["df.scaled"]) * (1 - gammaHatScaledValue) result["gammaHat.scaled"] <- gammaHatScaledValue result["adjGammaHat.scaled"] <- adjGammaHatScaledValue } } if (length(grep("rmsea", fit.measures))) { result["baseline.rmsea"] <- nullRMSEA(object, silent = TRUE) if(lavaan::lavInspect(object, "options")$test %in% c("satorra.bentler", "yuan.bentler")) { result["baseline.rmsea.scaled"] <- nullRMSEA(object, scaled = TRUE, silent = TRUE) } } 
if(!is.na(f)) { if("aic.smallN" %in% fit.measures) result["aic.smallN"] <- f + (2 * nParam * (nParam + 1)) / (n - nParam - 1) if("bic.priorN" %in% fit.measures) result["bic.priorN"] <- f + log(1 + n/nPrior) * nParam if("hqc" %in% fit.measures) result["hqc"] <- f + 2 * log(log(n)) * nParam if("sic" %in% fit.measures) result["sic"] <- sic(f, object) } unlist(result[fit.measures]) } nullRMSEA <- function (object, scaled = FALSE, silent = FALSE) { # return RMSEA of the null model, warn if it is lower than 0.158, because it makes the TLI/CLI hard to interpret test <- lavaan::lavInspect(object, "options")$test fits <- lavaan::fitMeasures(object) N <- lavaan::lavInspect(object, "ntotal") # sample size X2 <- as.numeric ( fits['baseline.chisq'] ) # get baseline chisq df <- as.numeric ( fits['baseline.df'] ) # get baseline df G <- lavaan::lavInspect(object, "ngroups") # number of groups ### a simple rip from fit.measures.R in lavaan's codebase. N.RMSEA <- max(N, X2*4) # Check with lavaan # RMSEA if(df > 0) { if(scaled) { d <- sum(diag(lavaan::lavInspect(object, "UGamma"))) } if(lavaan::lavInspect(object, "options")$mimic %in% c("Mplus", "lavaan")) { GG <- 0 RMSEA <- sqrt( max( c((X2/N)/df - 1/(N-GG), 0) ) ) * sqrt(G) if(scaled && test != "scaled.shifted") { RMSEA.scaled <- sqrt( max( c((X2/N)/d - 1/(N-GG), 0) ) ) * sqrt(G) } else if(test == "scaled.shifted") { RMSEA.scaled <- sqrt( max(c((as.numeric(fits["baseline.chisq.scaled"])/N)/df - 1/(N-GG), 0))) * sqrt(G) } } else { RMSEA <- sqrt( max( c((X2/N)/df - 1/N, 0) ) ) if(scaled) { RMSEA.scaled <- sqrt( max( c((X2/N)/d - 1/N, 0) ) ) } } } else { RMSEA <- RMSEA.scaled <- 0 } if(scaled) { RMSEA <- RMSEA.scaled } if(!silent) { if(RMSEA < 0.158 ) { cat(paste0("TLI and other incremental fit indices may not be that informative, because the RMSEA of the baseline model is lower than 0.158 (Kenny, Kaniskan, & McCoach, 2011). 
The baseline RMSEA is ",round(RMSEA,3), "\n")) } else { cat(paste0("Baseline RMSEA: ",round(RMSEA,3), "\n")) } } invisible(RMSEA) } ## Stochastic Information Criterion ## f = minimized discrepancy function ## lresults = lavaan sem output object sic <- function(f, lresults = NULL) { E.inv <- lavaan::lavTech(lresults, "inverted.information") if(inherits(E.inv, "try-error")) { return(as.numeric(NA)) } E <- MASS::ginv(E.inv) * lavaan::nobs(lresults) eigvals <- eigen(E, symmetric = TRUE, only.values = TRUE)$values # only positive ones eigvals <- eigvals[ eigvals > sqrt(.Machine$double.eps)] DET <- prod(eigvals) ## check singular if (DET <= 0) return(NA) ## return SIC f + log(DET) } ## small-sample adjustment for (delta) chi-squared test statistic chisqSmallN <- function(fit0, fit1 = NULL, ...) { ## if there are 2 models, order them by DF if (!is.null(fit1)) { DF0 <- lavaan::fitMeasures(fit0, "df") DF1 <- lavaan::fitMeasures(fit1, "df") if (DF0 == DF1) stop("Models have the same degrees of freedom.") parent <- which.min(c(DF0, DF1)) if (parent == 1L) { parent <- fit0 fit0 <- fit1 fit1 <- parent } #if (min(c(DF0, DF1)) == 0L) fit1 <- NULL } ## calculate k-factor correction N <- lavaan::lavInspect(fit0, "ntotal") if (!lavaan::lavInspect(fit0, "options")$sample.cov.rescale) N <- N - 1 P <- length(lavaan::lavNames(fit0)) K <- length(lavaan::lavNames(fit0, type = "lv")) # count latent factors if (!is.null(fit1)) { N1 <- lavaan::lavInspect(fit1, "ntotal") if (!lavaan::lavInspect(fit1, "options")$sample.cov.rescale) N1 <- N1 - 1 if (N != N1) stop("Unequal sample sizes") if (P != length(lavaan::lavNames(fit1))) stop("Unequal number of variables") K <- max(K, length(lavaan::lavNames(fit1, type = "lv"))) } kc <- 1 - ((2*P + 4*K + 5) / (6*N)) if (is.null(fit1)) { scaled <- lavaan::lavInspect(fit0, "options")$test %in% c("satorra.bentler","yuan.bentler","mean.var.adjusted","scaled.shifted") chi <- lavaan::fitMeasures(fit0)[[if (scaled) "chisq.scaled" else "chisq"]] DF <- 
lavaan::fitMeasures(fit0)[["df"]] } else { AOV <- lavaan::lavTestLRT(fit0, fit1, ...) chi <- AOV[["Chisq diff"]][2] DF <- AOV[["Df diff"]][2] } out <- c(naive.chisq = chi, `k-factor` = kc, adj.chisq = chi*kc, df = DF, pvalue = pchisq(chi*kc, DF, lower.tail = FALSE)) class(out) <- c("lavaan.vector","numeric") out } semTools/R/htmt.R0000644000175100001440000000275213000201061013362 0ustar hornikusers### HTMT function #Written by Ylenio Longo htmt <- function(data, model, ...){ R <- lavaan::lavCor(object = data, ...) R <- abs(R) #this helps avoid errors diag(R) <- NA m <- lavaan::lavaanify(model) m <- m[m$op%in% "=~",] ##variable names for each scale / factor factors <- unique(m$lhs) var <- list() for(i in 1:length(factors)){ var[[i]] <- m$rhs[which(m$lhs %in% factors[i])] } var ##mean correlation within scales m.cor.w <- list() for(i in 1:length(factors)){ m.cor.w[[i]] <- mean(R[var[[i]],var[[i]]], na.rm=TRUE) } m.cor.w <- as.numeric(m.cor.w) m.cor.w ##geometric mean correlations within scale pairs #all possible correlation combinations comb <- expand.grid(1:length(factors), 1:length(factors)) g <- list() for(i in 1:nrow(comb)){ g[[i]] <- sqrt(m.cor.w[comb[i,2]]*m.cor.w[comb[i,1]]) } g <- as.numeric(g) g #geometric mean results paste(comb[,2], comb[,1]) ##mean correlations among items across scales m.cor.a <- list() for(i in 1:nrow(comb)){ m.cor.a[[i]] <- mean(R[var[[comb[i,2]]], var[[comb[i,1]]]], na.rm=TRUE) } m.cor.a <- as.numeric(m.cor.a) m.cor.a ##htmt values outhtmt <- m.cor.a / g ##results res <- matrix(outhtmt, nrow=length(factors), ncol=length(factors), dimnames=list(factors)) colnames(res) <- factors class(res) <- c("lavaan.matrix.symmetric", "matrix") res } semTools/R/NET.R0000644000175100001440000001526413000250017013043 0ustar hornikusers### Terrence D. 
Jorgensen ### Last updated: 14 October 2016 ### semTools function for Nesting and Equivalence Testing setClass("Net", representation(test = "matrix", df = "vector")) ## function to test whether model "x" is nested within model "y" x.within.y <- function(x, y, crit = crit) { if (length(c(lavaan::lavNames(x, "ov.ord"), lavaan::lavNames(y, "ov.ord")))) stop("The net() function is not available for categorical-data estimators.") exoX <- lavaan::lavInspect(x, "options")$fixed.x & length(lavaan::lavNames(x, "ov.x")) exoY <- lavaan::lavInspect(y, "options")$fixed.x & length(lavaan::lavNames(y, "ov.x")) if (exoX | exoY) { stop(c("The net() function does not work with exogenous variables.\n", "Fit the model again with 'fixed.x = FALSE'")) } ## variable names Xnames <- lavaan::lavNames(x) Ynames <- lavaan::lavNames(y) if (!identical(sort(Xnames), sort(Ynames))) stop("Models do not contain the same variables") ## check that the analyzed data matches xData <- lavaan::lavInspect(x, "data") if (is.list(xData)) xData <- do.call(rbind, xData) xData <- xData[ , rank(Xnames)] yData <- lavaan::lavInspect(y, "data") if (is.list(yData)) yData <- do.call(rbind, yData) yData <- yData[ , rank(Ynames)] if (!identical(xData, yData)) stop("Models must apply to the same data") ############################################################################## ## check degrees of freedom support nesting structure if (lavaan::lavInspect(x, "fit")["df"] < lavaan::lavInspect(y, "fit")["df"]) stop("x cannot be nested within y because y is more restricted than x") ## model-implied moments Sigma <- lavaan::lavInspect(x, "cov.ov") Mu <- lavaan::lavInspect(x, "mean.ov") N <- lavaan::lavInspect(x, "nobs") ## fit model and check that chi-squared < crit suppressWarnings(try(newFit <- lavaan::update(y, data = NULL, sample.cov = Sigma, sample.mean = Mu, sample.nobs = N))) if(!lavaan::lavInspect(newFit, "converged")) return(NA) else { result <- lavaan::lavInspect(newFit, "fit")["chisq"] < crit names(result) <- 
NULL if (lavaan::lavInspect(x, "fit")["df"] == lavaan::lavInspect(y, "fit")["df"]) return(c(Equivalent = result)) } c(Nested = result) } ## generic function that utilizes "x.within.y" to test a set of models net <- function(..., crit = .0001) { ## put fitted objects in a list fitList <- list(...) nFits <- length(fitList) ## check that they are all lavaan objects notLavaan <- sapply(fitList, class) != "lavaan" if (any(notLavaan)) { fitNames <- sapply(as.list(substitute(list(...)))[-1], deparse) stop(paste("The following arguments are not fitted lavaan objects:\n", paste(fitNames[notLavaan], collapse = "\t"))) } ## check whether any models include categorical outcomes catMod <- sapply(fitList, function(x) lavaan::lavInspect(x, "options")$categorical) if (any(catMod)) stop("This method only applies to continuous outcomes.") ## get degrees of freedom for each model DFs <- sapply(fitList, function(x) lavaan::lavInspect(x, "fit")["df"]) ## name according to named objects, with DF in parentheses fitNames <- names(fitList) dotNames <- sapply(as.list(substitute(list(...)))[-1], deparse) if (is.null(names(fitList))) { fitNames <- dotNames } else { noName <- which(fitNames == "") fitNames[noName] <- dotNames[noName] } names(fitList) <- paste(fitNames, " (df = ", DFs, ")", sep = "") ## sort list according to DFs fitList <- fitList[order(DFs)] fitNames <- fitNames[order(DFs)] orderedDFs <- DFs[order(DFs)] ## create structure for sequence of tests (logical matrix), FALSE by default nestMat <- matrix(FALSE, nFits, nFits, dimnames = list(names(fitList), fitNames)) diag(nestMat) <- TRUE # every model is equivalent with itself ## Loop through sorted models in sequence of most to least restricted model for (R in 2:nrow(nestMat)) { for (C in (R - 1):1) { ## test for nesting/equivalence nestMat[R, C] <- x.within.y(x = fitList[[R]], y = fitList[[C]], crit = crit) ## if models are equivalent, set above-diagonal value to TRUE if (identical(orderedDFs[R], orderedDFs[C])) nestMat[C, R] <- 
nestMat[R, C] if (C == 1) next # to prevent the next 2 tests from returning an error ## if model didn't converge (logical value is missing), go to next iteration if (is.na(nestMat[R, C]) | is.na(nestMat[R - 1, C - 1])) next ## check whether nesting is implied, to skip unnecessary tests if (nestMat[R, C] & nestMat[R - 1, C - 1]) { nestMat[R, C - 1] <- TRUE next } } } # class(nestMat) <- c("Net", class(nestMat)) # attr(nestMat, "df") <- orderedDFs out <- new("Net", test = nestMat, df = orderedDFs ) out } setMethod("show", "Net", function(object) { if (length(object@test)) { m <- as.matrix(unclass(object@test)) m[upper.tri(m, diag = TRUE)] <- "" cat(" If cell [R, C] is TRUE, the model in row R is nested within column C. If cell [R, C] is TRUE and the models have the same degrees of freedom, they are equivalent models. See Bentler & Satorra (2010) for details. If cell [R, C] is NA, then the model in column C did not converge when fit to the implied means and covariance matrix from the model in row R. The hidden diagonal is TRUE because any model is equivalent to itself. The upper triangle is hidden because for models with the same degrees of freedom, cell [C, R] == cell [R, C]. For all models with different degrees of freedom, the upper diagonal is all FALSE because models with fewer degrees of freedom (i.e., more parameters) cannot be nested within models with more degrees of freedom (i.e., fewer parameters). 
\n") print(m, quote = FALSE) } else { cat(data.class(object@test), "(0)\n", sep = "") } invisible(object) }) setMethod("summary", "Net", function(object) { DFs <- object@df x <- object@test mods <- colnames(x) for (R in 2:nrow(x)) { for (C in (R - 1):1) { ## if model didn't converge (logical value is missing), go to next iteration if (is.na(x[R, C])) next ## if the models are not nested, go to next iteration if (!x[R, C]) next ## choose message based on whether models are equivalent or nested if (identical(DFs[R], DFs[C])) { rel <- "equivalent to" } else { rel <- "nested within" } cat("Model \"", mods[R], "\" is ", rel, " model \"", mods[C], "\"\n", sep = "") } } invisible(object) }) semTools/R/clipboard.R0000644000175100001440000002202613000250017014346 0ustar hornikusers### Title: Copy or save each aspect of the lavaan object into a clipboard or a file ### Author: Sunthud Pornprasertmanit ### Last updated: 14 October 2016 ### Description: Copy or print each aspect of the lavaan object into a clipboard or a file # Clipboard: copy each aspect of the lavaan object into a clipboard; this function will be compatible with lavaan::lavInspect clipboard <- function(object, what="summary", ...) { if(.Platform$OS.type == "windows") { saveFile(object, file="clipboard-128", what=what, tableFormat=TRUE, ...) cat("File saved in the clipboard; please paste it in any program you wish.\n") } else { if(system("pbcopy", ignore.stderr = TRUE) == 0) { saveFile(object, file=pipe("pbcopy", "w"), what=what, tableFormat=TRUE, ...) cat("File saved in the clipboard; please paste it in any program you wish. If you cannot paste it, it is okay because this function works for some computers, which I still have no explanation currently. Please consider using the 'saveFile' function instead.\n") } else if (system("xclip -version", ignore.stderr = TRUE) == 0) { saveFile(object, file=pipe("xclip -i", "w") , what=what, tableFormat=TRUE, ...) 
cat("File saved in the xclip; please paste it in any program you wish. If you cannot paste it, it is okay because this function works for some computers, which I still have no explanation currently. Please consider using the 'saveFile' function instead.\n") } else { stop("For Mac users, the 'pbcopy' command in the shell file does not work. For linux users, this function depends on the 'xclip' application. Please install and run the xclip application before using this function in R (it does not guarantee to work though). Alternatively, use the 'saveFile' function to write the output into a file.") } } } # saveFile: save each aspect of the lavaan object into a file; this function will be compatible with lavaan::lavInspect saveFile <- function(object, file, what="summary", tableFormat=FALSE, ...) { # Check whether the object is in the lavaan class if(is(object, "lavaan")) { saveFileLavaan(object, file, what=what, tableFormat=tableFormat, ...) } else if(is(object, "FitDiff")) { saveFileFitDiff(object, file, what=what, tableFormat=tableFormat) } else { stop("The object must be in the `lavaan' output or the output from the compareFit function.") } } saveFileLavaan <- function(object, file, what="summary", tableFormat=FALSE, ...) 
{ if(length(what) > 1) { stop("`what' arguments contains multiple arguments; only one is allowed") } # be case insensitive what <- tolower(what) if(what == "summary") { if(tableFormat) { copySummary(object, file=file) } else { write(paste(capture.output(summary(object, rsquare=TRUE, standardize=TRUE, fit.measure=TRUE)), collapse="\n"), file=file) } } else if (what == "mifit") { if(tableFormat) { write.table(miPowerFit(object, ...), file=file, sep="\t", row.names=FALSE, col.names=TRUE) } else { write(paste(capture.output(miPowerFit(object, ...)), collapse="\n"), file=file) } } else { target <- lavaan::lavInspect(object, what=what) if(tableFormat) { if(is(target, "lavaan.data.frame") || is(target, "data.frame")) { utils::write.table(target, file=file, sep="\t", row.names=FALSE, col.names=TRUE) } else if (is(target, "list")) { if(is(target[[1]], "list")) { target <- lapply(target, listToDataFrame) target <- mapply(function(x, y) rbind(rep("", ncol(y)), c(x, rep("", ncol(y) - 1)), y), names(target), target, SIMPLIFY=FALSE) target <- do.call(rbind, target) utils::write.table(target[-1,], file=file, sep="\t", row.names=FALSE, col.names=FALSE) } else { target <- listToDataFrame(target) utils::write.table(target, file=file, sep="\t", row.names=FALSE, col.names=FALSE) } } else { utils::write.table(target, file=file, sep="\t", row.names=TRUE, col.names=TRUE) } } else { write(paste(utils::capture.output(target), collapse="\n"), file=file) } } } # copySummary: copy the summary of the lavaan object into the clipboard and potentially be useful if users paste it into the excel application # object = lavaan object input copySummary <- function(object, file) { # Capture the output of the lavaan class outputText <- utils::capture.output(lavaan::summary(object, rsquare=TRUE, standardize=TRUE, fit.measure=TRUE)) # Split the text by two spaces outputText <- strsplit(outputText, " ") # Trim and delete the "" elements outputText <- lapply(outputText, function(x) x[x != ""]) outputText <- 
lapply(outputText, trim) outputText <- lapply(outputText, function(x) x[x != ""]) # Group the output into three sections: fit, parameter estimates, and r-squared cut1 <- grep("Estimate", outputText)[1] cut2 <- grep("R-Square", outputText)[1] set1 <- outputText[1:(cut1 - 1)] set2 <- outputText[cut1:(cut2 - 1)] set3 <- outputText[cut2:length(outputText)] # Assign the number of columns in the resulting data frame and check whether the output contains any labels numcol <- 7 test <- set2[-grep("Estimate", set2)] test <- test[sapply(test, length) >=2] if(any(sapply(test, function(x) is.na(suppressWarnings(as.numeric(x[2])))))) numcol <- numcol + 1 # A function to parse the fit-measures output set1Parse <- function(x, numcol) { if(length(x) == 0) { return(rep("", numcol)) } else if(length(x) == 1) { return(c(x, rep("", numcol - 1))) } else if((length(x) >= 2) & (length(x) <= numcol)) { return(c(x[1], rep("", numcol - length(x)), x[2:length(x)])) } else { stop("Cannot parse text") } } set1 <- t(sapply(set1, set1Parse, numcol)) # A function to parse the parameter-estimates output set2Parse <- function(x, numcol) { if(length(x) == 0) return(rep("", numcol)) if(any(grepl("Estimate", x))) return(c(rep("", numcol-length(x)), x)) if(length(x) == 1) { return(c(x, rep("", numcol-1))) } else { group1 <- x[1] group2 <- x[2:length(x)] if(is.na(suppressWarnings(as.numeric(x[2])))) { group1 <- x[1:2] group2 <- x[3:length(x)] } else if (numcol == 8) { group1 <- c(group1, "") } if(length(group2) == 1) { group2 <- c(group2, rep("", 6 - length(group2))) } else if(length(group2) == 4) { group2 <- c(group2, rep("", 6 - length(group2))) } else { group2 <- c(group2[1], rep("", 6 - length(group2)), group2[2:length(group2)]) } return(c(group1, group2)) } } set2 <- t(sapply(set2, set2Parse, numcol)) # A function to parse the r-squared output set3Parse <- function(x, numcol) { if(length(x) == 0) { return(rep("", numcol)) } else { return(c(x, rep("", numcol - length(x)))) } } set3 <- t(sapply(set3, 
set3Parse, numcol)) # Copy the output into the clipboard utils::write.table(rbind(set1, set2, set3), file=file, sep="\t", row.names=FALSE, col.names=FALSE) } # trim function from the R.oo package trim <- function(object) { s <- sub("^[\t\n\f\r ]*", "", as.character(object)); s <- sub("[\t\n\f\r ]*$", "", s); s; } # listToDataFrame: Change a list with multiple elements into a single data.frame listToDataFrame <- function(object) { name <- names(object) # Count the maximum number of column (+1 is for the column for row name) numcol <- max(sapply(object, function(x) ifelse(is(x, "lavaan.matrix") || is(x, "lavaan.matrix.symmetric") || is(x, "matrix") || is(x, "data.frame"), return(ncol(x)), return(1)))) + 1 # Change all objects in the list into a data.frame with the specified column target <- lapply(object, niceDataFrame, numcol) # Paste the name of each object into each data.frame target <- mapply(function(x, y) rbind(rep("", ncol(y)), c(x, rep("", ncol(y) - 1)), y), name, target, SIMPLIFY=FALSE) # Combine into a single data.frame target <- do.call(rbind, target) target[-1,] } # niceDataFrame: Change an object into a data.frame with a specified number of columns and the row and column names are included in the data.frame niceDataFrame <- function(object, numcol) { temp <- NULL if(is(object, "lavaan.matrix.symmetric")) { # save only the lower diagonal of the symmetric matrix temp <- matrix("", nrow(object), ncol(object)) for(i in 1:nrow(object)) { temp[i, 1:i] <- object[i, 1:i] } } else if (is(object, "data.frame") || is(object, "matrix") || is(object, "lavaan.matrix")) { # copy the matrix temp <- object } else if (is(object, "vector") || is(object, "lavaan.vector")) { # transform a vector into a matrix object <- as.matrix(object) temp <- object } else { stop("The 'niceDataFrame' function has a bug. 
Please contact the developer.") } # Transform into the result with a specified number of columns, excluding the row name result <- matrix("", nrow(temp), numcol - 1) # Parse the column names result[,1:ncol(temp)] <- temp firstRow <- colnames(object) ifelse(is.null(firstRow), firstRow <- rep("", ncol(result)), firstRow <- c(firstRow, rep("", numcol - length(firstRow) - 1))) # Parse the row names result <- rbind(firstRow, result) firstCol <- rownames(object) ifelse(is.null(firstCol), firstCol <- rep("", nrow(result)), firstCol <- c("", firstCol)) result <- cbind(firstCol, result) dimnames(result) <- NULL result } semTools/R/partialInvariance.R0000644000175100001440000010722313000201061016041 0ustar hornikusers# Work with only with congeneric models partialInvariance <- function(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", fbound = 2, return.fit = FALSE, method = "satorra.bentler.2001") { # fit <- measurementInvariance(HW.model, data=HolzingerSwineford1939, group="school", strict = TRUE) # type <- "weak" # free <- NULL # fix <- "x1" # refgroup <- 1 # poolvar <- TRUE # p.adjust <- "none" # return.fit <- FALSE # fbound <- 2 # method <- "satorra.bentler.2001" type <- tolower(type) numType <- 0 fit1 <- fit0 <- NULL # fit0 = Nested model, fit1 = Parent model if(type %in% c("metric", "weak", "loading", "loadings")) { numType <- 1 if(all(c("fit.configural", "fit.loadings") %in% names(fit))) { fit1 <- fit$fit.configural fit0 <- fit$fit.loadings } else { stop("The elements named 'fit.configural' and 'fit.loadings' are needed in the 'fit' argument") } } else if (type %in% c("scalar", "strong", "intercept", "intercepts", "threshold", "thresholds")) { numType <- 2 if(all(c("fit.loadings", "fit.intercepts") %in% names(fit))) { fit1 <- fit$fit.loadings fit0 <- fit$fit.intercepts } else { stop("The elements named 'fit.loadings' and 'fit.intercepts' are needed in the 'fit' argument") } } else if (type %in% c("strict", "residual", "residuals", 
"error", "errors")) { numType <- 3 if(all(c("fit.intercepts", "fit.residuals") %in% names(fit))) { fit1 <- fit$fit.intercepts fit0 <- fit$fit.residuals } else { stop("The elements named 'fit.intercepts' and 'fit.residuals' are needed in the 'fit' argument") } } else if (type %in% c("means", "mean")) { numType <- 4 if("fit.means" %in% names(fit)) { fit0 <- fit$fit.means if("fit.residuals" %in% names(fit)) { fit1 <- fit$fit.residuals } else if ("fit.intercepts" %in% names(fit)) { fit1 <- fit$fit.intercepts } else { stop("The elements named either 'fit.residuals' or 'fit.intercepts ' is needed in the 'fit' argument") } } else { stop("The elements named 'fit.means' is needed in the 'fit' argument") } } else { stop("Please specify the correct type of measurement invariance. See the help page.") } pt1 <- lavaan::partable(fit1) pt0 <- lavaan::partable(fit0) pt0$start <- pt0$est <- pt0$se <- NULL pt1$start <- pt1$est <- pt1$se <- NULL pt1$label[substr(pt1$label, 1, 1) == "." & substr(pt1$label, nchar(pt1$label), nchar(pt1$label)) == "."] <- "" pt0$label[substr(pt0$label, 1, 1) == "." & substr(pt0$label, nchar(pt0$label), nchar(pt0$label)) == "."] <- "" namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) if(length(table(table(pt0$rhs[pt0$op == "=~"]))) != 1) stop("The model is not congeneric. This function does not support non-congeneric model.") varfree <- varnames <- unique(pt0$rhs[pt0$op == "=~"]) facnames <- unique(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) facrepresent <- table(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)], pt0$rhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) if(any(apply(facrepresent, 2, function(x) sum(x != 0)) > 1)) stop("The model is not congeneric. 
This function does not support non-congeneric model.") facList <- list() for(i in 1:nrow(facrepresent)) { facList[[i]] <- colnames(facrepresent)[facrepresent[i,] > 0] } names(facList) <- rownames(facrepresent) facList <- facList[match(names(facList), facnames)] fixLoadingFac <- list() for(i in seq_along(facList)) { select <- pt1$lhs == names(facList)[i] & pt1$op == "=~" & pt1$rhs %in% facList[[i]] & pt1$group == 1 & pt1$free == 0 & (!is.na(pt1$ustart) & pt1$ustart > 0) fixLoadingFac[[i]] <- pt1$rhs[select] } names(fixLoadingFac) <- names(facList) fixIntceptFac <- list() for(i in seq_along(facList)) { select <- pt1$op == "~1" & pt1$rhs %in% facList[[i]] & pt1$group == 1 & pt1$free == 0 fixIntceptFac[[i]] <- pt1$rhs[select] } names(fixIntceptFac) <- names(facList) ngroups <- max(pt0$group) neach <- lavaan::lavInspect(fit0, "nobs") groupvar <- lavaan::lavInspect(fit0, "group") grouplab <- lavaan::lavInspect(fit0, "group.label") if(!is.numeric(refgroup)) refgroup <- which(refgroup == grouplab) grouporder <- 1:ngroups grouporder <- c(refgroup, setdiff(grouporder, refgroup)) grouplaborder <- grouplab[grouporder] complab <- paste(grouplaborder[2:ngroups], "vs.", grouplaborder[1]) if(ngroups <= 1) stop("Well, the number of groups is 1. 
Measurement invariance across 'groups' cannot be done.") if(numType == 4) { if(!all(c(free, fix) %in% facnames)) stop("'free' and 'fix' arguments should consist of factor names because mean invariance is tested.") } else { if(!all(c(free, fix) %in% varnames)) stop("'free' and 'fix' arguments should consist of variable names.") } result <- fixCon <- freeCon <- NULL estimates <- NULL listFreeCon <- listFixCon <- list() beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) if(numType == 1) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for(i in seq_along(fix)) { if(dup[i]) { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } else { oldmarker <- fixLoadingFac[[facinfix[i]]] if(length(oldmarker) > 0) { oldmarkerval <- pt1$ustart[pt1$lhs == facinfix[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] if(oldmarker == fix[i]) { pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) } else { pt0 <- freeParTable(pt0, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- constrainParTable(pt0, facinfix[i], "=~", oldmarker, 1:ngroups) pt1 <- freeParTable(pt1, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) fixLoadingFac[[facinfix[i]]] <- fix[i] } } else { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } } } } if(!is.null(free)) { facinfree <- findFactor(free, facList) for(i in seq_along(free)) { # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinfree[i]]] if(length(oldmarker) > 0 && oldmarker == free[i]) { 
oldmarkerval <- pt1$ustart[pt1$lhs == facinfix[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] candidatemarker <- setdiff(facList[[facinfree[i]]], free[i])[1] pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) pt0 <- fixParTable(pt0, facinfix[i], "=~", candidatemarker, 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", candidatemarker, 1:ngroups, oldmarkerval) fixLoadingFac[[facinfix[i]]] <- candidatemarker } else { pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } obsmean <- sapply(lavaan::lavInspect(fit0, "sampstat"), "[[", "mean") obsmean <- obsmean[,grouporder] obsdiff <- obsmean[,2:ngroups, drop = FALSE] - matrix(obsmean[,1], nrow(obsmean), ngroups - 1) obsdiff <- obsdiff[varfree, , drop = FALSE] colnames(obsdiff) <- paste0("diff_mean:", complab) estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("load:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("q:", complab) esdiff <- matrix(NA, length(varfree), ngroups - 1) # Extract facmean, facsd, load, tau -> lowdiff, highdiff lowdiff <- matrix(NA, length(varfree), ngroups - 1) highdiff <- matrix(NA, length(varfree), ngroups - 1) colnames(lowdiff) <- paste0("low_fscore:", complab) colnames(highdiff) <- paste0("high_fscore:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) 
colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$rhs %in% varfree) & (pt1$op == "=~") & (pt1$group == 1)) facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) indexfixvar <- which((pt1$rhs %in% varinfixvar) & (pt1$op == "=~") & (pt1$group == 1)) varnonfixvar <- setdiff(varfree, varinfixvar) indexnonfixvar <- setdiff(index, indexfixvar) pos <- 1 for(i in seq_along(indexfixvar)) { runnum <- indexfixvar[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- grouplab ifelse(poolvar, 
refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if(any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. The standardized loadings used in Fisher z transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] facMean <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~1", "", 1:ngroups) wlow <- min(facMean - fbound * sqrt(facVal)) whigh <- max(facMean + fbound * sqrt(facVal)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$rhs[runnum], "~1", "", 1:ngroups) loadVal <- loadVal[grouporder] intVal <- intVal[grouporder] loaddiff <- loadVal[2:ngroups] - loadVal[1] intdiff <- intVal[2:ngroups] - intVal[1] lowdiff[pos,] <- intdiff + wlow * loaddiff highdiff[pos,] <- intdiff + whigh * loaddiff } listFreeCon <- c(listFreeCon, tryresult0) waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) pos <- pos + 1 } facinvarfree <- findFactor(varnonfixvar, facList) for(i in seq_along(indexnonfixvar)) { runnum <- indexnonfixvar[i] # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinvarfree[i]]] if(length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varnonfixvar[i])[1] temp <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) temp <- fixParTable(temp, facinvarfree[i], "=~", candidatemarker, 1:ngroups, ustart = 1) temp <- constrainParTable(temp, facinvarfree[i], "=~", 
varnonfixvar[i], 1:ngroups) newparent <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) newparent <- fixParTable(newparent, facinvarfree[i], "=~", candidatemarker, 1:ngroups, ustart = 1) newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if(!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } waldCon[pos,] <- waldConstraint(newparentfit, newparent, waldMat, cbind(facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups)) } } else { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } listFixCon <- c(listFixCon, tryresult) if(length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- 
getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- grouplab ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if(any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. The standardized loadings used in Fisher z transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] facMean <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~1", "", 1:ngroups) wlow <- min(facMean - fbound * sqrt(facVal)) whigh <- max(facMean + fbound * sqrt(facVal)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$rhs[runnum], "~1", "", 1:ngroups) loadVal <- loadVal[grouporder] intVal <- intVal[grouporder] loaddiff <- loadVal[2:ngroups] - loadVal[1] intdiff <- intVal[2:ngroups] - intVal[1] lowdiff[pos,] <- intdiff + wlow * loaddiff highdiff[pos,] <- intdiff + whigh * loaddiff } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- 
rownames(estimates) <- namept1[c(indexfixvar, indexnonfixvar)] estimates <- cbind(estimates, stdestimates, esstd, esz, obsdiff, lowdiff, highdiff) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 2) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for(i in seq_along(fix)) { if(dup[i]) { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } else { oldmarker <- fixIntceptFac[[facinfix[i]]] if(length(oldmarker) > 0) { oldmarkerval <- pt1$ustart[pt1$lhs == fix[i] & pt1$op == "~1" & pt1$rhs == "" & pt1$group == 1] if(oldmarker == fix[i]) { pt0 <- fixParTable(pt0, fix[i], "~1", "", 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, fix[i], "~1", "", 1:ngroups, oldmarkerval) } else { pt0 <- freeParTable(pt0, oldmarker, "~1", "", 1:ngroups) pt0 <- constrainParTable(pt0, oldmarker, "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, oldmarker, "~1", "", 1:ngroups) pt0 <- fixParTable(pt0, fix[i], "~1", "", 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, fix[i], "~1", "", 1:ngroups, oldmarkerval) fixIntceptFac[[facinfix[i]]] <- fix[i] } } else { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } } } } if(!is.null(free)) { facinfree <- findFactor(free, facList) for(i in seq_along(free)) { # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinfree[i]]] if(length(oldmarker) > 0 && oldmarker == free[i]) { oldmarkerval <- pt1$ustart[pt1$lhs == oldmarker & pt1$op == "~1" & pt1$rhs == "" & pt1$group == 1] candidatemarker <- setdiff(facList[[facinfree[i]]], free[i])[1] pt0 <- freeParTable(pt0, free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) pt0 <- fixParTable(pt0, candidatemarker, "~1", "", 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, candidatemarker, "~1", "", 1:ngroups, oldmarkerval) 
fixIntceptFac[[facinfix[i]]] <- candidatemarker } else { pt0 <- freeParTable(pt0, free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } obsmean <- sapply(lavaan::lavInspect(fit0, "sampstat"), "[[", "mean") obsmean <- obsmean[,grouporder] obsdiff <- obsmean[,2:ngroups, drop = FALSE] - matrix(obsmean[,1], nrow(obsmean), ngroups - 1) obsdiff <- obsdiff[varfree, , drop = FALSE] colnames(obsdiff) <- paste0("diff_mean:", complab) # Prop diff propdiff <- matrix(NA, length(varfree), ngroups - 1) colnames(propdiff) <- paste0("propdiff:", complab) estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("int:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~1") & (pt1$group == 1)) facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) indexfixvar <- which((pt1$lhs %in% varinfixvar) & (pt1$op == "~1") & (pt1$group == 1)) varnonfixvar <- setdiff(varfree, varinfixvar) indexnonfixvar <- setdiff(index, indexfixvar) pos <- 1 for(i in seq_along(varinfixvar)) { runnum <- indexfixvar[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 
1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- intVal totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- intVal / sqrt(refTotalVal) stdestimates[pos,] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos,] <- stdIntVal[2:ngroups] - stdIntVal[1] intVal <- intVal[grouporder] propdiff[pos,] <- (intVal[2:ngroups] - intVal[1]) / obsdiff[pos,] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) pos <- pos + 1 } facinvarfree <- findFactor(varfree, facList) for(i in seq_along(varnonfixvar)) { runnum <- indexnonfixvar[i] # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinvarfree[i]]] if(length(oldmarker) > 0 && oldmarker == varfree[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varfree[i])[1] temp <- freeParTable(pt1, varfree[i], "~1", "", 
1:ngroups) temp <- constrainParTable(temp, varfree[i], "~1", "", 1:ngroups) temp <- fixParTable(temp, candidatemarker, "~1", "", 1:ngroups) newparent <- freeParTable(pt1, varfree[i], "~1", "", 1:ngroups) newparent <- fixParTable(newparent, candidatemarker, "~1", "", 1:ngroups) newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if(!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } waldCon[pos,] <- waldConstraint(newparentfit, newparent, waldMat, cbind(varfree[i], "~1", "", 1:ngroups)) } } else { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } listFixCon <- c(listFixCon, tryresult) if(length(oldmarker) > 0 && oldmarker == varfree[i]) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), 
deltacfi(tempfit0, fit0)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- intVal totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- intVal / sqrt(refTotalVal) stdestimates[pos,] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos,] <- stdIntVal[2:ngroups] - stdIntVal[1] intVal <- intVal[grouporder] propdiff[pos,] <- (intVal[2:ngroups] - intVal[1]) / obsdiff[pos,] } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[c(indexfixvar, indexnonfixvar)] estimates <- cbind(estimates, stdestimates, esstd, obsdiff, propdiff) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 3) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { for(i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~~", fix[i], 1:ngroups) } } if(!is.null(free)) { for(i in seq_along(free)) { pt0 <- freeParTable(pt0, free[i], "~~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~~", free[i], 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } # Prop diff propdiff <- matrix(NA, length(varfree), ngroups - 1) colnames(propdiff) <- paste0("propdiff:", complab) estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- 
matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("errvar:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("h:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~~") & (pt1$lhs == pt1$rhs) & (pt1$group == 1)) for(i in seq_along(index)) { runnum <- index[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[i,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) errVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- errVal totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$rhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdErrVal 
<- errVal / sqrt(refTotalVal) stdestimates[i,] <- stdErrVal stdErrVal <- stdErrVal[grouporder] esstd[i,] <- stdErrVal[2:ngroups] - stdErrVal[1] if(any(abs(stdErrVal) > 0.9999)) warning(paste("The uniqueness of", pt0$rhs[runnum], "in some groups are over 1. The uniqueness used in arctan transformation are changed to 0.9999.")) stdErrVal[stdErrVal > 0.9999] <- 0.9999 zErrVal <- asin(sqrt(stdErrVal)) esz[i,] <- zErrVal[2:ngroups] - zErrVal[1] errVal <- errVal[grouporder] totalVal <- totalVal[grouporder] errdiff <- errVal[2:ngroups] - errVal[1] totaldiff <- totalVal[2:ngroups] - totalVal[1] propdiff[i,] <- errdiff / totaldiff } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd, esz, propdiff) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 4) { varfree <- facnames if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { for(i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } } if(!is.null(free)) { for(i in seq_along(free)) { pt0 <- freeParTable(pt0, free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) 
colnames(estimates) <- c("poolest", paste0("mean:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~1") & (pt1$group == 1)) for(i in seq_along(index)) { runnum <- index[i] isfree <- pt1$free[runnum] != 0 if(isfree) { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } else { temp <- fixParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 2:ngroups, ustart = pt1$ustart[runnum]) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) isfree0 <- pt0$free[runnum] != 0 if(isfree0) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[i,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) meanVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- 
meanVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) stdMeanVal <- meanVal / sqrt(refFacVal) stdestimates[i,] <- stdMeanVal stdMeanVal <- stdMeanVal[grouporder] esstd[i,] <- stdMeanVal[2:ngroups] - stdMeanVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd) result <- cbind(freeCon, fixCon, waldCon) } if(return.fit) { return(invisible(list(estimates = estimates, results = result, models = list(free = listFreeCon, fix = listFixCon, nested = fit0, parent = fit1)))) } else { return(list(estimates = estimates, results = result)) } } findFactor <- function(var, facList) { tempfac <- lapply(facList, intersect, var) facinvar <- rep(names(tempfac), sapply(tempfac, length)) facinvar[match(unlist(tempfac), var)] } waldConstraint <- function(fit, pt, mat, ...) { dotdotdot <- list(...) 
overallMat <- NULL for(i in seq_along(dotdotdot)) { target <- dotdotdot[[i]] tempMat <- mat element <- apply(target, 1, matchElement, parTable=pt) freeIndex <- pt$free[element] tempMat[,freeIndex[1]] <- -1 for(m in 2:length(freeIndex)) { tempMat[m - 1, freeIndex[m]] <- 1 } overallMat <- rbind(overallMat, tempMat) } result <- rep(NA, 3) if(!any(apply(overallMat, 1, sum) != 0)) { try(result <- waldContrast(fit, overallMat), silent = TRUE) } return(result) } poolVariance <- function(var, n) { nm <- n - 1 sum(var * nm) / sum(nm) } deltacfi <- function(parent, nested) lavaan::fitmeasures(nested)["cfi"] - lavaan::fitmeasures(parent)["cfi"]semTools/R/zzz.R0000644000175100001440000000106513000201061013237 0ustar hornikusers.onAttach <- function(libname, pkgname) { version <- read.dcf(file = system.file("DESCRIPTION", package = pkgname), fields = "Version") packageStartupMessage(" ") packageStartupMessage("###############################################################################") packageStartupMessage("This is ", paste(pkgname, version)) packageStartupMessage("All users of R (or SEM) are invited to submit functions or ideas for functions.") packageStartupMessage("###############################################################################") } semTools/R/reliability.R0000644000175100001440000003116613000201061014720 0ustar hornikusers## Title: Reliability of factors ## Author: Sunthud Pornprasertmanit ; Yves Rosseel ## Description: Find the relability values of each factor ##----------------------------------------------------------------------------## reliability <- function(object) { param <- lavaan::lavInspect(object, "coef") ngroup <- lavaan::lavInspect(object, "ngroups") name <- names(param) if(ngroup == 1) { ly <- param[name == "lambda"] } else { ly <- lapply(param, "[[", "lambda") } ps <- lavaan::lavInspect(object, "cov.lv") if(ngroup == 1) ps <- list(ps) if(ngroup == 1) { te <- param[name == "theta"] } else { te <- lapply(param, "[[", "theta") } SigmaHat <- 
lavaan::lavInspect(object, "cov.ov") if(ngroup == 1) SigmaHat <- list(SigmaHat) if(ngroup == 1) { tau <- param[name == "tau"] } else { tau <- lapply(param, "[[", "tau") } implied <- lavaan::fitted.values(object)[name = "cov"] categorical <- (length(tau) > 0) && !is.null(tau[[1]]) threshold <- NULL if(ngroup == 1) { S <- list(lavaan::lavInspect(object, "sampstat")$cov) } else { S <- lapply(lavaan::lavInspect(object, "sampstat"), function(x) x$cov) } if(categorical) { polycor <- polycorLavaan(object) if(ngroup == 1) polycor <- list(polycor) S <- lapply(polycor, function(x) x[rownames(ly[[1]]), rownames(ly[[1]])]) threshold <- getThreshold(object) SigmaHat <- thetaImpliedTotalVar(object) } flag <- FALSE result <- list() for(i in 1:ngroup) { common <- (apply(ly[[i]], 2, sum)^2) * diag(ps[[i]]) truevar <- ly[[i]]%*%ps[[i]]%*%t(ly[[i]]) error <- rep(NA, length(common)) alpha <- rep(NA, length(common)) total <- rep(NA, length(common)) omega1 <- omega2 <- omega3 <- rep(NA, length(common)) impliedTotal <- rep(NA, length(common)) avevar <- rep(NA, length(common)) for(j in 1:length(error)) { index <- which(ly[[i]][,j] != 0) error[j] <- sum(te[[i]][index, index]) sigma <- S[[i]][index, index] alpha[j] <- computeAlpha(sigma, length(index)) total[j] <- sum(sigma) impliedTotal[j] <- sum(SigmaHat[[i]][index, index]) faccontrib <- ly[[i]][,j, drop = FALSE] %*%ps[[i]][j,j,drop = FALSE]%*%t(ly[[i]][,j, drop = FALSE]) truefac <- diag(faccontrib[index, index]) commonfac <- sum(faccontrib[index, index]) trueitem <- diag(truevar[index, index]) erritem <- diag(te[[i]][index, index]) if(sum(abs(trueitem - truefac)) < 0.00001) { avevar[j] <- sum(trueitem) / sum(trueitem + erritem) } else { avevar[j] <- NA } if(categorical) { omega1[j] <- omegaCat(faccontrib[index, index], SigmaHat[[i]][index, index], threshold[[i]][index], faccontrib[index, index] + te[[i]][index, index]) omega2[j] <- omegaCat(faccontrib[index, index], SigmaHat[[i]][index, index], threshold[[i]][index], SigmaHat[[i]][index, 
index]) omega3[j] <- omegaCat(faccontrib[index, index], SigmaHat[[i]][index, index], threshold[[i]][index], sigma) } else { omega1[j] <- commonfac / (commonfac + error[j]) omega2[j] <- commonfac / impliedTotal[j] omega3[j] <- commonfac / total[j] } } alpha <- c(alpha, total = computeAlpha(S[[i]], nrow(S[[i]]))) names(alpha) <- c(names(common), "total") if(categorical) { omega1 <- c(omega1, total = omegaCat(truevar, SigmaHat[[i]], threshold[[i]], truevar + te[[i]])) omega2 <- c(omega2, total = omegaCat(truevar, SigmaHat[[i]], threshold[[i]], SigmaHat[[i]])) omega3 <- c(omega3, total = omegaCat(truevar, SigmaHat[[i]], threshold[[i]], S[[i]])) } else { omega1 <- c(omega1, total = sum(truevar) / (sum(truevar) + sum(te[[i]]))) omega2 <- c(omega2, total = sum(truevar) / (sum(SigmaHat[[i]]))) omega3 <- c(omega3, total = sum(truevar) / (sum(S[[i]]))) } avevar <- c(avevar, total = sum(diag(truevar))/ sum((diag(truevar) + diag(te[[i]])))) singleIndicator <- apply(ly[[i]], 2, function(x) sum(x != 0)) %in% 0:1 result[[i]] <- rbind(alpha=alpha, omega=omega1, omega2=omega2,omega3=omega3, avevar = avevar)[,!singleIndicator] } if(flag) warning("The alpha and the average variance extracted are calculated from polychoric (polyserial) correlation not from Pearson's correlation.\n") if(ngroup == 1) { result <- result[[1]] } else { names(result) <- lavaan::lavInspect(object, "group.label") } result } computeAlpha <- function(S, k) k/(k - 1) * (1.0 - sum(diag(S))/sum(S)) reliabilityL2 <- function(object, secondFactor) { param <- lavaan::lavInspect(object, "coef") ngroup <- lavaan::lavInspect(object, "ngroups") name <- names(param) if(ngroup == 1) { ly <- param[name == "lambda"] } else { ly <- lapply(param, "[[", "lambda") } ve <- lavaan::lavInspect(object, "cov.lv") if(ngroup == 1) ve <- list(ve) if(ngroup == 1) { ps <- param[name == "psi"] te <- param[name == "theta"] be <- param[name == "beta"] } else { ps <- lapply(param, "[[", "psi") te <- lapply(param, "[[", "theta") be <- 
lapply(param, "[[", "beta") } SigmaHat <- lavaan::lavInspect(object, "cov.ov") if(ngroup == 1) { SigmaHat <- list(SigmaHat) S <- list(lavaan::lavInspect(object, "sampstat")$cov) } else { S <- lapply(lavaan::lavInspect(object, "sampstat"), function(x) x$cov) } threshold <- lavaan::lavInspect(object, "th") result <- list() for(i in 1:ngroup) { # Prepare for higher-order reliability l2var <- ve[[i]][secondFactor, secondFactor] l2load <- be[[1]][,secondFactor] indexl2 <- which(l2load != 0) commonl2 <- (sum(l2load)^2) * l2var errorl2 <- sum(ps[[i]][indexl2, indexl2]) # Prepare for lower-order reliability indexl1 <- which(apply(ly[[i]][,indexl2], 1, function(x) sum(x != 0)) > 0) l1load <- ly[[i]][,indexl2] %*% as.matrix(be[[1]][indexl2,secondFactor]) commonl1 <- (sum(l1load)^2) * l2var errorl1 <- sum(te[[i]][indexl1, indexl1]) uniquel1 <- 0 for (j in seq_along(indexl2)) { uniquel1 <- uniquel1 + (sum(ly[[i]][,indexl2[j]])^2) * ps[[i]][indexl2[j], indexl2[j]] } # Adjustment for direct loading from L2 to observed variables if(any(ly[[i]][,secondFactor] != 0)) { indexind <- which(ly[[i]][,secondFactor] != 0) if(length(intersect(indexind, indexl1)) > 0) stop("Direct and indirect loadings of higher-order factor to observed variables are specified at the same time.") commonl2 <- sum(c(ly[[i]][,secondFactor], l2load))^2 * l2var errorl2 <- errorl2 + sum(te[[i]][indexind, indexind]) commonl1 <- sum(c(ly[[i]][,secondFactor], l1load))^2 * l2var errorl1 <- errorl1 + sum(te[[i]][indexind, indexind]) } # Calculate Reliability omegaL1 <- commonl1 / (commonl1 + uniquel1 + errorl1) omegaL2 <- commonl2 / (commonl2 + errorl2) partialOmegaL1 <- commonl1 / (commonl1 + errorl1) result[[i]] <- c(omegaL1=omegaL1, omegaL2=omegaL2, partialOmegaL1=partialOmegaL1) } if(ngroup == 1) { result <- result[[1]] } else { names(result) <- lavaan::lavInspect(object, "group.label") } result } omegaCat <- function(truevar, implied, threshold, denom) { # denom could be polychoric correlation, model-implied 
correlation, or model-implied without error correlation polyc <- truevar invstdvar <- 1 / sqrt(diag(implied)) polyr <- diag(invstdvar) %*% polyc %*% diag(invstdvar) nitem <- ncol(implied) denom <- cov2cor(denom) sumnum <- 0 addden <- 0 for(j in 1:nitem) { for(jp in 1:nitem) { sumprobn2 <- 0 addprobn2 <- 0 t1 <- threshold[[j]] t2 <- threshold[[jp]] for(c in 1:length(t1)) { for(cp in 1:length(t2)) { sumprobn2 <- sumprobn2 + p2(t1[c], t2[cp], polyr[j, jp]) addprobn2 <- addprobn2 + p2(t1[c], t2[cp], denom[j, jp]) } } sumprobn1 <- sum(pnorm(t1)) sumprobn1p <- sum(pnorm(t2)) sumnum <- sumnum + (sumprobn2 - sumprobn1 * sumprobn1p) addden <- addden + (addprobn2 - sumprobn1 * sumprobn1p) } } reliab <- sumnum / addden reliab } p2 <- function(t1, t2, r) { mnormt::pmnorm(c(t1, t2), c(0,0), matrix(c(1, r, r, 1), 2, 2)) } polycorLavaan <- function(object) { ngroups <- lavaan::lavInspect(object, "ngroups") coef <- lavaan::lavInspect(object, "coef") targettaunames <- NULL if(ngroups == 1) { targettaunames <- rownames(coef$tau) } else { targettaunames <- rownames(coef[[1]]$tau) } barpos <- sapply(strsplit(targettaunames, ""), function(x) which(x == "|")) varnames <- unique(apply(data.frame(targettaunames, barpos - 1), 1, function(x) substr(x[1], 1, x[2]))) script <- "" for(i in 2:length(varnames)) { temp <- paste0(varnames[1:(i - 1)], collapse = " + ") temp <- paste0(varnames[i], "~~", temp, "\n") script <- paste(script, temp) } newobject <- refit(script, object) if(ngroups == 1) { return(lavaan::lavInspect(newobject, "coef")$theta) } else { return(lapply(lavaan::lavInspect(newobject, "coef"), "[[", "theta")) } } getThreshold <- function(object) { ngroups <- lavaan::lavInspect(object, "ngroups") coef <- lavaan::lavInspect(object, "coef") result <- NULL if(ngroups == 1) { targettaunames <- rownames(coef$tau) barpos <- sapply(strsplit(targettaunames, ""), function(x) which(x == "|")) varthres <- apply(data.frame(targettaunames, barpos - 1), 1, function(x) substr(x[1], 1, x[2])) 
result <- list(split(coef$tau, varthres)) } else { result <- list() for(g in 1:ngroups) { targettaunames <- rownames(coef[[g]]$tau) barpos <- sapply(strsplit(targettaunames, ""), function(x) which(x == "|")) varthres <- apply(data.frame(targettaunames, barpos - 1), 1, function(x) substr(x[1], 1, x[2])) result[[g]] <- split(coef[[g]]$tau, varthres) } } return(result) } invGeneralRelia <- function(w, truevar, totalvar) { 1-(t(w) %*% truevar %*% w) / (t(w) %*% totalvar %*% w) } invGeneralReliaCat <- function(w, polyr, threshold, denom, nitem) { # denom could be polychoric correlation, model-implied correlation, or model-implied without error correlation upper <- matrix(NA, nitem, nitem) lower <- matrix(NA, nitem, nitem) for(j in 1:nitem) { for(jp in 1:nitem) { sumprobn2 <- 0 addprobn2 <- 0 t1 <- threshold[[j]] t2 <- threshold[[jp]] for(c in 1:length(t1)) { for(cp in 1:length(t2)) { sumprobn2 <- sumprobn2 + p2(t1[c], t2[cp], polyr[j, jp]) addprobn2 <- addprobn2 + p2(t1[c], t2[cp], denom[j, jp]) } } sumprobn1 <- sum(pnorm(t1)) sumprobn1p <- sum(pnorm(t2)) upper[j, jp] <- (sumprobn2 - sumprobn1 * sumprobn1p) lower[j, jp] <- (addprobn2 - sumprobn1 * sumprobn1p) } } 1 - (t(w) %*% upper %*% w) / (t(w) %*% lower %*% w) } calcMaximalRelia <- function(truevar, totalvar, varnames) { start <- rep(1, nrow(truevar)) out <- nlminb(start, invGeneralRelia, truevar = truevar, totalvar = totalvar) if(out$convergence != 0) stop("The numerical method for finding the maximal reliability was not converged.") result <- 1 - out$objective weight <- out$par weight <- weight/mean(weight) names(weight) <- varnames attr(result, "weight") <- weight result } calcMaximalReliaCat <- function(polyr, threshold, denom, nitem, varnames) { start <- rep(1, nrow(polyr)) out <- nlminb(start, invGeneralReliaCat, polyr = polyr, threshold = threshold, denom = denom, nitem = nitem) if(out$convergence != 0) stop("The numerical method for finding the maximal reliability was not converged.") result <- 1 - 
out$objective weight <- out$par weight <- weight/mean(weight) names(weight) <- varnames attr(result, "weight") <- weight result } maximalRelia <- function(object) { param <- lavaan::lavInspect(object, "coef") ngroup <- lavaan::lavInspect(object, "ngroups") name <- names(param) if(ngroup == 1) { ly <- param[name == "lambda"] } else { ly <- lapply(param, "[[", "lambda") } ps <- lavaan::lavInspect(object, "cov.lv") if(ngroup == 1) ps <- list(ps) SigmaHat <- lavaan::lavInspect(object, "cov.ov") if(ngroup == 1) { SigmaHat <- list(SigmaHat) S <- list(lavaan::lavInspect(object, "sampstat")$cov) } else { S <- lapply(lavaan::lavInspect(object, "sampstat"), function(x) x$cov) } if(ngroup == 1) { tau <- param[name = "tau"] } else { tau <- lapply(param, "[[", "tau") } categorical <- length(tau) > 0 && !is.null(tau[[1]]) threshold <- NULL result <- list() if(categorical) { polycor <- polycorLavaan(object) if(ngroup == 1) polycor <- list(polycor) S <- lapply(polycor, function(x) x[rownames(ly[[1]]), rownames(ly[[1]])]) threshold <- getThreshold(object) # change to lavaan::lavInspect(object, "th") SigmaHat <- thetaImpliedTotalVar(object) } for(i in 1:ngroup) { truevar <- ly[[i]]%*%ps[[i]]%*%t(ly[[i]]) varnames <- colnames(truevar) if(categorical) { invstdvar <- 1 / sqrt(diag(SigmaHat[[i]])) polyr <- diag(invstdvar) %*% truevar %*% diag(invstdvar) nitem <- ncol(SigmaHat[[i]]) result[[i]] <- calcMaximalReliaCat(polyr, threshold[[i]], S[[i]], nitem, varnames) } else { result[[i]] <- calcMaximalRelia(truevar, S[[i]], varnames) } } if(ngroup == 1) { result <- result[[1]] } else { names(result) <- lavaan::lavInspect(object, "group.label") } result } semTools/vignettes/0000755000175100001440000000000013002400141014066 5ustar hornikuserssemTools/vignettes/partialInvariance.Rnw0000644000175100001440000002437213000201061020220 0ustar hornikusers\documentclass[12pt]{article} %%\VignetteIndexEntry{Partial Invariance} %%\VignetteDepends{semTools} \usepackage[utf8]{inputenc} 
\usepackage{amsfonts} \usepackage{amstext} \usepackage{amsmath} \usepackage{natbib} \title{A Note on Effect Size for Measurement Invariance} \author{Sunthud Pornprasertmanit} \begin{document} \maketitle This article aims to show the mathematical reasoning behind all effect sizes used in the \texttt{partialInvariance} and \texttt{partialInvarianceCat} functions in \texttt{semTools} package. In the functions, the following statistics are compared across groups: factor loadings, item intercepts (for continuous items), item thresholds (for categorical items), measurement error variances, and factor means. The comparison can be compared between two groups (e.g., Cohen's \emph{d}) or multiple groups (e.g., $R^2$). This note provides the details of the effect sizes in comparing two groups only. The comparison between multiple groups can be done by picking the reference group and compare the other groups with the reference group in the similar fashion to dummy variables. For example, the comparison between four groups would create three effect size values (i.e., Group 1 vs. Reference, Group 2 vs. Reference, and Group 3 vs. Reference). Alternatively, for the measurement invariance, the change in comparative fit index (CFI) can be used as the measure of effect size. In the measurement invariance literature \citep{cheung2002, meade2008}, the change in CFI is used to test the equality constraints for multiple items simultaneously. The functions in \texttt{semTools} will show the change in CFI for each individual item. That is, if an item were to allow to have different statistics (e.g., loading), how large the CFI would drop from the original model. Please note that more research is needed in finding the appropriate cutoffs for the change in CFI for individual items. Are the cutoffs of .002 or .01 appropriate for this context? In creating effect size, a target statistic needs to be standardized. Sample variances are used in the standardization formula. 
If researchers can assume that target variances across groups are equal in population, then pooled variances can be used in the standardization. The pooled variance $s^2_P$ can be computed as follows: $$s^2_P = \frac{\sum^G_{g=1}(n_g - 1)s^2_g}{\sum^G_{g=1}(n_g - 1)},$$ \noindent where $g$ represents the index of groups, $G$ is the number of groups, $s^2_g$ represents the variance of Group $g$, and $n_g$ is the Group $g$ size. If the variances are not assumed to be equal across groups, I recommend to pick a reference (baseline) group for the standardization. In the following sections, I will show how effect sizes are defined in each type of partial invariance testing. \section{Factor Loading} Let $\lambda_{ijg}$ be the unstandardized factor loading of Item $i$ from Factor $j$ in Group $g$. A standardized factor loading $\lambda^*_{ijg}$ can be computed \citep{muthen1998}: $$\lambda^*_{ijg} = \lambda_{ijg}\cdot\frac{\psi_{jg}}{\sigma_{ig}},$$ \noindent where $\psi_{jg}$ is the standard deviation of Factor $j$ from Group $g$ and $\sigma_{ig}$ is the total standard deviation of Item $i$ from Group $g$. To quantify the difference in factor loadings between groups in standardized scale, the standard deviation in the standardization formula needs to be the same across groups. If Group A and Group B are compared, the standardized difference in factor loading is defined: $$\Delta\lambda^*_{ij} = (\lambda_{ijA} - \lambda_{ijB})\cdot\frac{\psi_{jP}}{\sigma_{iP}},$$ \noindent where $\psi_{jP}$ is the pooled standard deviation of Factor $j$ and $\sigma_{iP}$ is the pooled total standard deviation of Item $i$. If Group A is the reference group, $\psi_{jA}$ and $\sigma_{iA}$ can substitute $\psi_{jP}$ and $\sigma_{iP}$. Assume that standardized factor loadings are from congeneric measurement model, standardized factor loadings represent the correlation between items and factors. 
\cite{cohen1992} provide a guideline for interpreting the magnitude of the
difference in correlations for independent groups. The correlations are
transformed using Fisher's $z$ transformation (the inverse hyperbolic
tangent):
$$q = \tanh^{-1}\left(\lambda_{ijA}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right) - \tanh^{-1}\left(\lambda_{ijB}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right)$$
Then, the $q$ values of .1, .3, and .5 are interpreted as small, medium, and
large effect sizes.

For continuous outcomes, the amount of mean difference implied by the factor
loading difference given a factor score can be used as an effect size
\citep{millsap2012}. Let $X_{ijg}$ be the observed score of Item $i$ loaded on
Factor $j$ from Group $g$ and let $W_{j}$ represent the score of Factor $j$.
The expected value of the observed score difference between Group A and
Group B is calculated as follows:
$$E\left(X_{ijA} - X_{ijB} \mid W_j \right) = \left( \nu_{iA} - \nu_{iB} \right) + \left( \lambda_{ijA} - \lambda_{ijB} \right) \times W_{j},$$
\noindent where $\nu_{ig}$ represents the intercept of Item $i$ in Group $g$.
Let the values between $W_{jl}$ and $W_{jh}$ be the values of interest. We can
find the expected difference in the observed scores over this range of factor
scores. \cite{millsap2012} proposed that, if the size of the expected
difference exceeds the value of a meaningful difference, the loading
difference is not negligible. See their article for a discussion of the
meaningful difference. Note that, in the \texttt{partialInvariance} function,
$W_{jl}$ is calculated by (a) finding the factor scores representing a low
\emph{z}-score (e.g., $-2$) from all groups and (b) selecting the lowest
factor score across all groups. $W_{jh}$ is calculated by (a) finding the
factor scores representing a high \emph{z}-score (e.g., 2) from all groups
and (b) selecting the highest factor score across all groups.

\section{Item Intercepts}

Let $\nu_{ig}$ be the intercept of Item $i$ in Group $g$.
A standardized intercept $\nu^*_{ig}$ is defined as follows \citep{muthen1998}: $$\nu^*_{ig} = \nu_{ig} / \sigma_{ig}.$$ Thus, the standardized difference between Groups A and B in item intercepts is defined: $$\Delta\nu^*_{i} = (\nu_{iA} - \nu_{iB}) / \sigma_{iP}.$$ Note that $\sigma_{iA}$ can substitute $\sigma_{iP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. The proportion of the intercept difference over the observed score difference may be used as an effect size \citep{millsap2012}: $$(\nu_{iA} - \nu_{iB}) / (M_{iA} - M_{iB}), $$ \noindent where $M_{ig}$ represents the observed mean of Item $i$ in Group $g$. \cite{millsap2012} noted that a relatively small proportion (e.g., less than 20\%) is ignorable. If the sign is negative or the value is over 1, the interpretation is doubtful. \section{Item Thresholds} Let $\tau_{cig}$ be the threshold categorizing between category $c$ and $c + 1$ for Item $i$ in Group $g$. Note that the maximum number of $c$ is the number of categories minus 1. Because thresholds are the location of the distribution underlying ordered categorical items (usually normal distribution), the location statistic can be standardized by dividing it by the standard deviation of the underlying distribution. The standardized threshold $\tau^*_{cig}$ is defined as follows: $$\tau^*_{cig} = \tau_{cig} / \sigma^u_{ig},$$ \noindent where $\sigma^u_{ig}$ is the standard deviation of the distribution underlying the categorical data for Item $i$ in Group $g$. In theta parameterization of categorical confirmatory factor analysis, $\sigma^u_{ig}$ may not be equal across groups. The standardized difference in thresholds between Group A and B needs the pooled standard deviation. 
The standardized difference in thresholds is defined: $$\Delta\tau^*_{ci} = (\tau_{ciA} - \tau_{ciB}) / \sigma^u_{iP}.$$ Note that $\sigma^u_{iA}$ can substitute $\sigma^u_{iP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. \section{Measurement Error Variances} Let $\theta_{ig}$ be the measurement error variance of Item $i$ in Group $g$. A standardized measurement error variance $\theta^*_{ig}$ is defined as follows \citep{muthen1998}: $$\theta^*_{ig} = \theta_{ig} / \sigma_{ig},$$ Thus, the standardized difference between Groups A and B in measurement error variances could be defined: $$\Delta\theta^*_{i} = (\theta_{iA} - \theta_{iB}) / \sigma_{iP}.$$ Note that $\sigma_{iA}$ can substitute $\sigma_{iP}$ if Group A is the reference group. However, there is no direct guideline to interpret the magnitude of the difference in measurement error variances according to Cohen (1992). A new standardized difference in measurement error variances is needed. Assume that $\sigma_{iP}$ is always greater than $\theta_{iA}$ and $\theta_{iB}$, which is usually correct, then $\theta_{iA} / \sigma_{iP}$ and $\theta_{iB} / \sigma_{iP}$ ranges between 0 and 1 similar to a proportion statistic. \cite{cohen1992} provided a guideline in interpreting the magnitude of the difference in proportions using arcsine transformation. The new index ($h$) is defined as follows: $$h = \sin^{-1}\sqrt{\frac{\theta_{iA}}{\sigma_{iP}}} - \sin^{-1}\sqrt{\frac{\theta_{iB}}{\sigma_{iP}}}.$$ Then, the $h$ values of .2, .5, and .8 are interpreted as small, medium, and large effect sizes. If items are continuous, the proportion of the error variance difference over the observed variance difference may be used as an effect size \citep{millsap2012}: $$(\theta_{iA} - \theta_{iB}) / (\sigma_{iA} - \sigma_{iB}). 
$$ \noindent If the sign is negative or the value is over 1, the interpretation is doubtful. \section{Factor Means} Let $\alpha_{jg}$ be the mean of Factor $j$ in Group $g$. A standardized factor mean $\alpha^*_{jg}$ is defined as follows \citep{muthen1998}: $$\alpha^*_{jg} = \alpha_{jg} / \psi_{jg}$$ Thus, the standardized difference between Groups A and B in factor means is defined: $$\Delta\alpha^*_{j} = (\alpha_{jA} - \alpha_{jB}) / \psi_{jP}.$$ Note that $\psi_{jA}$ can substitute $\psi_{jP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. \bibliographystyle{plainnat} \bibliography{partialInvariance} \end{document}semTools/vignettes/partialInvariance.bib0000644000175100001440000000270413000201061020201 0ustar hornikusers@preamble{ " \newcommand{\noop}[1]{} " } @article{cheung2002, title={Evaluating goodness-of-fit indexes for testing measurement invariance}, author={Cheung, Gordon W and Rensvold, Roger B}, journal={Structural equation modeling}, volume={9}, number={2}, pages={233--255}, year={2002}, publisher={Taylor \& Francis} } @article{meade2008, title={Power and sensitivity of alternative fit indices in tests of measurement invariance.}, author={Meade, Adam W and Johnson, Emily C and Braddy, Phillip W}, journal={Journal of Applied Psychology}, volume={93}, number={3}, pages={568}, year={2008}, publisher={American Psychological Association} } @book{muthen1998, title={Mplus technical appendices}, author={Muth{\'e}n, Bengt O}, publisher={Muth{\'e}n \& Muth{\'e}n}, address={Los Angeles, CA}, year={\noop{1998}1998--2004} } @article{cohen1992, title={A power primer.}, author={Cohen, Jacob}, journal={Psychological bulletin}, volume={112}, number={1}, pages={155--159}, year={1992}, publisher={American Psychological Association} } @incollection{millsap2012, author = {Millsap, Roger E and Olivera-Aguilar, Margarita}, title = {Investigating measurement 
invariance using confirmatory factor analysis}, editor = {Hoyle, Rick H}, booktitle = {Handbook of structural equation modeling}, pages = {380--392}, publisher = {Guilford}, address = {New York}, year = {2012} }semTools/MD50000644000175100001440000001514613002716223012410 0ustar hornikusers875a3b26b1fe2b929b518453651c1444 *DESCRIPTION 6e8b868ee425d966510d021978baa8f4 *NAMESPACE 310920ad14c1132389790abb4990fce5 *R/NET.R 1de19b1c4637cda48fc16f9c9e685f82 *R/PAVranking.R cbe816c8b97f2beec6e6723c6c59f122 *R/TSML.R e956d6438ded241574448335de494eb6 *R/auxiliary.R 3f9c1fed2999e61a63e4e4d73d0c1913 *R/clipboard.R 1954f2fad1064b1c8b2f5932c557ff40 *R/compareFit.R e902924f1c086d73e84c00476c2d4f52 *R/dataDiagnosis.R 93f4e9e1ee476d5710fa0c1b250cd1aa *R/efa.R cd6f7b2fd8463a822d7910dbfb10ab72 *R/fitIndices.R f8c84afdf152962238167427f0786ad2 *R/fitOpenMx.R f04d1166e8c2f052cfe07b546b59518f *R/fmi.R 4316dbce195eb595a780b0e23e5c5a44 *R/htmt.R 05080e921acdfef731616afc936f955f *R/imposeStart.R d2c267f412605b21c602689ac70c3b7a *R/indProd.R 0b3cb217de78ec40e0257184821c2980 *R/kd.R 0115c95b54d0d4d6df27e9fd9d194da6 *R/lisrel2lavaan.R 0c3a853ef650edde439661ba9a198af3 *R/loadingFromAlpha.R d9610fd2f7a2d093c6add24d138540b4 *R/longInvariance.R f78a87f068644a4f98c6cf5b5c86a873 *R/measurementInvariance.R 01e8377cc0ed4e42e5b95fd9468998f6 *R/measurementInvarianceCat.R db826a05fcdc27b1d802d877334f9997 *R/miPowerFit.R af20e44ed7512b1df96caac698727355 *R/missingBootstrap.R 6e6aa020023ba9c41ec493dc65bf5736 *R/monteCarloMed.R bdfe2a592ba3e4095583a2f2f436f7ef *R/mvrnonnorm.R 07e6f5965634fb8c70fdfd5980df43f8 *R/parcelAllocation.R 9402a55a88552d635f0a84e15780ffe1 *R/partialInvariance.R d9c1c95d9210b6e731ad5168e5cae11d *R/partialInvarianceCat.R ada7d2fed73343a3e17eb66871de36e4 *R/permuteMeasEq.R e8feb00445a9a27e21319d84b0632bfe *R/poolMAlloc.R fed7a1e44769dbeb3e0dbdf111d75e65 *R/powerAnalysis.R ee97b4a367f247d1a62c23d534320b77 *R/powerAnalysisNested.R 3b676e747ea08e121a123244e8eede3e *R/powerAnalysisSS.R 
ed0ac285eb7c00dc5b60cd9db8f89794 *R/probeInteraction.R 0176a1b59771cad73def3e1e9934dafe *R/quark.R 3a9f6041660959fe18a21bfec482f0c5 *R/reliability.R 66fbcd194d75454c4e4e5ed47bd5c445 *R/residualCovariate.R af8506899739e987bd9705a5b385fe9b *R/runMI.R cca98371f1e860d10b0821e4bd549a4b *R/singleParamTest.R e197f2d14cf82ba70a2de639c46ac921 *R/splitSample.R 5ebe2b6b09164210d8e7db37504a0597 *R/tukeySEM.R 55e1c48f9753406ad8ab3f5c086f0c04 *R/wald.R 601d9cc4e9066fdc9f64dea1a68fa9c1 *R/zzz.R 356b4f195cdab4ad90aa68c9e36149ee *build/vignette.rds 4d1090db8b160303f235075ebbda16d7 *data/dat2way.rda c56c2358c6e4d82c4cd7855e4df37282 *data/dat3way.rda d3f489a4fefd07ccfcd3875bc71e0b83 *data/datCat.rda ba21a4b5113fd7515fbf26fccfe0fe17 *data/exLong.rda 35cb60d186fd440645d81b56e22a3fa2 *data/simParcel.rda 24e62650eb20ada41123144a32d0f7c7 *inst/CITATION 81dc319ba4e3739b20cbcd3196f00e1e *inst/doc/partialInvariance.Rnw 4f5891dc46f7212c1ce6189c4467adba *inst/doc/partialInvariance.bib 423b2dcd312ca2190fb90c21d50a0f02 *inst/doc/partialInvariance.pdf 051afce8bfe1f5850c9a928a4c211113 *man/BootMiss-class.Rd 789bb57cd701e69fa76d75664f42760a *man/EFA-class.Rd 3088e7c0e7b22cbdc83038b4c9d2eb93 *man/FitDiff-class.Rd cf8ebe0461f358886c862819ff92e84d *man/Net-class.Rd 758acdd5f9e6d28060f1dc5b924f5aed *man/PAVranking.Rd 400d6c3328e9f845b4967b48e464aa3f *man/SSpower.Rd 2d64d6a37f591ecfd9002c639a968c10 *man/auxiliary.Rd fb9a663d9c663742a32c1561ba09fe39 *man/bsBootMiss.Rd 8a2cf1cd89b514bb2238b6ac96b7727b *man/chisqSmallN.Rd 4b1bd4d6a3251465a1cf68b8246c0da5 *man/clipboard.Rd a5ec84228b781300dfcde1d7c65f055f *man/combinequark.Rd 89e6fc45067caf0cc9af0db0e6b9d956 *man/compareFit.Rd 0652ec1f468f7d5b7fbaab93bbdb686b *man/dat2way.Rd 34151a790c2733b1a767c45bb53f69ce *man/dat3way.Rd aa3fc3f7d78f11789afb3a9090565e63 *man/datCat.Rd 3d653edc46c76f7945bed07f1cd40306 *man/efaUnrotate.Rd 9f4002301d9bde6a1b7e0b34894a3585 *man/exLong.Rd 835661a7f893987d0eaa128aea9b9c35 *man/findRMSEApower.Rd 0c615412362d8657d03743351d22af52 
*man/findRMSEApowernested.Rd 3a777bcdbcb0b0cf6a7cffee92af94e4 *man/findRMSEAsamplesize.Rd ec1669651a06b492cc61f45386789ec9 *man/findRMSEAsamplesizenested.Rd 3a2a6e96669f9b9fb6a828953b4db536 *man/fitMeasuresMx.Rd b6a3e2b6dfe6fe01a6b640dcf5c874f6 *man/fmi.Rd 8f6b842409cdfaa16c5768084b43c8e4 *man/htmt.Rd 5610302699505c139b905b4123f5835d *man/imposeStart.Rd 6c0258e5b0ad4b40bcdbfd92a1971fe3 *man/indProd.Rd bc9f4f1a34a71effb92d3affac95e07d *man/kd.Rd 289810e0631423fdfa7e5b26e396e035 *man/kurtosis.Rd 2f9ee2abda674a63efde872967f77fe3 *man/lavaanStar-class.Rd 66e84e27e37a6bc50e413bb50d713fca *man/lisrel2lavaan.Rd e2f1d5c0754e87cdf87032661828f259 *man/loadingFromAlpha.Rd 55942878a11ab359609e65862677ecad *man/longInvariance.Rd df58577077bf9680e8decd1efbc64c19 *man/mardiaKurtosis.Rd 86d6177745e4b46ece1af0f9d36ac7a1 *man/mardiaSkew.Rd 58f35fbfb69dec2a8ab366f6c07a59bc *man/maximalRelia.Rd fb1c235e431a1bfec644031e76019d1c *man/measurementInvariance.Rd 2c86eae80ea6061a3e982c01139130a7 *man/measurementInvarianceCat.Rd 5d7f84717c177eccc631a20eb04934b5 *man/miPowerFit.Rd 958cefe2370f406ff8d7f2be0b734bc5 *man/monteCarloMed.Rd 4c2bd94d158a06ede3ba4851ea76c9de *man/moreFitIndices.Rd d4299fa9a1ddcf8375457f2a5034243d *man/mvrnonnorm.Rd 7dc60009699fac33154c94b5140d3b5c *man/net.Rd 1e338d43ce014a1c4056dd455cfa8c0e *man/nullMx.Rd 84dcdab468b85fb0c30d284deab74d93 *man/nullRmsea.Rd 7e0d75f2c92e277030a75e0c288910e7 *man/parcelAllocation.Rd c791d5d02532bc40d11d1467f095b630 *man/partialInvariance.Rd 36a3a983da75072e64121559fca4bb97 *man/permuteMeasEq-class.Rd 5a5919d5715199454303c56a53bedc0e *man/permuteMeasEq.Rd 2a845d7afb0981de4a1af72dc0813008 *man/plotProbe.Rd e553cfaa865d0843e9d9118632cbc9d6 *man/plotRMSEAdist.Rd 319bf923b4db20b7c468046d2b2f16bd *man/plotRMSEApower.Rd dc2a73fdb1aa348f23ea0c8a5ca1d594 *man/plotRMSEApowernested.Rd 42f3e747345ab5eedb5fe1cadd83f10b *man/poolMAlloc.Rd 31d19c926b85f7911dc0228945ef85f9 *man/probe2WayMC.Rd 327ca9e06ece593ced47e82eb21b76d0 *man/probe2WayRC.Rd 
9d8edeb8cd70c19d6a90b123216116ac *man/probe3WayMC.Rd a26f32fde33328cdfb3591de3e59d17c *man/probe3WayRC.Rd abbd314af9b687274821d2a76b0138bd *man/quark.Rd 1e3835c9587a78a1af4ce86a497e342f *man/reliability.Rd b9d6698b2d7e616acbe47e7f2b9e12c7 *man/reliabilityL2.Rd a6839cb39fadbc13478e12b19ee880fb *man/residualCovariate.Rd ebec2c18f86b00ae485563c3a797145d *man/rotate.Rd 69dbae753e1b9dcc1787b98c7b24577d *man/runMI.Rd a736a81c47c188ac312024124d48bc1b *man/saturateMx.Rd 629e0a71164edf949d2a4bad4a641381 *man/simParcel.Rd 830da4c698bf219103e106267a56203f *man/singleParamTest.Rd a4ef14fd6ab8ee46694d51f527c4fb70 *man/skew.Rd 8aded169014b5c23c16759de4cbb685c *man/splitSample.rd 1de20b1661876f07945cf473c0b00777 *man/standardizeMx.Rd 48fbef830fc4370886705ea4f4f11d28 *man/tukeySEM.rd 76a1a8228891e906ce393d2e3aad7b46 *man/twostage-class.Rd c06f7f5e182fdcfdb4fe8b080c6e6059 *man/twostage.Rd b600ba526ceb1b7d6075fa3f16bb2db9 *man/wald.Rd 81dc319ba4e3739b20cbcd3196f00e1e *vignettes/partialInvariance.Rnw 4f5891dc46f7212c1ce6189c4467adba *vignettes/partialInvariance.bib semTools/build/0000755000175100001440000000000013002400141013155 5ustar hornikuserssemTools/build/vignette.rds0000644000175100001440000000032213002400141015511 0ustar hornikusersO 0R"')DnDvd<ιlZؙ@o!eĕ(FDALth)+ 4 _rѴΥu+Aj-: ~X|0ޣ?['|xM9kNdjQfk_vp776ZpmAIZUsemTools/DESCRIPTION0000644000175100001440000000626613002716223013611 0ustar hornikusersPackage: semTools Title: Useful Tools for Structural Equation Modeling Version: 0.4-14 Authors@R: c(person(given = c("Terrence","D."), family = "Jorgensen", role = c("aut", "cre"), email="TJorgensen314@gmail.com"), person(given = "Sunthud", family = "Pornprasertmanit", role = "aut", email = "psunthud@gmail.com"), person(given = "Patrick", family = "Miller", role = "aut", email="pmille13@nd.edu"), person(given = "Alexander", family = "Schoemann", role = "aut", email="schoemanna@ecu.edu"), person(given = "Yves", family = "Rosseel", role = "aut", email="Yves.Rosseel@UGent.be"), 
person(given = "Corbin", family = "Quick", role = "ctb", email="corbinq@umich.edu"), person(given = "Mauricio", family = "Garnier-Villarreal", role = "ctb", email="mgv@ku.edu"), person(given = "James", family = "Selig", role = "ctb", email="selig@unm.edu"), person(given = "Aaron", family = "Boulton", role = "ctb", email="aboulton@email.unc.edu"), person(given = "Kristopher", family = "Preacher", role = "ctb", email="kris.preacher@vanderbilt.edu"), person(given = "Donna", family = "Coffman", role = "ctb", email="dlc30@psu.edu"), person(given = "Mijke", family = "Rhemtulla", role = "ctb", email="M.T.Rhemtulla@uva.nl"), person(given = "Alexander", family = "Robitzsch", role = "ctb", email="a.robitzsch@bifie.at"), person(given = "Craig", family = "Enders", role = "ctb", email="Craig.Enders@asu.edu"), person(given = "Ruber", family = "Arslan", role = "ctb", email="rubenarslan@gmail.com"), person(given = "Bell", family = "Clinton", role = "ctb", email="clintonbell@ku.edu"), person(given = "Pavel", family = "Panko", role = "ctb", email="pavel.panko@ttu.edu"), person(given = "Edgar", family = "Merkle", role = "ctb", email="merklee@missouri.edu"), person(given = "Steven", family = "Chesnut", role = "ctb", email="Steven.Chesnut@usm.edu"), person(given = "Jarrett", family = "Byrnes", role = "ctb", email="Jarrett.Byrnes@umb.edu"), person(given = "Jason", family = "Rights", role = "ctb", email="jason.d.rights@vanderbilt.edu"), person(given = "Ylenio", family = "Longo", role = "ctb", email="yleniolongo@gmail.com") ) Description: Provides useful tools for structural equation modeling packages. Depends: R(>= 3.0), methods, lavaan(>= 0.5-22), utils, stats, graphics Suggests: MASS, parallel, Amelia, mice, foreign, OpenMx(>= 2.0.0), GPArotation, mnormt, boot License: GPL (>= 2) LazyData: yes LazyLoad: yes URL: https://github.com/simsem/semTools/wiki Author: Terrence D. 
Jorgensen [aut, cre], Sunthud Pornprasertmanit [aut], Patrick Miller [aut], Alexander Schoemann [aut], Yves Rosseel [aut], Corbin Quick [ctb], Mauricio Garnier-Villarreal [ctb], James Selig [ctb], Aaron Boulton [ctb], Kristopher Preacher [ctb], Donna Coffman [ctb], Mijke Rhemtulla [ctb], Alexander Robitzsch [ctb], Craig Enders [ctb], Ruber Arslan [ctb], Bell Clinton [ctb], Pavel Panko [ctb], Edgar Merkle [ctb], Steven Chesnut [ctb], Jarrett Byrnes [ctb], Jason Rights [ctb], Ylenio Longo [ctb] Maintainer: Terrence D. Jorgensen Date/Publication: 2016-10-22 19:06:27 NeedsCompilation: no Packaged: 2016-10-21 11:47:45 UTC; tdjorgen Repository: CRAN semTools/man/0000755000175100001440000000000013002107075012643 5ustar hornikuserssemTools/man/PAVranking.Rd0000644000175100001440000002520013000201061015115 0ustar hornikusers\name{PAVranking} \alias{PAVranking} \title{ Parcel-Allocation Variability in Model Ranking } \description{ This function quantifies and assesses the consequences of parcel-allocation variability for model ranking of structural equation models (SEMs) that differ in their structural specification but share the same parcel-level measurement specification (see Sterba & Rights, 2016). This function is a modified version of \code{\link{parcelAllocation}} which can be used with only one SEM in isolation. The \code{PAVranking} function repeatedly generates a specified number of random item-to-parcel allocations, and then fits two models to each allocation. Output includes summary information about the distribution of model selection results (including plots) and the distribution of results for each model individually, across allocations within-sample. Note that this function can be used when selecting among more than two competing structural models as well (see instructions below involving \code{seed}). } \usage{ PAVranking(nPerPar, facPlc, nAlloc=100, parceloutput = 0, syntaxA, syntaxB, dataset, names = NULL, leaveout=0, seed=NA, ...) 
} \arguments{ \item{nPerPar}{ A list in which each element is a vector, corresponding to each factor, indicating sizes of parcels. If variables are left out of parceling, they should not be accounted for here (i.e., there should not be parcels of size "1"). } \item{facPlc}{ A list of vectors, each corresponding to a factor, specifying the item indicators of that factor (whether included in parceling or not). Either variable names or column numbers. Variables not listed will not be modeled or included in output datasets. } \item{nAlloc}{ The number of random allocations of items to parcels to generate. } \item{syntaxA}{ lavaan syntax for Model A. Note that, for likelihood ratio test (LRT) results to be interpreted, Model A should be nested within Model B (though the function will still provide results when Models A and B are nonnested). } \item{syntaxB}{ lavaan syntax for Model B. Note that, for likelihood ratio test (LRT) results to be appropriate, Model A should be nested within Model B (though the function will still provide results when Models A and B are nonnested). } \item{dataset}{ Item-level dataset } \item{parceloutput}{ folder where parceled data sets will be outputted (note for Windows users: file path must specified using forward slashes). } \item{seed}{ (Optional) Random seed used for parceling items. When the same random seed is specified and the program is re-run, the same allocations will be generated. The seed argument can be used to assess parcel-allocation variability in model ranking when considering more than two models. For each pair of models under comparison, the program should be rerun using the same random seed. Doing so ensures that multiple model comparisons will employ the same set of parcel datasets. } \item{names}{ (Optional) A character vector containing the names of parceled variables. } \item{leaveout}{ (Optional) A vector of variables to be left out of randomized parceling. Either variable names or column numbers are allowed. 
} \item{\dots}{ Additional arguments to be passed to \code{\link[lavaan]{lavaan}} } } \details{ This is a modified version of \code{\link{parcelAllocation}} which was, in turn, based on the SAS macro \code{ParcelAlloc} (Sterba & MacCallum, 2010). The \code{PAVranking} function produces results discussed in Sterba and Rights (2016) relevant to the assessment of parcel-allocation variability in model selection and model ranking. Specifically, the \code{PAVranking} function first uses a modified version of parcelAllocation to generate a given number (\code{nAlloc}) of item-to-parcel allocations. Then, \code{PAVranking} provides the following new developments: specifying more than one SEM and producing results for Model A and Model B separately that summarize parcel allocation variability in estimates, standard errors, and fit indices. \code{PAVranking} also newly produces results summarizing parcel allocation variability in model selection index values and model ranking between Models A and B. Additionally, \code{PAVranking} newly allows for nonconverged solutions and outputs the proportion of allocations that converged as well as the proportion of proper solutions (results are summarized for converged and proper allocations only). For further details on the benefits of the random allocation of items to parcels, see Sterba (2011) and Sterba and MacCallum (2010). NOTE: This function requires the \code{lavaan} package. Missing data code needs to be \code{NA}. If function returns \code{"Error in plot.new() : figure margins too large,"} user may need to increase size of the plot window and rerun. 
} \value{ \item{Estimates_A, Estimates_B}{A table containing results related to parameter estimates (in table Estimates_A for Model A and in table Estimates_B for Model B) with columns corresponding to parameter name, average parameter estimate across allocations, standard deviation of parameter estimate across allocations, the maximum parameter estimate across allocations, the minimum parameter estimate across allocations, the range of parameter estimates across allocations, and the percent of allocations in which the parameter estimate is significant.} \item{SE_A, SE_B}{A table containing results related to standard errors (in table SE_A for Model A and in table SE_B for Model B) with columns corresponding to parameter name, average standard error across allocations, the standard deviation of standard errors across allocations, the maximum standard error across allocations, the minimum standard error across allocations, and the range of standard errors across allocations.} \item{Fit_A, Fit_B}{A table containing results related to model fit (in table Fit_A for Model A and in table Fit_B for Model B) with columns corresponding to fit index name, the average of the fit index across allocations, the standard deviation of the fit index across allocations, the maximum of the fit index across allocations, the minimum of the fit index across allocations, the range of the fit index across allocations, and the percent of allocations where the chi-square test of absolute fit was significant.} \item{LRT Summary, Model A vs. Model B}{A table with columns corresponding to: average likelihood ratio test (LRT) statistic for comparing Model A vs. Model B (null hypothesis is no difference in fit between Models A and B in the population), degrees of freedom (i.e. 
difference in the number of free parameters between Models A and B), as well as the standard deviation, maximum, and minimum of LRT statistics across allocations, and the percent of allocations where the LRT was significant (indicating preference for the more complex Model B). } \item{Fit index differences}{A table containing percentage of allocations where Model A is preferred over Model B according to BIC, AIC, RMSEA, CFI, TLI and SRMR and where Model B is preferred over Model A according to the same indices. 
Also includes the average amount by which the given model is preferred (calculated only using allocations where it was preferred).} \item{Fit index difference histograms}{Histograms are automatically outputted showing the distribution of the differences (Model A - Model B) for each fit index and for the p-value of the likelihood ratio difference test.} \item{Percent of Allocations with | BIC Diff | > 10}{A table containing the percentage of allocations with (BIC for Model A) - (BIC for Model B) < -10, indicating "very strong evidence" to prefer Model A over Model B and the percentage of allocations with (BIC for Model A) - (BIC for Model B) > 10, indicating "very strong evidence" to prefer Model B over Model A (Raftery, 1995).} \item{Converged and proper}{A table containing the proportion of allocations that converged for Model A, Model B, and both models, and the proportion of allocations with converged and proper solutions for Model A, Model B, and both models.} } \references{ Raftery, A. E. (1995). Bayesian model selection in social research. \emph{Sociological Methodology, 25}, 111-163. Sterba, S. K. (2011). Implications of parcel-allocation variability for comparing fit of item-solutions and parcel-solutions. \emph{Structural Equation Modeling: A Multidisciplinary Journal, 18}(4), 554-577. Sterba, S. K., & MacCallum, R. C. (2010). Variability in parameter estimates and model fit across repeated allocations of items to parcels. \emph{Multivariate Behavioral Research, 45}(2), 322-358. Sterba, S. K., & Rights, J. D. (2016). Effects of parceling on model selection: Parcel-allocation variability in model ranking. \emph{Psychological Methods}. \url{http://dx.doi.org/10.1037/met0000067} } \seealso{ \code{\link{parcelAllocation}}, \code{\link{poolMAlloc}} } \author{ Jason D. 
Rights (Vanderbilt University; \email{jason.d.rights@vanderbilt.edu}) The author would also like to credit Corbin Quick and Alexander Schoemann for providing the original parcelAllocation function on which this function is based. } \examples{ \dontrun{ ## Lavaan syntax for Model A: a 2 Uncorrelated ## factor CFA model to be fit to parceled data parmodelA <- ' f1 =~ NA*p1f1 + p2f1 + p3f1 f2 =~ NA*p1f2 + p2f2 + p3f2 p1f1 ~ 1 p2f1 ~ 1 p3f1 ~ 1 p1f2 ~ 1 p2f2 ~ 1 p3f2 ~ 1 p1f1 ~~ p1f1 p2f1 ~~ p2f1 p3f1 ~~ p3f1 p1f2 ~~ p1f2 p2f2 ~~ p2f2 p3f2 ~~ p3f2 f1 ~~ 1*f1 f2 ~~ 1*f2 f1 ~~ 0*f2 ' ## Lavaan syntax for Model B: a 2 Correlated ## factor CFA model to be fit to parceled data parmodelB <- ' f1 =~ NA*p1f1 + p2f1 + p3f1 f2 =~ NA*p1f2 + p2f2 + p3f2 p1f1 ~ 1 p2f1 ~ 1 p3f1 ~ 1 p1f2 ~ 1 p2f2 ~ 1 p3f2 ~ 1 p1f1 ~~ p1f1 p2f1 ~~ p2f1 p3f1 ~~ p3f1 p1f2 ~~ p1f2 p2f2 ~~ p2f2 p3f2 ~~ p3f2 f1 ~~ 1*f1 f2 ~~ 1*f2 f1 ~~ f2 ' ##specify items for each factor f1name <- colnames(simParcel)[1:9] f2name <- colnames(simParcel)[10:18] ##run function PAVranking(nPerPar=list(c(3,3,3),c(3,3,3)), facPlc=list(f1name,f2name), nAlloc=100, parceloutput=0, syntaxA=parmodelA, syntaxB=parmodelB, dataset = simParcel, names=list("p1f1","p2f1","p3f1","p1f2","p2f2","p3f2"), leaveout=0) } } semTools/man/lavaanStar-class.Rd0000644000175100001440000000575313001167164016347 0ustar hornikusers\name{lavaanStar-class} \docType{class} \alias{lavaanStar-class} \alias{inspect,lavaanStar-method} \alias{summary,lavaanStar-method} \alias{anova,lavaanStar-method} \alias{vcov,lavaanStar-method} \title{Class For Representing A (Fitted) Latent Variable Model with Additional Elements} \description{This is the \code{lavaan} class that contains additional information about the fit values from the null model. Some functions are adjusted according to the change.} \section{Objects from the Class}{ Objects can be created via the \code{\link{auxiliary}} function or \code{\link{runMI}}. 
} \section{Slots}{ \describe{ \item{\code{call}:}{The function call as returned by \code{match.call()}.} \item{\code{timing}:}{The elapsed time (user+system) for various parts of the program as a list, including the total time.} \item{\code{Options}:}{Named list of options that were provided by the user, or filled-in automatically.} \item{\code{ParTable}:}{Named list describing the model parameters. Can be coerced to a data.frame. In the documentation, this is called the `parameter table'.} \item{\code{Data}:}{Object of internal class \code{"Data"}: information about the data.} \item{\code{SampleStats}:}{Object of internal class \code{"SampleStats"}: sample statistics} \item{\code{Model}:}{Object of internal class \code{"Model"}: the internal (matrix) representation of the model} \item{\code{Fit}:}{Object of internal class \code{"Fit"}: the results of fitting the model} \item{\code{nullfit}:}{The fit-indices information from the null model} \item{\code{imputed}:}{The list of information from running multiple imputation. The first element is the convergence rate of the target and null models. The second element is the fraction missing information. The first estimate of FMI (FMI.1) is asymptotic FMI and the second estimate of FMI (FMI.2) is corrected for small numbers of imputation. The third element is the fit values of the target model by the specified chi-squared methods. The fourth element is the fit values of the null model by the specified chi-square methods. The fifth element is the adjusted log-likelihood for target model and saturated model. 
The sixth element is the chi-square values and the log-likehood values (based on fixing parameter estimates as the estimated values) from each imputed data set.} \item{\code{imputedResults}:}{Results from fitting models for imputed data sets.} \item{\code{auxNames}:}{The list of auxiliary variables in the analysis.} } } \references{ see \code{\linkS4class{lavaan}}} \seealso{ \code{\link{auxiliary}}; \code{\link{runMI}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' dat <- data.frame(HolzingerSwineford1939, z=rnorm(nrow(HolzingerSwineford1939), 0, 1)) fit <- cfa(HS.model, data=dat) fitaux <- auxiliary(HS.model, aux="z", data=dat, fun="cfa") } semTools/man/probe2WayRC.Rd0000644000175100001440000001177413000201061015227 0ustar hornikusers\name{probe2WayRC} \alias{probe2WayRC} \title{ Probing two-way interaction on the residual-centered latent interaction } \description{ Probing interaction for simple intercept and simple slope for the residual-centered latent two-way interaction (Pornprasertmanit, Schoemann, Geldhof, & Little, submitted) } \usage{ probe2WayRC(fit, nameX, nameY, modVar, valProbe) } \arguments{ \item{fit}{The lavaan model object used to evaluate model fit} \item{nameX}{The vector of the factor names used as the predictors. The first-order factor will be listed first. The last name must be the name representing the interaction term.} \item{nameY}{The name of factor that is used as the dependent variable.} \item{modVar}{The name of factor that is used as a moderator. 
The effect of the other independent factor on each moderator variable value will be probed.} \item{valProbe}{The values of the moderator that will be used to probe the effect of the other independent factor.} } \details{ Before using this function, researchers need to make the products of the indicators between the first-order factors and residualize the products by the original indicators (Lance, 1988; Little, Bovaird, & Widaman, 2006). The process can be automated by the \code{\link{indProd}} function. Note that the indicator products can be made for all possible combination or matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the regression with latent interaction will be used to fit all original indicators and the product terms. To use this function the model must be fit with a mean structure. See the example for how to fit the product term below. Once the lavaan result is obtained, this function will be used to probe the interaction. The probing process on residual-centered latent interaction is based on transforming the residual-centered result into the no-centered result. See Pornprasertmanit, Schoemann, Geldhof, and Little (submitted) for further details. Note that this approach based on a strong assumption that the first-order latent variables are normally distributed. The probing process is applied after the no-centered result (parameter estimates and their covariance matrix among parameter estimates) has been computed. See the \code{\link{probe2WayMC}} for further details. } \value{ A list with two elements: \enumerate{ \item{SimpleIntercept} The intercepts given each value of the moderator. This element will be shown only if the factor intercept is estimated (e.g., not fixed as 0). \item{SimpleSlope} The slopes given each value of the moderator. } In each element, the first column represents the values of the moderators specified in the \code{valProbe} argument. The second column is the simple intercept or simple slope. 
The third column is the standard error of the simple intercept or simple slope. The fourth column is the Wald (\emph{z}) statistic. The fifth column is the \emph{p}-value testing whether the simple intercepts or slopes are different from 0. } \references{ Lance, C. E. (1988). Residual centering, exploratory and confirmatory moderator analysis, and decomposition of effects in path models containing interactions. \emph{Applied Psychological Measurement, 12}, 163-175. Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of orthogonalizing powered and product terms: Implications for modeling interactions. \emph{Structural Equation Modeling, 13}, 497-519. Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}, 275-300. Pornprasertmanit, S., Schoemann, A. M., Geldhof, G. J., & Little, T. D. (submitted). \emph{Probing latent interaction estimated with a residual centering approach.} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. \item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe3WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. 
} } \examples{ library(lavaan) dat2wayRC <- orthogonalize(dat2way, 1:3, 4:6) model1 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f12 =~ x1.x4 + x2.x5 + x3.x6 f3 =~ x7 + x8 + x9 f3 ~ f1 + f2 + f12 f12 ~~0*f1 f12 ~~ 0*f2 x1 ~ 0*1 x4 ~ 0*1 x1.x4 ~ 0*1 x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f12 ~ NA*1 f3 ~ NA*1 " fitRC2way <- sem(model1, data=dat2wayRC, meanstructure=TRUE, std.lv=FALSE) summary(fitRC2way) result2wayRC <- probe2WayRC(fitRC2way, c("f1", "f2", "f12"), "f3", "f2", c(-1, 0, 1)) result2wayRC } semTools/man/chisqSmallN.Rd0000644000175100001440000000410213001164727015353 0ustar hornikusers\name{chisqSmallN} \alias{chisqSmallN} \title{ \emph{k}-factor correction for chi-squared test statistic } \description{ Calculate \emph{k}-factor correction for chi-squared model-fit test statistic to adjust for small sample size. } \usage{ chisqSmallN(fit0, fit1 = NULL, ...) } \arguments{ \item{fit0}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.} \item{fit1}{Optional additional \linkS4class{lavaan} model, in which \code{fit0} is nested. If \code{fit0} has fewer \emph{df} than \code{fit1}, the models will be swapped, still on the assumption that they are nested.} \item{\dots}{Additional arguments to the \code{\link[lavaan]{lavTestLRT}} function.} } \details{ The \emph{k}-factor correction (Nevitt & Hancock, 2004) is a global fit index which can be computed by: \deqn{ kc = 1 - \frac{2 \times P + 4 \times K + 5}{6 \times N}} where \eqn{N} is the sample size when using normal likelihood, or \eqn{N - 1} when using \code{likelihood = 'wishart'}. } \value{ A numeric vector including the unadjusted (naive) chi-squared test statistic, the \emph{k}-factor correction, the corrected test statistic, the \emph{df} for the test, and the \emph{p} value for the test under the null hypothesis that the model fits perfectly (or that the 2 models have equivalent fit). } \references{ Nevitt, J., & Hancock, G. R. (2004). 
Evaluating small sample approaches for model test statistics in structural equation modeling. \emph{Multivariate Behavioral Research, 39}(3), 439-478. doi:10.1207/S15327906MBR3903_3 } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \examples{ HS.model <- ' visual =~ x1 + b1*x2 + x3 textual =~ x4 + b2*x5 + x6 speed =~ x7 + b3*x8 + x9 ' fit1 <- cfa(HS.model, data = HolzingerSwineford1939) ## test a single model (implicitly compared to a saturated model) chisqSmallN(fit1) ## fit a more constrained model fit0 <- cfa(HS.model, data = HolzingerSwineford1939, orthogonal = TRUE) ## compare 2 models chisqSmallN(fit1, fit0) } semTools/man/auxiliary.Rd0000644000175100001440000001111313000201061015122 0ustar hornikusers\name{auxiliary} \alias{auxiliary} \alias{cfa.auxiliary} \alias{sem.auxiliary} \alias{growth.auxiliary} \alias{lavaan.auxiliary} \title{ Analyzing data with full-information maximum likelihood with auxiliary variables } \description{ Analyzing data with full-information maximum likelihood with auxiliary variables. The techniques used to account for auxiliary variables are both extra-dependent-variables and saturated-correlates approaches (Enders, 2008). The extra-dependent-variables approach is used for all single variables in the model (such as covariates or single-indicator dependent varaible) For variables that are belong to a multiple-indicator factor, the saturated-correlates approach is used. Note that all covariates are treated as endogenous varaibles in this model (fixed.x = FALSE) so multivariate normality is assumed for the covariates. CAUTION: (1) this function will automatically change the missing data handling method to full-information maximum likelihood and (2) this function is still not applicable for categorical variables (because the maximum likelhood method is not available in lavaan for estimating models with categorical variables currently). } \usage{ auxiliary(model, aux, fun, ...) 
cfa.auxiliary(model, aux, ...) sem.auxiliary(model, aux, ...) growth.auxiliary(model, aux, ...) lavaan.auxiliary(model, aux, ...) } \arguments{ \item{model}{ The \code{lavaan} object, the parameter table, or lavaan script. If the \code{lavaan} object is provided, the \code{lavaan} object must be evaluated with mean structure. } \item{aux}{ The list of auxiliary variable } \item{fun}{ The character of the function name used in running lavaan model (\code{"cfa"}, \code{"sem"}, \code{"growth"}, \code{"lavaan"}). } \item{\dots}{ The additional arguments in the \code{\link[lavaan]{lavaan}} function. } } \value{ The \code{\linkS4class{lavaanStar}} object which contains the original \code{lavaan} object and the additional values of the null model, which need to be adjusted to account for auxiliary variables. } \references{ Enders, C. K. (2008). A note of the use of missing auxiliary variables in full information maximum likelihood-based structural equation models. \emph{Structural Equation Modeling, 15}, 434-448. 
} \seealso{ \code{\linkS4class{lavaanStar}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ # Example of confirmatory factor analysis HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' dat <- data.frame(HolzingerSwineford1939, z=rnorm(nrow(HolzingerSwineford1939), 0, 1)) fit <- cfa(HS.model, data=dat, meanstructure=TRUE) fitaux <- auxiliary(HS.model, aux="z", data=dat, fun="cfa") # Use lavaan script fitaux <- cfa.auxiliary(fit, aux="z", data=dat) # Use lavaan output # Example of multiple groups confirmatory factor analysis fitgroup <- cfa(HS.model, data=dat, group="school", meanstructure=TRUE) fitgroupaux <- cfa.auxiliary(fitgroup, aux="z", data=dat, group="school") \dontrun{ # Example of path analysis mod <- ' x5 ~ x4 x4 ~ x3 x3 ~ x1 + x2' fitpath <- sem(mod, data=dat, fixed.x=FALSE, meanstructure=TRUE) # fixed.x must be FALSE fitpathaux <- sem.auxiliary(fitpath, aux="z", data=dat) # Example of full structural equation modeling dat2 <- data.frame(PoliticalDemocracy, z=rnorm(nrow(PoliticalDemocracy), 0, 1)) model <- ' ind60 =~ x1 + x2 + x3 dem60 =~ y1 + a*y2 + b*y3 + c*y4 dem65 =~ y5 + a*y6 + b*y7 + c*y8 dem60 ~ ind60 dem65 ~ ind60 + dem60 y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 ' fitsem <- sem(model, data=dat2, meanstructure=TRUE) fitsemaux <- sem.auxiliary(fitsem, aux="z", data=dat2, meanstructure=TRUE) # Example of covariate at the factor level HS.model.cov <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 visual ~ sex textual ~ sex speed ~ sex' fitcov <- cfa(HS.model.cov, data=dat, fixed.x=FALSE, meanstructure=TRUE) fitcovaux <- cfa.auxiliary(fitcov, aux="z", data=dat) # Example of Endogenous variable with single indicator HS.model.cov2 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 x7 ~ visual + textual' fitcov2 <- sem(HS.model.cov2, data=dat, fixed.x=FALSE, meanstructure=TRUE) fitcov2aux <- sem.auxiliary(fitcov2, aux="z", data=dat) # Multiple auxiliary 
variables HS.model2 <- ' visual =~ x1 + x2 + x3 speed =~ x7 + x8 + x9' fit <- cfa(HS.model2, data=HolzingerSwineford1939) fitaux <- cfa.auxiliary(HS.model2, data=HolzingerSwineford1939, aux=c("x4", "x5")) } } semTools/man/maximalRelia.Rd0000644000175100001440000000727413000201061015535 0ustar hornikusers\name{maximalRelia} \alias{maximalRelia} \title{ Calculate maximal reliability } \description{ Calculate maximal reliability of a scale } \usage{ maximalRelia(object) } \arguments{ \item{object}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.} } \details{ Given that a composite score (\eqn{W}) is a weighted sum of item scores: \deqn{ W = \bold{w}^\prime \bold{x} ,} where \eqn{\bold{x}} is a \eqn{k \times 1} vector of the scores of each item, \eqn{\bold{w}} is a \eqn{k \times 1} weight vector of each item, and \eqn{k} represents the number of items. Then, maximal reliability is obtained by finding \eqn{\bold{w}} such that reliability attains its maximum (Li, 1997; Raykov, 2012). Note that the reliability can be obtained by \deqn{ \rho = \frac{\bold{w}^\prime \bold{S}_T \bold{w}}{\bold{w}^\prime \bold{S}_X \bold{w}}} where \eqn{\bold{S}_T} is the covariance matrix explained by true scores and \eqn{\bold{S}_X} is the observed covariance matrix. Numerical method is used to find \eqn{\bold{w}} in this function. For continuous items, \eqn{\bold{S}_T} can be calculated by \deqn{ \bold{S}_T = \Lambda \Psi \Lambda^\prime,} where \eqn{\Lambda} is the factor loading matrix and \eqn{\Psi} is the covariance matrix among factors. \eqn{\bold{S}_X} is directly obtained by covariance among items. For categorical items, Green and Yang's (2009) method is used for calculating \eqn{\bold{S}_T} and \eqn{\bold{S}_X}. 
The element \eqn{i} and \eqn{j} of \eqn{\bold{S}_T} can be calculated by \deqn{ \left[\bold{S}_T\right]_{ij} = \sum^{C_i - 1}_{c_i = 1} \sum^{C_j - 1}_{c_j - 1} \Phi_2\left( \tau_{x_{c_i}}, \tau_{x_{c_j}}, \left[ \Lambda \Psi \Lambda^\prime \right]_{ij} \right) - \sum^{C_i - 1}_{c_i = 1} \Phi_1(\tau_{x_{c_i}}) \sum^{C_j - 1}_{c_j - 1} \Phi_1(\tau_{x_{c_j}}),} where \eqn{C_i} and \eqn{C_j} represents the number of thresholds in Items \eqn{i} and \eqn{j}, \eqn{\tau_{x_{c_i}}} represents the threshold \eqn{c_i} of Item \eqn{i}, \eqn{\tau_{x_{c_j}}} represents the threshold \eqn{c_i} of Item \eqn{j}, \eqn{ \Phi_1(\tau_{x_{c_i}})} is the cumulative probability of \eqn{\tau_{x_{c_i}}} given a univariate standard normal cumulative distribution and \eqn{\Phi_2\left( \tau_{x_{c_i}}, \tau_{x_{c_j}}, \rho \right)} is the joint cumulative probability of \eqn{\tau_{x_{c_i}}} and \eqn{\tau_{x_{c_j}}} given a bivariate standard normal cumulative distribution with a correlation of \eqn{\rho} Each element of \eqn{\bold{S}_X} can be calculated by \deqn{ \left[\bold{S}_T\right]_{ij} = \sum^{C_i - 1}_{c_i = 1} \sum^{C_j - 1}_{c_j - 1} \Phi_2\left( \tau_{V_{c_i}}, \tau_{V_{c_j}}, \rho^*_{ij} \right) - \sum^{C_i - 1}_{c_i = 1} \Phi_1(\tau_{V_{c_i}}) \sum^{C_j - 1}_{c_j - 1} \Phi_1(\tau_{V_{c_j}}),} where \eqn{\rho^*_{ij}} is a polychoric correlation between Items \eqn{i} and \eqn{j}. } \value{ Maximal reliability values of each group. The maximal-reliability weights are also provided. Users may extracted the weighted by the \code{attr} function (see example below). } \references{ Li, H. (1997). A unifying expression for the maximal reliability of a linear composite. \emph{Psychometrika, 62}, 245-249. Raykov, T. (2012). Scale construction and development using structural equation modeling. In R. H. Hoyle (Ed.), \emph{Handbook of structural equation modeling} (pp. 472-494). New York: Guilford. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \code{\link{reliability}} for reliability of an unweighted composite score } \examples{ total <- 'f =~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 ' fit <- cfa(total, data=HolzingerSwineford1939) maximalRelia(fit) # Extract the weight mr <- maximalRelia(fit) attr(mr, "weight") } semTools/man/runMI.Rd0000644000175100001440000001463613000201061014162 0ustar hornikusers\name{runMI} \alias{runMI} \alias{cfa.mi} \alias{sem.mi} \alias{growth.mi} \alias{lavaan.mi} \title{ Multiply impute and analyze data using lavaan } \description{ This function takes data with missing observations, multiple imputes the data, runs a SEM using lavaan and combines the results using Rubin's rules. Note that parameter estimates and standard errors are pooled by the Rubin's (1987) rule. The chi-square statistics and the related fit indices are pooled by the method described in \code{"chi"} argument. SRMR is calculated based on the average model-implied means and covariance matrices across imputations. } \usage{ runMI(model, data, m, miArgs=list(), chi="all", miPackage="Amelia", seed=12345, fun, nullModel = NULL, includeImproper = FALSE, ...) cfa.mi(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) sem.mi(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) growth.mi(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) lavaan.mi(model, data, m, miArgs=list(), miPackage="Amelia", chi="all", seed=12345, nullModel = NULL, includeImproper = FALSE, ...) } \arguments{ \item{model}{ lavaan syntax for the model to be analyzed. } \item{data}{ Data frame with missing observations or a list of data frames where each data frame is one imputed data set (for imputed data generated outside of the function). 
If a list of data frames is supplied, then other options can be left at the default. } \item{m}{ Number of imputations wanted. } \item{miArgs}{ Addition arguments for the multiple-imputation function. The arguments should be put in a list (see example below). } \item{miPackage}{ Package to be used for imputation. Currently these functions only support \code{"Amelia"} or \code{"mice"} for imputation. } \item{chi}{ The method to combine the chi-square. Can be one of the following: \code{"mr"} for the method proposed for Meng & Rubin (1992), \code{"mplus"} for the method used in Mplus (Asparouhov & Muthen, 2010), \code{"lmrr"} for the method proposed by Li, Meng, Raghunathan, & Rubin (1991), \code{"all"} to show the three methods in the output, and \code{"none"} to not pool any chi-square values. The default is \code{"all"}. } \item{seed}{ Random number seed to be used in imputations. } \item{nullModel}{ lavaan syntax for the null model. If not specified, the default null model from lavaan is used. } \item{includeImproper}{ If \code{TRUE}, the function will combine the results with improper solutions to get the combined solution. } \item{fun}{ The character of the function name used in running lavaan model (\code{"cfa"}, \code{"sem"}, \code{"growth"}, \code{"lavaan"}). } \item{...}{ Other arguments to be passed to the specified lavaan function (\code{"cfa"}, \code{"sem"}, \code{"growth"}, \code{"lavaan"}). } } \value{ The \code{\linkS4class{lavaanStar}} object which contains the original \code{lavaan} object (where the appropriate parameter estimates, appropriate standard errors, and chi-squares are filled), the additional fit-index values of the null model, which need to be adjusted to multiple datasets, and the information from pooling multiple results. } \references{ Asparouhov T. & Muthen B. (2010).\emph{Chi-Square Statistics with Multiple Imputation}. Technical Report. www.statmodel.com. Li, K.H., Meng, X.-L., Raghunathan, T.E. and Rubin, D.B. (1991). 
Significance Levels From Repeated p-values with Multiply-Imputed Data. \emph{Statistica Sinica, 1}, 65-92. Meng, X.L. & Rubin, D.B. (1992). Performing likelihood ratio tests with multiply-imputed data sets. \emph{Biometrika, 79}, 103 - 111. Rubin, D.B. (1987) \emph{Multiple Imputation for Nonresponse in Surveys.} J. Wiley & Sons, New York. } \author{Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) Patrick Miller (University of Notre Dame; \email{pmille13@nd.edu}) Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Mijke Rhemtulla (University of Amsterdam; \email{M.T.Rhemtulla@uva.nl}) Alexander Robitzsch (Federal Institute for Education Research, Innovation, and Development of the Austrian School System, Salzburg, Austria; \email{a.robitzsch@bifie.at}) Craig Enders (Arizona State University; \email{Craig.Enders@asu.edu}) Mauricio Garnier Villarreal (University of Kansas; \email{mgv@ku.edu}) Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) } \examples{ \dontrun{ library(lavaan) HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' HSMiss <- HolzingerSwineford1939[,paste("x", 1:9, sep="")] randomMiss <- rbinom(prod(dim(HSMiss)), 1, 0.1) randomMiss <- matrix(as.logical(randomMiss), nrow=nrow(HSMiss)) HSMiss[randomMiss] <- NA out <- cfa.mi(HS.model, data=HSMiss, m = 3, chi="all") summary(out) inspect(out, "fit") inspect(out, "impute") ##Multiple group example HSMiss2 <- cbind(HSMiss, school = HolzingerSwineford1939[,"school"]) out2 <- cfa.mi(HS.model, data=HSMiss2, m = 3, miArgs=list(noms="school"), chi="MR", group="school") summary(out2) inspect(out2, "fit") inspect(out2, "impute") ##Example using previously imputed data with runMI library(Amelia) modsim <- ' f1 =~ 0.7*y1+0.7*y2+0.7*y3 f2 =~ 0.7*y4+0.7*y5+0.7*y6 f3 =~ 0.7*y7+0.7*y8+0.7*y9' mod <- ' f1 =~ y1+y2+y3 f2 =~ y4+y5+y6 f3 =~ y7+y8+y9' datsim <- simulateData(modsim,model.type="cfa", meanstructure=TRUE, std.lv=TRUE, 
sample.nobs=c(200,200)) randomMiss2 <- rbinom(prod(dim(datsim)), 1, 0.1) randomMiss2 <- matrix(as.logical(randomMiss2), nrow=nrow(datsim)) datsim[randomMiss2] <- NA datsimMI <- amelia(datsim,m=3, noms="group") out3 <- runMI(mod, data=datsimMI$imputations, chi="LMRR", group="group", fun="cfa") summary(out3) inspect(out3, "fit") inspect(out3, "impute") # Categorical variables popModel <- " f1 =~ 0.6*y1 + 0.6*y2 + 0.6*y3 + 0.6*y4 y1 ~*~ 1*y1 y2 ~*~ 1*y2 y3 ~*~ 1*y3 y4 ~*~ 1*y4 f1 ~~ 1*f1 y1 | 0.5*t1 y2 | 0.25*t1 y3 | 0*t1 y4 | -0.5*t1 " analyzeModel <- " f1 =~ y1 + y2 + y3 + y4 y1 ~*~ 1*y1 y2 ~*~ 1*y2 y3 ~*~ 1*y3 y4 ~*~ 1*y4 " dat <- simulateData(popModel, sample.nobs = 200L) miss.pat <- matrix(as.logical(rbinom(prod(dim(dat)), 1, 0.2)), nrow(dat), ncol(dat)) dat[miss.pat] <- NA out5 <- cfa.mi(analyzeModel, data=dat, ordered=paste0("y", 1:4), m = 3, miArgs=list(ords = c("y1", "y2", "y3", "y4"))) summary(out5) inspect(out5, "fit") inspect(out5, "impute") } } semTools/man/plotRMSEApowernested.Rd0000644000175100001440000000340413000201061017145 0ustar hornikusers\name{plotRMSEApowernested} \alias{plotRMSEApowernested} \title{Plot power of nested model RMSEA} \description{ Plot power of nested model RMSEA over a range of possible sample sizes. } \usage{ plotRMSEApowernested(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, nlow, nhigh, steps=1, alpha=.05, group=1, ...) 
} \arguments{ \item{rmsea0A}{The H0 baseline RMSEA.} \item{rmsea0B}{The H0 alternative RMSEA (trivial misfit).} \item{rmsea1A}{The H1 baseline RMSEA.} \item{rmsea1B}{The H1 alternative RMSEA (target misfit to be rejected).} \item{dfA}{degree of freedom of the more-restricted model.} \item{dfB}{degree of freedom of the less-restricted model.} \item{nlow}{Lower bound of sample size.} \item{nhigh}{Upper bound of sample size.} \item{steps}{Step size.} \item{alpha}{The alpha level.} \item{group}{The number of group in calculating RMSEA.} \item{\dots}{The additional arguments for the plot function.} } \references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11}, 19-35. } \author{ Bell Clinton; Pavel Panko (Texas Tech University; \email{pavel.panko@ttu.edu}); Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{findRMSEApowernested}} to find the power for a given sample size in nested model comparison based on population RMSEA \item \code{\link{findRMSEAsamplesizenested}} to find the minimum sample size for a given statistical power in nested model comparison based on population RMSEA } } \examples{ plotRMSEApowernested(rmsea0A = 0, rmsea0B = 0, rmsea1A = 0.06, rmsea1B = 0.05, dfA=22, dfB=20, nlow=50, nhigh=500, steps=1, alpha=.05, group=1) } semTools/man/nullMx.Rd0000644000175100001440000000171713000201061014403 0ustar hornikusers\name{nullMx} \alias{nullMx} \title{ Analyzing data using a null model } \description{ Analyzing data using a null model by full-information maximum likelihood. In the null model, all means and covariances are free if items are continuous. All covariances are fixed to 0. For ordinal variables, their means are fixed as 0 and their variances are fixed as 1 where their thresholds are estimated. In multiple-group model, all means and variances are separately estimated. 
} \usage{ nullMx(data, groupLab = NULL) } \arguments{ \item{data}{ The target data frame } \item{groupLab}{ The name of grouping variable } } \value{ The \code{MxModel} object which contains the analysis result of the null model. } \seealso{ \code{\link{saturateMx}}, \code{\link{fitMeasuresMx}}, \code{\link{standardizeMx}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ \dontrun{ library(OpenMx) data(demoOneFactor) nullModel <- nullMx(demoOneFactor) } } semTools/man/saturateMx.Rd0000644000175100001440000000172513000201061015260 0ustar hornikusers\name{saturateMx} \alias{saturateMx} \title{ Analyzing data using a saturate model } \description{ Analyzing data using a saturate model by full-information maximum likelihood. In the saturate model, all means and covariances are free if items are continuous. For ordinal variables, their means are fixed as 0 and their variances are fixed as 1--their covariances and thresholds are estimated. In multiple-group model, all means and variances are separately estimated. } \usage{ saturateMx(data, groupLab = NULL) } \arguments{ \item{data}{ The target data frame } \item{groupLab}{ The name of grouping variable } } \value{ The \code{MxModel} object which contains the analysis result of the saturate model. 
} \seealso{ \code{\link{nullMx}}, \code{\link{fitMeasuresMx}}, \code{\link{standardizeMx}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ \dontrun{ library(OpenMx) data(demoOneFactor) satModel <- saturateMx(demoOneFactor) } } semTools/man/FitDiff-class.Rd0000644000175100001440000000366113000201061015542 0ustar hornikusers\name{FitDiff-class} \docType{class} \alias{FitDiff-class} \alias{show,FitDiff-method} \alias{summary,FitDiff-method} \title{ Class For Representing A Template of Model Fit Comparisons } \description{ This class contains model fit measures and model fit comparisons among multiple models } \section{Objects from the Class}{ Objects can be created via the \code{\link{compareFit}} function. } \section{Slots}{ \describe{ \item{\code{name}:}{The name of each model} \item{\code{nested}:}{Model fit comparisons between adjacent nested models that are ordered based on their degrees of freedom} \item{\code{ordernested}:}{The order of nested models regarding to their degrees of freedom} \item{\code{fit}:}{Fit measures of all models specified in the \code{name} slot} } } \section{methods}{ \itemize{ \item \code{summary} The summary function is used to provide the nested model comparison results and the summary of the fit indices across models. This function has one argument: \code{fit.measures}. If \code{"default"} is specified, chi-square values, degree of freedom, \emph{p} value, CFI, TLI, RMSEA, SRMR, AIC, and BIC are provided. If \code{"all"} is specified, all information given in the \code{\link[lavaan]{fitMeasures}} function is provided. Users may specify a vector of the name of fit indices that they wish. 
} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \code{\link{compareFit}}; \code{\link{clipboard}} } \examples{ HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' out <- measurementInvariance(HW.model, data=HolzingerSwineford1939, group="school", quiet=TRUE) modelDiff <- compareFit(out) summary(modelDiff) summary(modelDiff, fit.measures="all") summary(modelDiff, fit.measures=c("aic", "bic")) \dontrun{ # Save results to a file saveFile(modelDiff, file="modelDiff.txt") # Copy to a clipboard clipboard(modelDiff) } } semTools/man/permuteMeasEq.Rd0000644000175100001440000006144713000201061015707 0ustar hornikusers\name{permuteMeasEq} \alias{permuteMeasEq} \title{ Permutation Randomization Tests of Measurement Equivalence and Differential Item Functioning (DIF) } \description{ The function \code{permuteMeasEq} provides tests of hypotheses involving measurement equivalence, in one of two frameworks: (1) For multiple-group CFA models, provide a pair of nested lavaan objects, the less constrained of which (\code{uncon}) freely estimates a set of measurement parameters (e.g., factor loadings, intercepts, or thresholds; specified in \code{param}) in all groups, and the more constrained of which (\code{con}) constrains those measurement parameters to equality across groups. Group assignment is repeatedly permuted and the models are fit to each permutation, in order to produce an empirical distribution under the null hypothesis of no group differences, both for (a) changes in user-specified fit measures (see \code{AFIs} and \code{moreAFIs}) and for (b) the maximum modification index among the user-specified equality constraints. Configural invariance can also be tested by providing that fitted lavaan object to \code{con} and leaving \code{uncon = NULL}, in which case \code{param} must be \code{NULL} as well. 
(2) In MIMIC models, one or a set of continuous and/or discrete \code{covariates} can be permuted, and a constrained model is fit to each permutation in order to provide a distribution of any fit measures (namely, the maximum modification index among fixed parameters in \code{param}) under the null hypothesis of measurement equivalence across levels of those covariates. In either framework, modification indices for equality constraints or fixed parameters specified in \code{param} are calculated from the constrained model (\code{con}) using the function \code{\link[lavaan]{lavTestScore}}. } \usage{ permuteMeasEq(nPermute, modelType = c("mgcfa","mimic"), con, uncon = NULL, null = NULL, param = NULL, freeParam = NULL, covariates = NULL, AFIs = NULL, moreAFIs = NULL, maxSparse = 10L, maxNonconv = 10L, showProgress = TRUE, warn = -1L, datafun, extra, parallelType = c("none", "multicore", "snow"), ncpus = NULL, cl = NULL, iseed = 12345L) } \arguments{ \item{nPermute}{ An integer indicating the number of random permutations used to form empirical distributions under the null hypothesis. } \item{modelType}{ A character string indicating type of model employed: multiple-group CFA (\code{"mgcfa"}) or MIMIC (\code{"mimic"}). } \item{con}{ The constrained \code{lavaan} object, in which the parameters specified in \code{param} are constrained to equality across all groups when \code{modelType = "mgcfa"}, or which regression paths are fixed to zero when \code{modelType = "mimic"}. In the case of testing \emph{configural} invariance when \code{modelType = "mgcfa"}, \code{con} is the configural model (implicitly, the unconstrained model is the saturated model, so use the defaults \code{uncon = NULL} and \code{param = NULL}). When \code{modelType = "mimic"}, \code{con} is the MIMIC model in which the covariate predicts the latent construct(s) but no indicators (unless they have already been identified as DIF items). } \item{uncon}{ Optional. 
The unconstrained \code{lavaan} object, in which the parameters specified in \code{param} are freely estimated in all groups. When \code{modelType = "mgcfa"}, only in the case of testing \emph{configural} invariance should \code{uncon = NULL}. When \code{modelType = "mimic"}, any non-\code{NULL uncon} is silently set to \code{NULL}. } \item{null}{ Optional. A \code{lavaan} object, in which an alternative null model is fit (besides the default independence model specified by \code{lavaan}) for the calculation of incremental fit indices. See Widaman & Thompson (2003) for details. If \code{NULL}, \code{lavaan}'s default independence model is used. } \item{param}{ An optional character vector or list of character vectors indicating which parameters the user would test for DIF following a rejection of the omnibus null hypothesis tested using (\code{more})\code{AFIs}. Note that \code{param} does not guarantee certain parameters \emph{are} constrained in \code{con}; that is for the user to specify when fitting the model. If users have any "anchor items" that they would never intend to free across groups (or levels of a covariate), these should be excluded from \code{param}; exceptions to a type of parameter can be specified in \code{freeParam}. When \code{modelType = "mgcfa"}, \code{param} indicates which parameters of interest are constrained across groups in \code{con} and are unconstrained in \code{uncon}. Parameter names must match those returned by \code{names(coef(con))}, but omitting any group-specific suffixes (e.g., \code{"f1~1"} rather than \code{"f1~1.g2"}) or user-specified labels (that is, the parameter names must follow the rules of lavaan's \code{\link[lavaan]{model.syntax}}). 
Alternatively (or additionally), to test all constraints of a certain type (or multiple types) of parameter in \code{con}, \code{param} may take any combination of the following values: \code{"loadings"}, \code{"intercepts"}, \code{"thresholds"}, \code{"residuals"}, \code{"residual.covariances"}, \code{"means"}, \code{"lv.variances"}, and/or \code{"lv.covariances"}. When \code{modelType = "mimic"}, \code{param} must be a vector of individual parameters or a list of character strings to be passed one-at-a-time to \code{\link[lavaan]{lavTestScore}}\code{(object = con, add = param[i])}, indicating which (sets of) regression paths fixed to zero in \code{con} that the user would consider freeing (i.e., exclude anchor items). If \code{modelType = "mimic"} and \code{param} is a list of character strings, the multivariate test statistic will be saved for each list element instead of 1-\emph{df} modification indices for each individual parameter, and \code{names(param)} will name the rows of the \code{MI.obs} slot (see \linkS4class{permuteMeasEq}). Set \code{param = NULL} (default) to avoid collecting modification indices for any follow-up tests. } \item{freeParam}{ An optional character vector, silently ignored when \code{modelType = "mimic"}. If \code{param} includes a type of parameter (e.g., \code{"loadings"}), \code{freeParam} indicates exceptions (i.e., anchor items) that the user would \emph{not} intend to free across groups and should therefore be ignored when calculating \emph{p} values adjusted for the number of follow-up tests. Parameter types that are already unconstrained across groups in the fitted \code{con} model (i.e., a \emph{partial} invariance model) will automatically be ignored, so they do not need to be specified in \code{freeParam}. 
Parameter names must match those returned by \code{names(coef(con))}, but omitting any group-specific suffixes (e.g., \code{"f1~1"} rather than \code{"f1~1.g2"}) or user-specified labels (that is, the parameter names must follow the rules of lavaan \code{\link[lavaan]{model.syntax}}). } \item{covariates}{ An optional character vector, only applicable when \code{modelType = "mimic"}. The observed data are partitioned into columns indicated by \code{covariates}, and the rows are permuted simultaneously for the entire set before being merged with the remaining data. Thus, the covariance structure is preserved among the covariates, which is necessary when (e.g.) multiple dummy codes are used to represent a discrete covariate or when covariates interact. If \code{covariates = NULL} when \code{modelType = "mimic"}, the value of \code{covariates} is inferred by searching \code{param} for predictors (i.e., variables appearing after the "\code{~}" operator). } \item{AFIs}{ A character vector indicating which alternative fit indices (or chi-squared itself) are to be used to test the multiparameter omnibus null hypothesis that the constraints specified in \code{con} hold in the population. Any fit measures returned by \code{\link[lavaan]{fitMeasures}} may be specified (including constants like \code{"df"}, which would be nonsensical). If both \code{AFIs} and \code{moreAFIs} are \code{NULL}, only \code{"chisq"} will be returned. } \item{moreAFIs}{ Optional. A character vector indicating which (if any) alternative fit indices returned by \code{\link[semTools]{moreFitIndices}} are to be used to test the multiparameter omnibus null hypothesis that the constraints specified in \code{con} hold in the population. } \item{maxSparse}{ Only applicable when \code{modelType = "mgcfa"} and at least one indicator is \code{ordered}. 
An integer indicating the maximum number of consecutive times that randomly permuted group assignment can yield a sample in which at least one category (of an \code{ordered} indicator) is unobserved in at least one group, such that the same set of parameters cannot be estimated in each group. If such a sample occurs, group assignment is randomly permuted again, repeatedly until a sample is obtained with all categories observed in all groups. If \code{maxSparse} is exceeded, \code{NA} will be returned for that iteration of the permutation distribution. } \item{maxNonconv}{ An integer indicating the maximum number of consecutive times that a random permutation can yield a sample for which the model does not converge on a solution. If such a sample occurs, permutation is attempted repeatedly until a sample is obtained for which the model does converge. If \code{maxNonconv} is exceeded, \code{NA} will be returned for that iteration of the permutation distribution, and a warning will be printed when using \code{show} or \code{summary}. } \item{showProgress}{ Logical. Indicating whether to display a progress bar while permuting. Silently set to \code{FALSE} when using parallel options. } \item{warn}{ Sets the handling of warning messages when fitting model(s) to permuted data sets. See \code{\link[base]{options}}. } \item{datafun}{ An optional function that can be applied to the data (extracted from \code{con}) after each permutation, but before fitting the model(s) to each permutation. The \code{datafun} function must have an argument named \code{data} that accepts a \code{data.frame}, and it must return a \code{data.frame} containing the same column names. The column order may differ, the values of those columns may differ (so be careful!), and any additional columns will be ignored when fitting the model, but an error will result if any column names required by the model syntax do not appear in the transformed data set. 
Although available for any \code{modelType}, \code{datafun} may be useful when using the MIMIC method to test for nonuniform DIF (metric/weak invariance) by using product indicators for a latent factor representing the interaction between a factor and one of the \code{covariates}, in which case the product indicators would need to be recalculated after each permutation of the \code{covariates}. To access other R objects used within \code{permuteMeasEq}, the arguments to \code{datafun} may also contain any subset of the following: \code{"con"}, \code{"uncon"}, \code{"null"}, \code{"param"}, \code{"freeParam"}, \code{"covariates"}, \code{"AFIs"}, \code{"moreAFIs"}, \code{"maxSparse"}, \code{"maxNonconv"}, and/or \code{"iseed"}. The values for those arguments will be the same as the values supplied to \code{permuteMeasEq}. } \item{extra}{ An optional function that can be applied to any (or all) of the fitted lavaan objects (\code{con}, \code{uncon}, and/or \code{null}). This function will also be applied after fitting the model(s) to each permuted data set. To access the R objects used within \code{permuteMeasEq}, the arguments to \code{extra} must be any subset of the following: \code{"con"}, \code{"uncon"}, \code{"null"}, \code{"param"}, \code{"freeParam"}, \code{"covariates"}, \code{"AFIs"}, \code{"moreAFIs"}, \code{"maxSparse"}, \code{"maxNonconv"}, and/or \code{"iseed"}. The values for those arguments will be the same as the values supplied to \code{permuteMeasEq}. The \code{extra} function must return a named \code{numeric} vector or a named \code{list} of scalars (i.e., a \code{list} of \code{numeric} vectors of \code{length == 1}). Any unnamed elements (e.g., \code{""} or \code{NULL}) of the returned object will result in an error. } \item{parallelType}{ The type of parallel operation to be used (if any). The default is \code{"none"}. 
Forking is not possible on Windows, so if \code{"multicore"} is requested on a Windows machine, the request will be changed to \code{"snow"} with a message. } \item{ncpus}{ Integer: number of processes to be used in parallel operation. If \code{NULL} (the default) and \code{parallelType \%in\% c("multicore","snow")}, the default is one less than the maximum number of processors detected by \code{\link[parallel]{detectCores}}. This default is also silently set if the user specifies more than the number of processors detected. } \item{cl}{ An optional \pkg{parallel} or \pkg{snow} cluster for use when \code{parallelType = "snow"}. If \code{NULL}, a \code{"PSOCK"} cluster on the local machine is created for the duration of the \code{permuteMeasEq} call. If a valid \code{\link[parallel]{makeCluster}} object is supplied, \code{parallelType} is silently set to \code{"snow"}, and \code{ncpus} is silently set to \code{length(cl)}. } \item{iseed}{ Integer: Only used to set the states of the RNG when using parallel options, in which case \code{\link[base]{RNGkind}} is set to \code{"L'Ecuyer-CMRG"} with a message. See \code{\link[parallel]{clusterSetRNGStream}} and Section 6 of \code{vignette("parallel", "parallel")} for more details. If user supplies an invalid value, \code{iseed} is silently set to the default (12345). To set the state of the RNG when not using parallel options, call \code{\link[base]{set.seed}} before calling \code{permuteMeasEq}. } } \details{ For multiple-group CFA models, the multiparameter omnibus null hypothesis of measurement equivalence/invariance is that there are no group differences in any measurement parameters (of a particular type). This can be tested using the \code{anova} method on nested \code{lavaan} objects, as seen in the output of \code{\link[semTools]{measurementInvariance}}, or by inspecting the change in alternative fit indices (AFIs) such as the CFI. 
The permutation randomization method employed by \code{permuteMeasEq} generates an empirical distribution of any \code{AFIs} under the null hypothesis, so the user is not restricted to using fixed cutoffs proposed by Cheung & Rensvold (2002), Chen (2007), or Meade, Johnson, & Braddy (2008). If the multiparameter omnibus null hypothesis is rejected, partial invariance can still be established by freeing invalid equality constraints, as long as equality constraints are valid for at least two indicators per factor. Modification indices can be calculated from the constrained model (\code{con}), but multiple testing leads to inflation of Type I error rates. The permutation randomization method employed by \code{permuteMeasEq} creates a distribution of the maximum modification index if the null hypothesis is true, which allows the user to control the familywise Type I error rate in a manner similar to Tukey's \emph{q} (studentized range) distribution for the Honestly Significant Difference (HSD) post hoc test. For MIMIC models, DIF can be tested by comparing modification indices of regression paths to the permutation distribution of the maximum modification index, which controls the familywise Type I error rate. The MIMIC approach could also be applied with multiple-group models, but the grouping variable would not be permuted; rather, the covariates would be permuted separately within each group to preserve between-group differences. So whether parameters are constrained or unconstrained across groups, the MIMIC approach is only for testing null hypotheses about the effects of \code{covariates} on indicators, controlling for common factors. In either framework, \code{\link[lavaan]{lavaan}}'s \code{group.label} argument is used to preserve the order of groups seen in \code{con} when permuting the data. 
} \value{ The \linkS4class{permuteMeasEq} object representing the results of testing measurement equivalence (the multiparameter omnibus test) and DIF (modification indices), as well as diagnostics and any \code{extra} output. } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \references{ Chen, F. F. (2007). Sensitivity of goodness of fit indexes to lack of measurement invariance. \emph{Structural Equation Modeling, 14}(3), 464-504. doi:10.1080/10705510701301834 Cheung, G. W., & Rensvold, R. B. (2002). Evaluating goodness-of-fit indexes for testing measurement invariance. \emph{Structural Equation Modeling, 9}(2), 233-255. doi:10.1207/S15328007SEM0902_5 Meade, A. W., Johnson, E. C., & Braddy, P. W. (2008). Power and sensitivity of alternative fit indices in tests of measurement invariance. \emph{Journal of Applied Psychology, 93}(3), 568-592. doi:10.1037/0021-9010.93.3.568 Widaman, K. F., & Thompson, J. S. (2003). On specifying the null model for incremental fit indices in structural equation modeling. \emph{Psychological Methods, 8}(1), 16-37. 
doi:10.1037/1082-989X.8.1.16 } \seealso{ \code{\link[stats]{TukeyHSD}}, \code{\link[lavaan]{lavTestScore}}, \code{\link[semTools]{measurementInvariance}}, \code{\link[semTools]{measurementInvarianceCat}} } \examples{ \dontrun{ ######################## ## Multiple-Group CFA ## ######################## ## create 3-group data in lavaan example(cfa) data HS <- lavaan::HolzingerSwineford1939 HS$ageGroup <- ifelse(HS$ageyr < 13, "preteen", ifelse(HS$ageyr > 13, "teen", "thirteen")) ## specify and fit an appropriate null model for incremental fit indices mod.null <- c(paste0("x", 1:9, " ~ c(T", 1:9, ", T", 1:9, ", T", 1:9, ")*1"), paste0("x", 1:9, " ~~ c(L", 1:9, ", L", 1:9, ", L", 1:9, ")*x", 1:9)) fit.null <- cfa(mod.null, data = HS, group = "ageGroup") ## fit target model with varying levels of measurement equivalence mod.config <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' miout <- measurementInvariance(mod.config, data = HS, std.lv = TRUE, group = "ageGroup") (fit.config <- miout[["fit.configural"]]) (fit.metric <- miout[["fit.loadings"]]) (fit.scalar <- miout[["fit.intercepts"]]) ####################### Permutation Method ## fit indices of interest for multiparameter omnibus test myAFIs <- c("chisq","cfi","rmsea","mfi","aic") moreAFIs <- c("gammaHat","adjGammaHat") ## Use only 20 permutations for a demo. 
In practice, ## use > 1000 to reduce sampling variability of estimated p values ## test configural invariance set.seed(12345) out.config <- permuteMeasEq(nPermute = 20, con = fit.config) out.config ## test metric equivalence set.seed(12345) # same permutations out.metric <- permuteMeasEq(nPermute = 20, uncon = fit.config, con = fit.metric, param = "loadings", AFIs = myAFIs, moreAFIs = moreAFIs, null = fit.null) summary(out.metric, nd = 4) ## test scalar equivalence set.seed(12345) # same permutations out.scalar <- permuteMeasEq(nPermute = 20, uncon = fit.metric, con = fit.scalar, param = "intercepts", AFIs = myAFIs, moreAFIs = moreAFIs, null = fit.null) summary(out.scalar) ## Not much to see without significant DIF. ## Try using an absurdly high alpha level for illustration. outsum <- summary(out.scalar, alpha = .50) ## notice that the returned object is the table of DIF tests outsum ## visualize permutation distribution hist(out.config, AFI = "chisq") hist(out.metric, AFI = "chisq", nd = 2, alpha = .01, legendArgs = list(x = "topright")) hist(out.scalar, AFI = "cfi", printLegend = FALSE) ####################### Extra Output ## function to calculate expected change of Group-2 and -3 latent means if ## each intercept constraint were released extra <- function(con) { output <- list() output["x1.vis2"] <- lavTestScore(con, release = 19:20, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[70] output["x1.vis3"] <- lavTestScore(con, release = 19:20, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[106] output["x2.vis2"] <- lavTestScore(con, release = 21:22, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[70] output["x2.vis3"] <- lavTestScore(con, release = 21:22, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[106] output["x3.vis2"] <- lavTestScore(con, release = 23:24, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[70] output["x3.vis3"] <- lavTestScore(con, release = 23:24, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[106] 
output["x4.txt2"] <- lavTestScore(con, release = 25:26, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[71] output["x4.txt3"] <- lavTestScore(con, release = 25:26, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[107] output["x5.txt2"] <- lavTestScore(con, release = 27:28, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[71] output["x5.txt3"] <- lavTestScore(con, release = 27:28, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[107] output["x6.txt2"] <- lavTestScore(con, release = 29:30, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[71] output["x6.txt3"] <- lavTestScore(con, release = 29:30, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[107] output["x7.spd2"] <- lavTestScore(con, release = 31:32, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[72] output["x7.spd3"] <- lavTestScore(con, release = 31:32, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[108] output["x8.spd2"] <- lavTestScore(con, release = 33:34, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[72] output["x8.spd3"] <- lavTestScore(con, release = 33:34, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[108] output["x9.spd2"] <- lavTestScore(con, release = 35:36, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[72] output["x9.spd3"] <- lavTestScore(con, release = 35:36, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[108] output } ## observed EPC extra(fit.scalar) ## permutation results, including extra output set.seed(12345) # same permutations out.scalar <- permuteMeasEq(nPermute = 20, uncon = fit.metric, con = fit.scalar, param = "intercepts", AFIs = myAFIs, moreAFIs = moreAFIs, null = fit.null, extra = extra) ## summarize extra output summary(out.scalar, extra = TRUE) ########### ## MIMIC ## ########### ## Specify Restricted Factor Analysis (RFA) model, equivalent to MIMIC, but ## the factor covaries with the covariate instead of being regressed on it. 
## The covariate defines a single-indicator construct, and the ## double-mean-centered products of the indicators define a latent ## interaction between the factor and the covariate. mod.mimic <- ' visual =~ x1 + x2 + x3 age =~ ageyr age.by.vis =~ x1.ageyr + x2.ageyr + x3.ageyr x1 ~~ x1.ageyr x2 ~~ x2.ageyr x3 ~~ x3.ageyr ' HS.orth <- indProd(var1 = paste0("x", 1:3), var2 = "ageyr", match = FALSE, data = HS[ , c("ageyr", paste0("x", 1:3))] ) fit.mimic <- cfa(mod.mimic, data = HS.orth, meanstructure = TRUE) summary(fit.mimic, stand = TRUE) ## Whereas MIMIC models specify direct effects of the covariate on an indicator, ## DIF can be tested in RFA models by specifying free loadings of an indicator ## on the covariate's construct (uniform DIF, scalar invariance) and the ## interaction construct (nonuniform DIF, metric invariance). param <- as.list(paste0("age + age.by.vis =~ x", 1:3)) names(param) <- paste0("x", 1:3) # param <- as.list(paste0("x", 1:3, " ~ age + age.by.vis")) # equivalent ## test both parameters simultaneously for each indicator do.call(rbind, lapply(param, function(x) lavTestScore(fit.mimic, add = x)$test)) ## or test each parameter individually lavTestScore(fit.mimic, add = as.character(param)) ####################### Permutation Method ## function to recalculate the interaction terms after permuting the covariate datafun <- function(data) { d <- data[, !names(data) \%in\% paste0("x", 1:3, ".ageyr")] indProd(var1 = paste0("x", 1:3), var2 = "ageyr", match = FALSE, data = d) } set.seed(12345) perm.mimic <- permuteMeasEq(nPermute = 20, modelType = "mimic", con = fit.mimic, param = param, covariates = "ageyr", datafun = datafun) summary(perm.mimic) } } semTools/man/partialInvariance.Rd0000644000175100001440000003042613000201061016557 0ustar hornikusers\name{partialInvariance} \alias{partialInvariance} \alias{partialInvarianceCat} \title{ Partial Measurement Invariance Testing Across Groups } \description{ This test will provide partial invariance 
testing by (a) freeing a parameter one-by-one from the nested model and comparing it with the original nested model or (b) fixing (or constraining) a parameter one-by-one from the parent model and comparing it with the original parent model. This function only works with congeneric models. The \code{partialInvariance} is used for continuous variables. The \code{partialInvarianceCat} is used for categorical variables. } \usage{ partialInvariance(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", fbound = 2, return.fit = FALSE, method = "satorra.bentler.2001") partialInvarianceCat(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", return.fit = FALSE, method = "satorra.bentler.2001") } \arguments{ \item{fit}{A list of models for invariance testing. Each model should be assigned by appropriate names (see details). The result from \code{\link{measurementInvariance}} or \code{\link{measurementInvarianceCat}} could be used in this argument directly.} \item{type}{The types of invariance testing: "metric", "scalar", "strict", or "means"} \item{free}{A vector of variable names that are free across groups in advance. If partial mean invariance is tested, this argument represents a vector of factor names that are free across groups.} \item{fix}{A vector of variable names that are constrained to be equal across groups in advance. If partial mean invariance is tested, this argument represents a vector of factor names that are fixed across groups.} \item{refgroup}{The reference group used to make the effect size comparison with the other groups.} \item{poolvar}{If \code{TRUE}, the variances are pooled across groups for standardization. Otherwise, the variances of the reference group are used for standardization.} \item{p.adjust}{The method used to adjust p values. See \code{\link[stats]{p.adjust}} for the options for adjusting p values. 
The default is to not use any corrections.} \item{fbound}{The z-scores of factor that is used to calculate the effect size of the loading difference proposed by Millsap and Olivera-Aguilar (2012).} \item{return.fit}{Return the submodels fitted by this function} \item{method}{The method used to calculate likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} } \details{ There are four types of partial invariance testing: \itemize{ \item{Partial weak invariance. The model named 'fit.configural' from the list of models is compared with the model named 'fit.loadings'. Each loading will be freed or fixed from the metric and configural invariance models respectively. The modified models are compared with the original model. Note that the objects in the list of models must have the names of "fit.configural" and "fit.loadings". Users may use "metric", "weak", "loading", or "loadings" in the \code{type} argument. Note that, for testing invariance on marker variables, other variables will be assigned as marker variables automatically.} \item{Partial strong invariance. The model named 'fit.loadings' from the list of models is compared with the model named either 'fit.intercepts' or 'fit.thresholds'. Each intercept will be freed or fixed from the scalar and metric invariance models respectively. The modified models are compared with the original model. Note that the objects in the list of models must have the names of "fit.loadings" and either "fit.intercepts" or "fit.thresholds". Users may use "scalar", "strong", "intercept", "intercepts", "threshold", or "thresholds" in the \code{type} argument. Note that, for testing invariance on marker variables, other variables will be assigned as marker variables automatically. Note that if all variables are dichotomous, scalar invariance testing is not available.} \item{Partial strict invariance. 
The model named either 'fit.intercepts' or 'fit.thresholds' (or 'fit.loadings') from the list of models is compared with the model named 'fit.residuals'. Each residual variance will be freed or fixed from the strict and scalar (or metric) invariance models respectively. The modified models are compared with the original model. Note that the objects in the list of models must have the names of "fit.residuals" and either "fit.intercepts", "fit.thresholds", or "fit.loadings". Users may use "strict", "residual", "residuals", "error", or "errors" in the \code{type} argument.} \item{Partial mean invariance. The model named either 'fit.intercepts' or 'fit.thresholds' (or 'fit.residuals' or 'fit.loadings') from the list of models is compared with the model named 'fit.means'. Each factor mean will be freed or fixed from the means and scalar (or strict or metric) invariance models respectively. The modified models are compared with the original model. Note that the objects in the list of models must have the names of "fit.means" and either "fit.residuals", "fit.intercepts", "fit.thresholds", or "fit.loadings". Users may use "means" or "mean" in the \code{type} argument.} } Two types of comparisons are used in this function: \enumerate{ \item{\code{free}: The nested model is used as a template. Then, one parameter indicating the differences between two models is free. The new model is compared with the nested model. This process is repeated for all differences between two models. The likelihood-ratio test and the difference in CFI are provided.} \item{\code{fix}: The parent model is used as a template. Then, one parameter indicating the differences between two models is fixed or constrained to be equal to other parameters. The new model is then compared with the parent model. This process is repeated for all differences between two models. The likelihood-ratio test and the difference in CFI are provided.} \item{\code{wald}: This method is similar to the \code{fix} method. 
However, instead of building a new model and compare them with likelihood-ratio test, multivariate wald test is used to compare equality between parameter estimates. See \code{\link{wald}} for further details. Note that if any rows of the contrast cannot be summed to 0, the Wald test is not provided, such as comparing two means where one of the means is fixed as 0. This test statistic is not as accurate as likelihood-ratio test provided in \code{fix}. I provide it here in case that likelihood-ratio test fails to converge.} } Note that this function does not adjust for the inflated Type I error rate from multiple tests. The degree of freedom of all tests would be the number of groups minus 1. The details of standardized estimates and the effect size used for each parameters are provided in the vignettes by running \code{vignette("partialInvariance")}. } \value{ A list of results are provided. The list will consists of at least two elements: \enumerate{ \item{\code{estimates}: The results of parameter estimates including pooled estimates (\code{poolest}), the estimates for each group, standardized estimates for each group (\code{std}), the difference in standardized values, and the effect size statistic (\emph{q} for factor loading difference and \emph{h} for error variance difference). See the details of this effect size statistic by running \code{vignette("partialInvariance")}. In the \code{partialInvariance} function, the additional effect statistics proposed by Millsap and Olivera-Aguilar (2012) are provided. For factor loading, the additional outputs are the observed mean difference (\code{diff_mean}), the mean difference if factor scores are low (\code{low_fscore}), and the mean difference if factor scores are high (\code{high_fscore}). The low factor score is calculated by (a) finding the factor scores that its z-score equals -\code{bound} (the default is -2) from all groups and (b) picking the minimum value among the factor scores. 
The high factor score is calculated by (a) finding the factor scores whose z-score equals \code{bound} (the default is 2) from all groups and (b) picking the maximum value among the factor scores. For measurement intercepts, the additional outputs are the observed means difference (\code{diff_mean}) and the proportion of the differences in the intercepts over the observed means differences (\code{propdiff}). For error variances, the additional outputs are the proportion of the difference in error variances over the difference in observed variances (\code{propdiff}).} \item{\code{results}: Statistical tests as well as the change in CFI are provided. Chi-square and p-value are provided for all methods. } \item{\code{models}: The submodels used in the \code{free} and \code{fix} methods, as well as the nested and parent models. The nested and parent models will be changed from the original models if \code{free} or \code{fix} arguments are specified. } } } \references{ Millsap, R. E., & Olivera-Aguilar, M. (2012). Investigating measurement invariance using confirmatory factor analysis. In R. H. Hoyle (Ed.), \emph{Handbook of structural equation modeling} (pp. 380-392). New York: Guilford. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \code{\link{measurementInvariance}} for measurement invariance for continuous variables; \code{\link{measurementInvarianceCat}} for measurement invariance for categorical variables; \code{\link{wald}} for multivariate Wald test } \examples{ # Conduct weak invariance testing manually by using fixed-factor # method of scale identification library(lavaan) conf <- " f1 =~ NA*x1 + x2 + x3 f2 =~ NA*x4 + x5 + x6 f1 ~~ c(1, 1)*f1 f2 ~~ c(1, 1)*f2 " weak <- " f1 =~ NA*x1 + x2 + x3 f2 =~ NA*x4 + x5 + x6 f1 ~~ c(1, NA)*f1 f2 ~~ c(1, NA)*f2 " configural <- cfa(conf, data = HolzingerSwineford1939, std.lv = TRUE, group="school") weak <- cfa(weak, data = HolzingerSwineford1939, group="school", group.equal="loadings") models <- list(fit.configural = configural, fit.loadings = weak) partialInvariance(models, "metric") \dontrun{ partialInvariance(models, "metric", free = "x5") # "x5" is free across groups in advance partialInvariance(models, "metric", fix = "x4") # "x4" is fixed across groups in advance # Use the result from the measurementInvariance function HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' models2 <- measurementInvariance(HW.model, data=HolzingerSwineford1939, group="school") partialInvariance(models2, "scalar") # Conduct weak invariance testing manually by using fixed-factor # method of scale identification for dichotomous variables f <- rnorm(1000, 0, 1) u1 <- 0.9*f + rnorm(1000, 1, sqrt(0.19)) u2 <- 0.8*f + rnorm(1000, 1, sqrt(0.36)) u3 <- 0.6*f + rnorm(1000, 1, sqrt(0.64)) u4 <- 0.7*f + rnorm(1000, 1, sqrt(0.51)) u1 <- as.numeric(cut(u1, breaks = c(-Inf, 0, Inf))) u2 <- as.numeric(cut(u2, breaks = c(-Inf, 0.5, Inf))) u3 <- as.numeric(cut(u3, breaks = c(-Inf, 0, Inf))) u4 <- as.numeric(cut(u4, breaks = c(-Inf, -0.5, Inf))) g <- rep(c(1, 2), 500) dat2 <- data.frame(u1, u2, u3, u4, g) configural2 <- " f1 =~ NA*u1 + u2 + u3 + u4 u1 | c(t11, t11)*t1 u2 | 
c(t21, t21)*t1 u3 | c(t31, t31)*t1 u4 | c(t41, t41)*t1 f1 ~~ c(1, 1)*f1 f1 ~ c(0, NA)*1 u1 ~~ c(1, 1)*u1 u2 ~~ c(1, NA)*u2 u3 ~~ c(1, NA)*u3 u4 ~~ c(1, NA)*u4 " outConfigural2 <- cfa(configural2, data = dat2, group = "g", parameterization="theta", estimator="wlsmv", ordered = c("u1", "u2", "u3", "u4")) weak2 <- " f1 =~ NA*u1 + c(f11, f11)*u1 + c(f21, f21)*u2 + c(f31, f31)*u3 + c(f41, f41)*u4 u1 | c(t11, t11)*t1 u2 | c(t21, t21)*t1 u3 | c(t31, t31)*t1 u4 | c(t41, t41)*t1 f1 ~~ c(1, NA)*f1 f1 ~ c(0, NA)*1 u1 ~~ c(1, 1)*u1 u2 ~~ c(1, NA)*u2 u3 ~~ c(1, NA)*u3 u4 ~~ c(1, NA)*u4 " outWeak2 <- cfa(weak2, data = dat2, group = "g", parameterization="theta", estimator="wlsmv", ordered = c("u1", "u2", "u3", "u4")) modelsCat <- list(configural = outConfigural2, metric = outWeak2) partialInvarianceCat(modelsCat, type = "metric") partialInvarianceCat(modelsCat, type = "metric", free = "u2") partialInvarianceCat(modelsCat, type = "metric", fix = "u3") # Use the result from the measurementInvarianceCat function model <- ' f1 =~ u1 + u2 + u3 + u4 f2 =~ u5 + u6 + u7 + u8' modelsCat2 <- measurementInvarianceCat(model, data = datCat, group = "g", parameterization="theta", estimator="wlsmv", strict = TRUE) partialInvarianceCat(modelsCat2, type = "scalar") } } semTools/man/skew.Rd0000644000175100001440000000273413000201061014075 0ustar hornikusers\name{skew} \alias{skew} \title{ Finding skewness } \description{ Finding skewness (g1) of an object } \usage{ skew(object, population=FALSE) } \arguments{ \item{object}{ A vector used to find a skewness } \item{population}{ \code{TRUE} to compute the parameter formula. \code{FALSE} to compute the sample statistic formula. } } \value{ A value of a skewness with a test statistic if the population is specified as \code{FALSE} } \details{ The skewness computed is g1. The parameter skewness \eqn{\gamma_{2}} formula is \deqn{\gamma_{2} = \frac{\mu_{3}}{\mu^{3/2}_{2}},} where \eqn{\mu_{i}} denotes the \eqn{i} order central moment. 
The skewness formula for sample statistic \eqn{g_{1}} is \deqn{g_{1} = \frac{k_{3}}{k^{3/2}_{2}},} where \eqn{k_{i}} are the \eqn{i} order \emph{k}-statistic. The standard error of the skewness is \deqn{Var(\hat{g}_1) = \frac{6}{N}} where \eqn{N} is the sample size. } \references{ Weisstein, Eric W. (n.d.). \emph{Skewness.} Retrieved from MathWorld--A Wolfram Web Resource \url{http://mathworld.wolfram.com/Skewness.html} } \seealso{ \itemize{ \item \code{\link{kurtosis}} Find the univariate excessive kurtosis of a variable \item \code{\link{mardiaSkew}} Find the Mardia's multivariate skewness of a set of variables \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate kurtosis of a set of variables } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ skew(1:5) } semTools/man/SSpower.Rd0000644000175100001440000000424613000201061014526 0ustar hornikusers\name{SSpower} \alias{SSpower} \title{ Power for model parameters } \description{ Determines power for model parameters using the Satorra & Saris (1985) method } \usage{ SSpower(popModel, n, powerModel, fun = "cfa", nparam = 1, alpha = .05, ...) } \arguments{ \item{popModel}{ lavaan syntax for the population model. This model should specify population values for all parameters in the model. } \item{n}{ Sample size used in power calculation } \item{powerModel}{ lavaan syntax for the model to be analyzed. This syntax should have the parameter(s) of interest fixed to 0 (or some other number). } \item{fun}{ The character of the function name used in running lavaan model (\code{"cfa"}, \code{"sem"}, \code{"growth"}, \code{"lavaan"}). } \item{nparam}{ The number of parameters constrained in \code{powerModel}. } \item{alpha}{ The Type I error rate used to assess power } \item{...}{ Other arguments to be passed to the specified lavaan function (\code{"cfa"}, \code{"sem"}, \code{"growth"}, \code{"lavaan"}). } } \author{ Alexander M. 
Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) } \references{ Satorra, A., & Saris, W. E. (1985). Power of the likelihood ratio test in covariance structure analysis. \emph{Psychometrika, 50}, 83-90.} \examples{ library(lavaan) #Specify population values. Note every parameter has a fixed value modelP <- ' f1 =~ .7*V1 + .7*V2 + .7*V3 + .7*V4 f2 =~ .7*V5 + .7*V6 + .7*V7 + .7*V8 f1 ~~ .3*f2 f1 ~~ 1*f1 f2 ~~ 1*f2 V1 ~~ .51*V1 V2 ~~ .51*V2 V3 ~~ .51*V3 V4 ~~ .51*V4 V5 ~~ .51*V5 V6 ~~ .51*V6 V7 ~~ .51*V7 V8 ~~ .51*V8 ' #Specify model to be analyzed. Note parameter of interest f1~~f2 is fixed to 0. modelA <- ' f1 =~ V1 + V2 + V3 + V4 f2 =~ V5 + V6 + V7 + V8 f1 ~~ 0*f2 ' SSpower(modelP, 150, modelA, std.lv=TRUE) ##Get power for a range of values Ns <- seq(100, 500, 40) powVals <- rep(NA, length(Ns)) for(i in 1:length(Ns)){ powVals[i] <- SSpower(modelP, Ns[i], modelA) } plot(Ns, powVals, type = 'l') } semTools/man/probe3WayMC.Rd0000644000175100001440000001616513000201061015222 0ustar hornikusers\name{probe3WayMC} \alias{probe3WayMC} \title{ Probing three-way interaction on the no-centered or mean-centered latent interaction } \description{ Probing interaction for simple intercept and simple slope for the no-centered or mean-centered latent three-way interaction } \usage{ probe3WayMC(fit, nameX, nameY, modVar, valProbe1, valProbe2) } \arguments{ \item{fit}{The lavaan model object used to evaluate model fit} \item{nameX}{The vector of the factor names used as the predictors. The three first-order factors will be listed first. Then the second-order factors will be listed. The last element of the name will represent the three-way interaction. Note that the fourth element must be the interaction between the first and the second variables. The fifth element must be the interaction between the first and the third variables. 
The sixth element must be the interaction between the second and the third variables.} \item{nameY}{The name of factor that is used as the dependent variable.} \item{modVar}{The name of two factors that are used as the moderators. The effect of the independent factor on each combination of the moderator variable values will be probed.} \item{valProbe1}{The values of the first moderator that will be used to probe the effect of the independent factor.} \item{valProbe2}{The values of the second moderator that will be used to probe the effect of the independent factor.} } \details{ Before using this function, researchers need to make the products of the indicators between the first-order factors using mean centering (Marsh, Wen, & Hau, 2004). Note that the double-mean centering may not be appropriate for probing interaction if researchers are interested in simple intercepts. The mean or double-mean centering can be done by the \code{\link{indProd}} function. The indicator products can be made for all possible combination or matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the regression with latent interaction will be used to fit all original indicators and the product terms. See the example for how to fit the product term below. Once the lavaan result is obtained, this function will be used to probe the interaction. 
Let the latent interaction model regressing the dependent variable (\eqn{Y}) on the independent variable (\eqn{X}) and two moderators (\eqn{Z} and \eqn{W}) be \deqn{ Y = b_0 + b_1X + b_2Z + b_3W + b_4XZ + b_5XW + b_6ZW + b_7XZW + r, } where \eqn{b_0} is the estimated intercept or the expected value of \eqn{Y} when \eqn{X}, \eqn{Z}, and \eqn{W} are 0, \eqn{b_1} is the effect of \eqn{X} when \eqn{Z} and \eqn{W} are 0, \eqn{b_2} is the effect of \eqn{Z} when \eqn{X} and \eqn{W} are 0, \eqn{b_3} is the effect of \eqn{W} when \eqn{X} and \eqn{Z} are 0, \eqn{b_4} is the interaction effect between \eqn{X} and \eqn{Z} when \eqn{W} is 0, \eqn{b_5} is the interaction effect between \eqn{X} and \eqn{W} when \eqn{Z} is 0, \eqn{b_6} is the interaction effect between \eqn{Z} and \eqn{W} when \eqn{X} is 0, \eqn{b_7} is the three-way interaction effect between \eqn{X}, \eqn{Z}, and \eqn{W}, and \eqn{r} is the residual term. For probing three-way interaction, the simple intercept of the independent variable at the specific values of the moderators (Aiken & West, 1991) can be obtained by \deqn{ b_{0|X = 0, Z, W} = b_0 + b_2Z + b_3W + b_6ZW. } The simple slope of the independent variable at the specific values of the moderators can be obtained by \deqn{ b_{X|Z, W} = b_1 + b_4Z + b_5W + b_7ZW. } The variance of the simple intercept formula is \deqn{ Var\left(b_{0|X = 0, Z, W}\right) = Var\left(b_0\right) + Z^2Var\left(b_2\right) + W^2Var\left(b_3\right) + Z^2W^2Var\left(b_6\right) + 2ZCov\left(b_0, b_2\right) + 2WCov\left(b_0, b_3\right) + 2ZWCov\left(b_0, b_6\right) + 2ZWCov\left(b_2, b_3\right) + 2Z^2WCov\left(b_2, b_6\right) + 2ZW^2Cov\left(b_3, b_6\right) } where \eqn{Var} denotes the variance of a parameter estimate and \eqn{Cov} denotes the covariance of two parameter estimates. 
The variance of the simple slope formula is \deqn{ Var\left(b_{X|Z, W}\right) = Var\left(b_1\right) + Z^2Var\left(b_4\right) + W^2Var\left(b_5\right) + Z^2W^2Var\left(b_7\right) + 2ZCov\left(b_1, b_4\right) + 2WCov\left(b_1, b_5\right) + 2ZWCov\left(b_1, b_7\right) + 2ZWCov\left(b_4, b_5\right) + 2Z^2WCov\left(b_4, b_7\right) + 2ZW^2Cov\left(b_5, b_7\right) } Wald statistic is used for test statistic. } \value{ A list with two elements: \enumerate{ \item{SimpleIntercept} The intercepts given each value of the moderator. This element will be shown only if the factor intercept is estimated (e.g., not fixed as 0). \item{SimpleSlope} The slopes given each value of the moderator. } In each element, the first column represents the values of the first moderator specified in the \code{valProbe1} argument. The second column represents the values of the second moderator specified in the \code{valProbe2} argument. The third column is the simple intercept or simple slope. The fourth column is the standard error of the simple intercept or simple slope. The fifth column is the Wald (\emph{z}) statistic. The sixth column is the \emph{p}-value testing whether the simple intercepts or slopes are different from 0. } \references{ Aiken, L. S., & West, S. G. (1991). Multiple regression: Testing and interpreting interactions. Newbury Park, CA: Sage. Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}, 275-300. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. \item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering, or double-mean centering. 
\item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{probe3WayRC}} For probing the three-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. } } \examples{ library(lavaan) dat3wayMC <- indProd(dat3way, 1:3, 4:6, 7:9) model3 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f3 =~ x7 + x8 + x9 f12 =~ x1.x4 + x2.x5 + x3.x6 f13 =~ x1.x7 + x2.x8 + x3.x9 f23 =~ x4.x7 + x5.x8 + x6.x9 f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9 f4 =~ x10 + x11 + x12 f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123 f1 ~~ 0*f12 f1 ~~ 0*f13 f1 ~~ 0*f123 f2 ~~ 0*f12 f2 ~~ 0*f23 f2 ~~ 0*f123 f3 ~~ 0*f13 f3 ~~ 0*f23 f3 ~~ 0*f123 f12 ~~ 0*f123 f13 ~~ 0*f123 f23 ~~ 0*f123 x1 ~ 0*1 x4 ~ 0*1 x7 ~ 0*1 x10 ~ 0*1 x1.x4 ~ 0*1 x1.x7 ~ 0*1 x4.x7 ~ 0*1 x1.x4.x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f3 ~ NA*1 f12 ~ NA*1 f13 ~ NA*1 f23 ~ NA*1 f123 ~ NA*1 f4 ~ NA*1 " fitMC3way <- sem(model3, data=dat3wayMC, meanstructure=TRUE, std.lv=FALSE) summary(fitMC3way) result3wayMC <- probe3WayMC(fitMC3way, c("f1", "f2", "f3", "f12", "f13", "f23", "f123"), "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1)) result3wayMC } semTools/man/poolMAlloc.Rd0000644000175100001440000002732113000201061015164 0ustar hornikusers\name{poolMAlloc} \alias{poolMAlloc} \title{ Pooled estimates and standard errors across M parcel-allocations: Combining sampling variability and parcel-allocation variability. } \description{ This function employs an iterative algorithm to pick the number of random item-to-parcel allocations needed to meet user-defined stability criteria for a fitted structural equation model (SEM) (see "Details" below for more information). Pooled parameter and standard error estimates from this SEM can be outputted at this final selected number of allocations. 
Additionally, new indices (see Sterba & Rights, 2016) are outputted for assessing the relative contributions of parcel-allocation variability vs. sampling variability in each estimate. At each iteration, this function generates a given number of random item-to-parcel allocations using a modified version of the \code{\link{parcelAllocation}} function (Quick & Schoemann, 2012), fits a SEM to each allocation, pools results across allocations from that iteration, and then assesses whether stopping criteria are met. If stopping criteria are not met, the algorithm increments the number of allocations used (generating all new allocations). } \usage{ poolMAlloc(nPerPar, facPlc, nAllocStart, nAllocAdd = 0, parceloutput=0, syntax, dataset, stopProp, stopValue, selectParam = NULL, double = FALSE, checkConv = FALSE, names = 'default', leaveout = 0, useTotalAlloc=FALSE, ...) } \arguments{ \item{nPerPar}{ A list in which each element is a vector, corresponding to each factor, indicating sizes of parcels. If variables are left out of parceling, they should not be accounted for here (i.e., there should not be parcels of size "1"). } \item{facPlc}{ A list of vectors, each corresponding to a factor, specifying the item indicators of that factor (whether included in parceling or not). Either variable names or column numbers. Variables not listed will not be modeled or included in output datasets. } \item{nAllocStart}{ The number of random allocations of items to parcels to generate in the first iteration of the algorithm. } \item{nAllocAdd}{ The number of allocations to add with each iteration of the algorithm. Note that if only one iteration is desired, \code{nAllocAdd} can be set to 0 and results will be output for \code{nAllocStart} allocations only. } \item{syntax}{ lavaan syntax that defines the model. 
} \item{dataset}{ Item-level dataset } \item{parceloutput}{ (Optional) folder where \emph{M} (the final selected number of allocations) parceled data sets will be outputted from the iteration where the algorithm met stopping criteria. (Note for Windows users: file path must be specified using forward slashes). } \item{stopProp}{ Value used in defining stopping criteria of the algorithm (\eqn{\delta_a} in Sterba & Rights, 2016). This is the minimum proportion of change (in any pooled parameter or pooled standard error estimate listed in \code{selectParam}) that is allowable from one iteration of the algorithm to the next. That is, change in pooled estimates and pooled standard errors from one iteration to the next must all be less than (\code{stopProp}) x (value from former iteration). Note that \code{stopValue} can override this criterion (see below). Also note that values less than .01 are unlikely to lead to more substantively meaningful precision. Also note that if only \code{stopValue} is a desired criterion, \code{stopProp} can be set to 0. } \item{stopValue}{ Value used in defining stopping criteria of the algorithm (\eqn{\delta_b} in Sterba & Rights, 2016). \code{stopValue} is a minimum allowable amount of absolute change (in any pooled parameter or pooled standard error estimate listed in \code{selectParam}) from one iteration of the algorithm to the next. For a given pooled estimate or pooled standard error, \code{stopValue} is only invoked as a stopping criteria when the minimum change required by \code{stopProp} is less than \code{stopValue}. Note that values less than .01 are unlikely to lead to more substantively meaningful precision. Also note that if only \code{stopProp} is a desired criterion, \code{stopValue} can be set to 0. } \item{selectParam}{ (Optional) A list of the pooled parameters to be used in defining stopping criteria (i.e., \code{stopProp} and \code{stopValue}). 
These parameters should appear in the order they are listed in the lavaan syntax. By default, all pooled parameters are used. Note that \code{selectParam} should only contain freely-estimated parameters. In one example from Sterba and Rights (2016) \code{selectParam} included all free parameters except item intercepts and in another example \code{selectParam} included only structural parameters. } \item{double}{ (Optional) If set to \code{TRUE}, requires stopping criteria (\code{stopProp} and \code{stopValue}) to be met for all parameters (in \code{selectParam}) for two consecutive iterations of the algorithm. By default, this is set to \code{FALSE}, meaning stopping criteria need only be met at one iteration of the algorithm. } \item{names}{ (Optional) A character vector containing the names of parceled variables. } \item{leaveout}{ (Optional) A vector of variables to be left out of randomized parceling. Either variable names or column numbers are allowed. } \item{useTotalAlloc}{ (Optional) If set to \code{TRUE}, function will output a separate set of results that uses all allocations created by the algorithm, rather than \emph{M} allocations (see "Allocations needed for stability" below). This distinction is further discussed in Sterba and Rights (2016). } \item{checkConv}{ (Optional) If set to TRUE, function will output pooled estimates and standard errors from 10 iterations post-convergence. } \item{\dots}{ Additional arguments to be passed to \code{\link[lavaan]{lavaan}} } } \details{ This is a modified version of \code{\link{parcelAllocation}}. It implements a new algorithm for choosing the number of allocations (\emph{M}), (described in Sterba & Rights (2016)), newly pools parameter estimate and standard error results across these \emph{M} allocations, and produces indices for assessing the relative contributions of parcel-allocation variability vs. sampling variability in each estimate. 
This function randomly generates a given number (\code{nAllocStart}) of item-to-parcel allocations, fits a SEM to each allocation, and then increments the number of allocations used (by \code{nAllocAdd}) until the pooled parameter estimates and pooled standard errors fulfill stopping criteria (\code{stopProp} and \code{stopValue}, defined above). Results from the model that was fit to the \emph{M} allocations are outputted. Additionally, this function newly outputs the proportion of allocations with solutions that converged (using a maximum likelihood estimator) as well as the proportion of allocations with solutions that were converged and proper. The converged and proper solutions among the final \emph{M} allocations are used in computing pooled results. The original parcelAllocation function could not be employed if any allocations yielded nonconverged solutions. For further details on the benefits of the random allocation of items to parcels, see Sterba (2011) and Sterba and MacCallum (2010). Additionally, after each iteration of the algorithm, information useful in monitoring the algorithm is outputted. The number of allocations used at that iteration, the proportion of pooled parameter estimates meeting stopping criteria at the previous iteration, the proportion of pooled standard errors meeting stopping criteria at the previous iteration, and the runtime of that iteration are outputted. When stopping criteria are satisfied, the full set of results are outputted. } \value{ \item{Estimates}{A table containing pooled results across \emph{M} allocations at the iteration where stopping criteria were met. 
Columns correspond to individual parameter name, pooled estimate, pooled standard error, \emph{p}-value for a \emph{z}-test of the parameter, \emph{z}-based 95\% confidence interval, \emph{p}-value for a \emph{t}-test of the parameter (using degrees of freedom described in Sterba & Rights, 2016), and \emph{t}-based 95\% confidence interval for the parameter.} \item{Fit}{A table containing results related to model fit from the \emph{M} allocations at the iteration where stopping criteria were met. Columns correspond to fit index names, the average of each index across allocations, the standard deviation of each fit index across allocations, the maximum of each fit index across allocations, the minimum of each fit index across allocations, the range of each fit index across allocations, and the percent of the \emph{M} allocations where the chi-square test of absolute fit was significant.} \item{Proportion of converged and proper allocations}{A table containing the proportion of the final \emph{M} allocations that converged (using a maximum likelihood estimator) and the proportion of allocations that converged to proper solutions. Note that pooled estimates, pooled standard errors, and other results are computed using only the converged, proper allocations.} \item{Allocations needed for stability (M)}{The number of allocations (\emph{M}) at which the algorithm's stopping criteria (defined above) were met.} \item{Indices used to quantify uncertainty in estimates due to sample vs. allocation variability}{A table containing individual parameter names, an estimate of the proportion of total variance of a pooled parameter estimate that is attributable to parcel-allocation variability (PPAV), and an estimate of the ratio of the between-allocation variance of a pooled parameter estimate to the within-allocation variance (RPAV). See Sterba and Rights (2016) for more detail.} \item{Total runtime (minutes)}{The total runtime of the function, in minutes. 
Note that the total runtime will be greater when the the specified model encounters convergence problems for some allocations, as is the case with the \code{\link{simParcel}} dataset used below.} } \references{ Sterba, S. K. (2011). Implications of parcel-allocation variability for comparing fit of item-solutions and parcel-solutions. \emph{Structural Equation Modeling: A Multidisciplinary Journal, 18}(4), 554-577. Sterba, S. K., & MacCallum, R. C. (2010). Variability in parameter estimates and model fit across repeated allocations of items to parcels. \emph{Multivariate Behavioral Research, 45}(2), 322-358. Sterba, S. K. & Rights, J. D. (2016). Accounting for parcel-allocation variability in practice: Combining sources of uncertainty and choosing the number of allocations. \emph{Multivariate Behavioral Research}. \url{http://www.tandfonline.com/doi/pdf/10.1080/00273171.2016.1144502} } \seealso{ \code{\link{parcelAllocation}}, \code{\link{PAVranking}} } \author{ Jason D. Rights (Vanderbilt University; \email{jason.d.rights@vanderbilt.edu}) The author would also like to credit Corbin Quick and Alexander Schoemann for providing the original parcelAllocation function on which this function is based. 
} \examples{ \dontrun{ ## Lavaan syntax: A 2 Correlated ## factor CFA model to be fit to parceled data parmodel <- ' f1 =~ NA*p1f1 + p2f1 + p3f1 f2 =~ NA*p1f2 + p2f2 + p3f2 p1f1 ~ 1 p2f1 ~ 1 p3f1 ~ 1 p1f2 ~ 1 p2f2 ~ 1 p3f2 ~ 1 p1f1 ~~ p1f1 p2f1 ~~ p2f1 p3f1 ~~ p3f1 p1f2 ~~ p1f2 p2f2 ~~ p2f2 p3f2 ~~ p3f2 f1 ~~ 1*f1 f2 ~~ 1*f2 f1 ~~ f2 ' ##specify items for each factor f1name <- colnames(simParcel)[1:9] f2name <- colnames(simParcel)[10:18] ##run function poolMAlloc(nPerPar=list(c(3,3,3),c(3,3,3)), facPlc=list(f1name,f2name), nAllocStart=10, nAllocAdd=10, syntax=parmodel, dataset=simParcel, stopProp=.03, stopValue=.03, selectParam=c(1:6,13:18,21), names=list("p1f1","p2f1","p3f1","p1f2","p2f2","p3f2"), double=FALSE, useTotalAlloc=FALSE) } } semTools/man/singleParamTest.Rd0000644000175100001440000000734213000201061016226 0ustar hornikusers\name{singleParamTest} \alias{singleParamTest} \title{ Single Parameter Test Divided from Nested Model Comparison } \description{ In comparing two nested models, chi-square test may indicate that two models are different. However, like other omnibus tests, researchers do not know which fixed parameters or constraints make these two models different. This function will help researchers identify the significant parameter. } \usage{ singleParamTest(model1, model2, return.fit = FALSE, method = "satorra.bentler.2001") } \arguments{ \item{model1}{ Model 1. } \item{model2}{ Model 2. Note that two models must be nested models. Further, the order of parameters in their parameter tables are the same. That is, nested models with different scale identifications may not be able to test by this function. } \item{return.fit}{ Return the submodels fitted by this function } \item{method}{The method used to calculate likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} } \details{ This function first identify the differences between these two models. 
The model with more free parameters is referred to as parent model and the model with less free parameters is referred to as nested model. Three tests are implemented here: \enumerate{ \item{\code{free}: The nested model is used as a template. Then, one parameter indicating the differences between two models is free. The new model is compared with the nested model. This process is repeated for all differences between two models.} \item{\code{fix}: The parent model is used as a template. Then, one parameter indicating the differences between two models is fixed or constrained to be equal to other parameters. The new model is then compared with the parent model. This process is repeated for all differences between two models.} \item{\code{mi}: No longer available because the test of modification indices is not consistent. For example, two parameters are equally constrained. The modification index from the first parameter is not equal to the second parameter.} } Note that this function does not adjust for the inflated Type I error rate from multiple tests. } \value{ If \code{return.fit = FALSE}, the result tables are provided. Chi-square and p-value are provided for all methods. Note that the chi-square is all based on 1 degree of freedom. Expected parameter changes and their standardized forms are also provided. If \code{return.fit = TRUE}, a list with two elements are provided. The first element is the tabular result. The second element is the submodels used in the \code{free} and \code{fix} methods. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ library(lavaan) # Nested model comparison by hand HS.model1 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6' HS.model2 <- ' visual =~ a*x1 + a*x2 + a*x3 textual =~ b*x4 + b*x5 + b*x6' m1 <- cfa(HS.model1, data = HolzingerSwineford1939, std.lv=TRUE, estimator="MLR") m2 <- cfa(HS.model2, data = HolzingerSwineford1939, std.lv=TRUE, estimator="MLR") anova(m1, m2) singleParamTest(m1, m2) # Nested model comparison from the measurementInvariance function HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' models <- measurementInvariance(HW.model, data=HolzingerSwineford1939, group="school") singleParamTest(models[[1]], models[[2]]) # Note that the comparison between weak (Model 2) and scalar invariance (Model 3) cannot be done # by this function # because the weak invariance model fixes factor means as 0 in Group 2 but # the strong invariance model frees the factor means in Group 2. Users may try to compare # strong (Model 3) and means invariance models by this function. } semTools/man/probe3WayRC.Rd0000644000175100001440000001437513000201061015230 0ustar hornikusers\name{probe3WayRC} \alias{probe3WayRC} \title{ Probing three-way interaction on the residual-centered latent interaction } \description{ Probing interaction for simple intercept and simple slope for the residual-centered latent three-way interaction (Pornprasertmanit, Schoemann, Geldhof, & Little, submitted) } \usage{ probe3WayRC(fit, nameX, nameY, modVar, valProbe1, valProbe2) } \arguments{ \item{fit}{The lavaan model object used to evaluate model fit} \item{nameX}{The vector of the factor names used as the predictors. The three first-order factors will be listed first. Then the second-order factors will be listeed. The last element of the name will represent the three-way interaction. Note that the fourth element must be the interaction between the first and the second variables. 
The fifth element must be the interaction between the first and the third variables. The sixth element must be the interaction between the second and the third variables.} \item{nameY}{The name of factor that is used as the dependent variable.} \item{modVar}{The name of two factors that are used as the moderators. The effect of the independent factor on each combination of the moderator variable values will be probed.} \item{valProbe1}{The values of the first moderator that will be used to probe the effect of the independent factor.} \item{valProbe2}{The values of the second moderator that will be used to probe the effect of the independent factor.} } \details{ Before using this function, researchers need to make the products of the indicators between the first-order factors and residualize the products by the original indicators (Lance, 1988; Little, Bovaird, & Widaman, 2006). The process can be automated by the \code{\link{indProd}} function. Note that the indicator products can be made for all possible combination or matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the regression with latent interaction will be used to fit all original indicators and the product terms (Geldhof, Pornprasertmanit, Schoemann, & Little, in press). To use this function the model must be fit with a mean structure. See the example for how to fit the product term below. Once the lavaan result is obtained, this function will be used to probe the interaction. The probing process on residual-centered latent interaction is based on transforming the residual-centered result into the no-centered result. See Pornprasertmanit, Schoemann, Geldhof, and Little (submitted) for further details. Note that this approach based on a strong assumption that the first-order latent variables are normally distributed. 
The probing process is applied after the no-centered result (parameter estimates and their covariance matrix among parameter estimates) has been computed. See the \code{\link{probe3WayMC}} for further details. } \value{ A list with two elements: \enumerate{ \item{SimpleIntercept} The intercepts given each value of the moderator. This element will be shown only if the factor intercept is estimated (e.g., not fixed as 0). \item{SimpleSlope} The slopes given each value of the moderator. } In each element, the first column represents the values of the first moderator specified in the \code{valProbe1} argument. The second column represents the values of the second moderator specified in the \code{valProbe2} argument. The third column is the simple intercept or simple slope. The fourth column is the standard error of the simple intercept or simple slope. The fifth column is the Wald (\emph{z}) statistic. The sixth column is the \emph{p}-value testing whether the simple intercepts or slopes are different from 0. } \references{ Geldhof, G. J., Pornprasertmanit, S., Schoemann, A., & Little, T. D. (in press). Orthogonalizing through residual centering: Applications and caveats. \emph{Educational and Psychological Measurement.} Lance, C. E. (1988). Residual centering, exploratory and confirmatory moderator analysis, and decomposition of effects in path models containing interactions. \emph{Applied Psychological Measurement, 12}, 163-175. Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of orthogonalizing powered and product terms: Implications for modeling interactions. \emph{Structural Equation Modeling, 13}, 497-519. Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}, 275-300. Pornprasertmanit, S., Schoemann, A. M., Geldhof, G. J., & Little, T. D. (submitted). 
\emph{Probing latent interaction estimated with a residual centering approach.} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. \item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. } } \examples{ library(lavaan) dat3wayRC <- orthogonalize(dat3way, 1:3, 4:6, 7:9) model3 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f3 =~ x7 + x8 + x9 f12 =~ x1.x4 + x2.x5 + x3.x6 f13 =~ x1.x7 + x2.x8 + x3.x9 f23 =~ x4.x7 + x5.x8 + x6.x9 f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9 f4 =~ x10 + x11 + x12 f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123 f1 ~~ 0*f12 f1 ~~ 0*f13 f1 ~~ 0*f123 f2 ~~ 0*f12 f2 ~~ 0*f23 f2 ~~ 0*f123 f3 ~~ 0*f13 f3 ~~ 0*f23 f3 ~~ 0*f123 f12 ~~ 0*f123 f13 ~~ 0*f123 f23 ~~ 0*f123 x1 ~ 0*1 x4 ~ 0*1 x7 ~ 0*1 x10 ~ 0*1 x1.x4 ~ 0*1 x1.x7 ~ 0*1 x4.x7 ~ 0*1 x1.x4.x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f3 ~ NA*1 f12 ~ NA*1 f13 ~ NA*1 f23 ~ NA*1 f123 ~ NA*1 f4 ~ NA*1 " fitRC3way <- sem(model3, data=dat3wayRC, meanstructure=TRUE, std.lv=FALSE) summary(fitRC3way) result3wayRC <- probe3WayRC(fitRC3way, c("f1", "f2", "f3", "f12", "f13", "f23", "f123"), "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1)) result3wayRC } semTools/man/mvrnonnorm.Rd0000644000175100001440000000272413000201061015336 0ustar hornikusers\name{mvrnonnorm} \alias{mvrnonnorm} \title{ Generate Non-normal Data using Vale and Maurelli (1983) method } \description{ Generate 
Non-normal Data using Vale and Maurelli (1983) method. The function is designed to be as similar as the popular \code{mvrnorm} function in the \code{MASS} package. The codes are copied from \code{mvrnorm} function in the \code{MASS} package for argument checking and \code{lavaan} package for data generation using Vale and Maurelli (1983) method. } \usage{ mvrnonnorm(n, mu, Sigma, skewness = NULL, kurtosis = NULL, empirical = FALSE) } \arguments{ \item{n}{Sample size} \item{mu}{A mean vector} \item{Sigma}{A positive-definite symmetric matrix specifying the covariance matrix of the variables} \item{skewness}{A vector of skewness of the variables} \item{kurtosis}{A vector of excessive kurtosis of the variables} \item{empirical}{If \code{TRUE}, \code{mu} and \code{Sigma} specify the empirical not population mean and covariance matrix} } \value{ A data matrix } \references{ Vale, C. D. & Maurelli, V. A. (1983) Simulating multivariate nonormal distributions. \emph{Psychometrika, 48}, 465-471. } \author{ The original function is the \code{simulateData} function written by Yves Rosseel in the \code{lavaan} package. The function is adjusted for a convenient usage by Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ mvrnonnorm(100, c(1, 2), matrix(c(10, 2, 2, 5), 2, 2), skewness = c(5, 2), kurtosis = c(3, 3)) } semTools/man/htmt.Rd0000644000175100001440000000312513000201061014073 0ustar hornikusers\name{htmt} \alias{htmt} \title{ Assessing Discriminant Validity using Heterotrait-Monotrait Ratio } \description{ This function assesses discriminant validity through the heterotrait-monotrait ratio (HTMT) of the correlations (Henseler, Ringlet & Sarstedt, 2015). Specifically, it assesses the average correlation among indicators across constructs (i.e. heterotrait-heteromethod correlations), relative to the average correlation among indicators within the same construct (i.e. monotrait-heteromethod correlations). 
The resulting HTMT values are interpreted as estimates of inter-construct correlations. Absolute values of the correlations are used to calculate the HTMT matrix. } \usage{ htmt(data, model, ...) } \arguments{ \item{data}{ A desired data set } \item{model}{ lavaan syntax of a confirmatory factor analysis model where at least two factors are required to indicate indicators measuring the same construct. } \item{\dots}{ Other arguments shown in \link[lavaan]{lavCor} } } \value{ A matrix showing HTMT values (i.e., discriminant validity) between each pair of factors. } \references{ Henseler, J., Ringle, C. M., & Sarstedt, M. (2015). A new criterion for assessing discriminant validity in variance-based structural equation modeling. \emph{Journal of the Academy of Marketing Science, 43}, 115-135. } \author{ Ylenio Longo (University of Nottingham; \email{yleniolongo@gmail.com}) } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' dat <- HolzingerSwineford1939[, paste0("x", 1:9)] htmt(dat, HS.model) } semTools/man/monteCarloMed.Rd0000644000175100001440000001307313000201061015653 0ustar hornikusers\name{monteCarloMed} \alias{monteCarloMed} \title{ Monte Carlo Confidence Intervals to Test Complex Indirect Effects } \description{ This function takes an expression for an indirect effect, the parameters and standard errors associated with the expression and returns a confidence interval based on a Monte Carlo test of mediation (MacKinnon, Lockwood, & Williams, 2004). } \usage{ monteCarloMed(expression, ..., ACM=NULL, object=NULL, rep=20000, CI=95, plot=FALSE, outputValues=FALSE) } \arguments{ \item{expression}{A character scalar representing the computation of an indirect effect. Different parameters in the expression should have different alphanumeric values. Expressions can use either addition (+) or multiplication (*) operators.} \item{\dots}{Parameter estimates for all parameters named in \code{expression}. 
The order of parameters should follow from \code{expression} (the first parameter named in \code{expression} should be the first parameter listed in \dots). Alternatively \dots can be a vector of parameter estimates.} \item{ACM}{A matrix representing the asymptotic covariance matrix of the parameters described in \code{expression}. This matrix should be a symmetric matrix with dimensions equal to the number of parameter names in \code{expression}. Information on finding the ACOV in popular SEM software is described below.} \item{object}{A lavaan model object fitted after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions. The model must have parameters labelled with the same labels used in \code{expression}. When using this option do not specify values for \dots or \code{ACM}.} \item{rep}{The number of replications to compute. Many thousand are recommended.} \item{CI}{Width of the confidence interval computed.} \item{plot}{Should the function output a plot of simulated values of the indirect effect?} \item{outputValues}{Should the function output all simulated values of the indirect effect?} } \details{ This function implements the Monte Carlo test of mediation first described in MacKinnon, Lockwood, & Williams (2004) and extends it to complex cases where the indirect effect is more than a function of two parameters. The function takes an expression for the indirect effect, randomly simulated values of the indirect effect based on the values of the parameters (and the associated standard errors) comprising the indirect effect, and outputs a confidence interval of the indirect effect based on the simulated values. For further information on the Monte Carlo test of mediation see MacKinnon, Lockwood, & Williams (2004), Preacher & Selig (in press), and Selig & Preacher (2008). For a Monte Carlo test of mediation with a random effects model see Selig & Preacher (2010). 
The asymptotic covariance matrix can be easily found in many popular SEM software applications. \itemize{ \item{LISREL}{Including the EC option on the OU line will print the ACM to a seperate file. The file contains the lower triangular elements of the ACM in free format and scientific notation} \item{Mplus}{Include the command TECH3; in the OUTPUT section. The ACM will be printed in the output.} \item{lavaan} {Use the command \code{vcov} on the fitted lavaan object to print the ACM to the screen} } } \value{ A list with two elements. The first element is the point estimate for the indirect effect. The second element is a matrix with values for the upper and lower limits of the confidence interval generated from the Monte Carlo test of mediation. If \code{outputValues=TRUE}, output will be a list with a list with the point estimate and values for the upper and lower limits of the confidence interval as the first element and a vector of simulated values of the indirect effect as the second element. } \references{ Preacher, K. J., & Selig, J. P. (2010, July). Monte Carlo method for assessing multilevel mediation: An interactive tool for creating confidence intervals for indirect effects in 1-1-1 multilevel models [Computer software]. Available from \url{http://quantpsy.org/}. Preacher, K. J., & Selig, J. P. (2012). Advantages of Monte Carlo confidence intervals for indirect effects. \emph{Communication Methods and Measures, 6}, 77-98. Selig, J. P., & Preacher, K. J. (2008, June). Monte Carlo method for assessing mediation: An interactive tool for creating confidence intervals for indirect effects [Computer software]. Available from \url{http://quantpsy.org/}. } \author{ Corbin Quick (University of Michigan; \email{corbinq@umich.edu}) Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) James P. 
Selig (University of New Mexico; \email{selig@unm.edu}) } \examples{ #Simple two path mediation #Write expression of indirect effect med <- 'a*b' #Paramter values from analyses aparam <- 1 bparam<-2 #Asymptotic covariance matrix from analyses AC <- matrix(c(.01,.00002, .00002,.02), nrow=2, byrow=TRUE) #Compute CI, include a plot monteCarloMed(med, coef1=aparam, coef2=bparam, outputValues=FALSE, plot=TRUE, ACM=AC) #Use a vector of parameter estimates as input aparam<-c(1,2) monteCarloMed(med, coef1=aparam, outputValues=FALSE, plot=TRUE, ACM=AC) #Complex mediation with two paths for the indirect effect #Write expression of indirect effect med <- 'a1*b1 + a1*b2' #Paramter values and standard errors from analyses aparam <- 1 b1param<-2 b2param<-1 #Asymptotic covariance matrix from analyses AC <- matrix(c(1,.00002, .00003, .00002,1, .00002, .00003, .00002, 1), nrow=3, byrow=TRUE) #Compute CI do not include a plot monteCarloMed(med, coef1=aparam, coef2=b1param, coef3=b2param, ACM=AC) } semTools/man/standardizeMx.Rd0000644000175100001440000000437013000201061015737 0ustar hornikusers\name{standardizeMx} \alias{standardizeMx} \title{ Find standardized estimates for OpenMx output } \description{ Find standardized estimates for OpenMx output. This function is applicable for the \code{MxRAMObjective} only. } \usage{ standardizeMx(object, free = TRUE) } \arguments{ \item{object}{ Target OpenMx output using \code{MxRAMObjective} } \item{free}{ If \code{TRUE}, the function will show only standardized values of free parameters. If \code{FALSE}, the function will show the results for fixed and free parameters. 
} } \value{ A vector of standardized estimates } \seealso{ \code{\link{saturateMx}}, \code{\link{nullMx}}, \code{\link{fitMeasuresMx}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ \dontrun{ library(OpenMx) data(myFADataRaw) myFADataRaw <- myFADataRaw[,c("x1","x2","x3","x4","x5","x6")] oneFactorModel <- mxModel("Common Factor Model Path Specification", type="RAM", mxData( observed=myFADataRaw, type="raw" ), manifestVars=c("x1","x2","x3","x4","x5","x6"), latentVars="F1", mxPath(from=c("x1","x2","x3","x4","x5","x6"), arrows=2, free=TRUE, values=c(1,1,1,1,1,1), labels=c("e1","e2","e3","e4","e5","e6") ), # residual variances # ------------------------------------- mxPath(from="F1", arrows=2, free=TRUE, values=1, labels ="varF1" ), # latent variance # ------------------------------------- mxPath(from="F1", to=c("x1","x2","x3","x4","x5","x6"), arrows=1, free=c(FALSE,TRUE,TRUE,TRUE,TRUE,TRUE), values=c(1,1,1,1,1,1), labels =c("l1","l2","l3","l4","l5","l6") ), # factor loadings # ------------------------------------- mxPath(from="one", to=c("x1","x2","x3","x4","x5","x6","F1"), arrows=1, free=c(TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,FALSE), values=c(1,1,1,1,1,1,0), labels =c("meanx1","meanx2","meanx3","meanx4","meanx5","meanx6",NA) ) # means # ------------------------------------- ) # close model # Create an MxModel object # ----------------------------------------------------------------------------- oneFactorFit <- mxRun(oneFactorModel) standardizeMx(oneFactorFit) # Compare with lavaan library(lavaan) script <- "f1 =~ x1 + x2 + x3 + x4 + x5 + x6" fit <- cfa(script, data=myFADataRaw, meanstructure=TRUE) standardizedSolution(fit) } } semTools/man/rotate.Rd0000644000175100001440000000510013000201061014410 0ustar hornikusers\name{rotate} \alias{orthRotate} \alias{oblqRotate} \alias{funRotate} \title{ Implement orthogonal or oblique rotation } \description{ These functions will implement orthogonal or oblique rotation on standardized factor loadings from 
a lavaan output. } \usage{ orthRotate(object, method="varimax", ...) oblqRotate(object, method="quartimin", ...) funRotate(object, fun, ...) } \arguments{ \item{object}{ A lavaan output } \item{method}{ The method of rotations, such as \code{"varimax"}, \code{"quartimax"}, \code{"geomin"}, \code{"oblimin"}, or any gradient projection algorithms listed in the \code{\link[GPArotation]{GPA}} function in the \code{GPArotation} package. } \item{fun}{ The name of the function that users wish to use to rotate the standardized solution. The functions must take the first argument as the standardized loading matrix and return the \code{GPArotation} object. Check this page for available functions: \code{\link[GPArotation]{rotations}}. } \item{\dots}{ Additional arguments for the \code{\link[GPArotation]{GPForth}} function (for \code{orthRotate}), the \code{\link[GPArotation]{GPFoblq}} function (for \code{oblqRotate}), or the function that users provide in the \code{fun} argument. } } \details{ These functions will rotate the unrotated standardized factor loadings by orthogonal rotation using the \code{\link[GPArotation]{GPForth}} function or oblique rotation using the \code{\link[GPArotation]{GPFoblq}} function in the \code{GPArotation} package. The resulting rotation matrix will be used to calculate standard errors of the rotated standardized factor loading by the delta method by numerically computing the Jacobian matrix by the \code{lavJacobianD} function in the \code{lavaan} package. } \value{ An \code{\linkS4class{EFA}} object that saves the rotated EFA solution. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ library(lavaan) unrotated <- efaUnrotate(HolzingerSwineford1939, nf=3, varList=paste0("x", 1:9), estimator="mlr") # Orthogonal varimax out.varimax <- orthRotate(unrotated, method="varimax") summary(out.varimax, sort=FALSE, suppress=0.3) # Orthogonal Quartimin orthRotate(unrotated, method="quartimin") # Oblique Quartimin oblqRotate(unrotated, method="quartimin") # Geomin oblqRotate(unrotated, method="geomin") \dontrun{ # Target rotation library(GPArotation) target <- matrix(0, 9, 3) target[1:3, 1] <- NA target[4:6, 2] <- NA target[7:9, 3] <- NA colnames(target) <- c("factor1", "factor2", "factor3") # This function works with GPArotation version 2012.3-1 funRotate(unrotated, fun="targetQ", Target=target) } } semTools/man/dat3way.Rd0000644000175100001440000000232413000201061014473 0ustar hornikusers\name{dat3way} \alias{dat3way} \title{ Simulated Dataset to Demonstrate Three-way Latent Interaction } \description{ A simulated data set with 3 independent factors and 1 dependent factor where each factor has three indicators } \usage{ data(dat3way) } \format{ A data frame with 500 observations of 12 variables. 
\describe{ \item{x1}{The first indicator of the first independent factor} \item{x2}{The second indicator of the first independent factor} \item{x3}{The third indicator of the first independent factor} \item{x4}{The first indicator of the second independent factor} \item{x5}{The second indicator of the second independent factor} \item{x6}{The third indicator of the second independent factor} \item{x7}{The first indicator of the third independent factor} \item{x8}{The second indicator of the third independent factor} \item{x9}{The third indicator of the third independent factor} \item{x10}{The first indicator of the dependent factor} \item{x11}{The second indicator of the dependent factor} \item{x12}{The third indicator of the dependent factor} } } \source{ Data was generated by the \link[MASS]{mvrnorm} function in the \code{MASS} package. } \examples{ head(dat3way) } semTools/man/clipboard.Rd0000644000175100001440000000532213000201061015057 0ustar hornikusers\name{clipboard_saveFile} \alias{clipboard} \alias{saveFile} \title{ Copy or save the result of \code{lavaan} or \code{FitDiff} objects into a clipboard or a file } \description{ Copy or save the result of \code{lavaan} or \code{\linkS4class{FitDiff}} object into a clipboard or a file. From the clipboard, users may paste the result into the Microsoft Excel or spreadsheet application to create a table of the output. } \usage{ clipboard(object, what="summary", ...) saveFile(object, file, what="summary", tableFormat=FALSE, ...) } \arguments{ \item{object}{ The \code{lavaan} or \code{\linkS4class{FitDiff}} object } \item{what}{ The attributes of the \code{lavaan} object to be copied in the clipboard. \code{"summary"} is to copy the screen provided from the \code{summary} function. \code{"mifit"} is to copy the result from the \code{\link{miPowerFit}} function. 
Other attributes listed in the \code{inspect} method in the \link[lavaan]{lavaan-class} could also be used, such as \code{"coef"}, \code{"se"}, \code{"fit"}, \code{"samp"}, and so on. For the The \code{\linkS4class{FitDiff}} object, this argument is not active yet. } \item{file}{ A file name used for saving the result } \item{tableFormat}{ If \code{TRUE}, save the result in the table format using tabs for seperation. Otherwise, save the result as the output screen printed in the R console. } \item{\dots}{ Additional argument listed in the \code{\link{miPowerFit}} function (for \code{lavaan} object only). } } \value{ The resulting output will be saved into a clipboard or a file. If using the \code{clipboard} function, users may paste it in the other applications. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ \dontrun{ library(lavaan) HW.model <- ' visual =~ x1 + c1*x2 + x3 textual =~ x4 + c1*x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HW.model, data=HolzingerSwineford1939, group="school", meanstructure=TRUE) # Copy the summary of the lavaan object clipboard(fit) # Copy the modification indices and the model fit from the miPowerFit function clipboard(fit, "mifit") # Copy the parameter estimates clipboard(fit, "coef") # Copy the standard errors clipboard(fit, "se") # Copy the sample statistics clipboard(fit, "samp") # Copy the fit measures clipboard(fit, "fit") # Save the summary of the lavaan object saveFile(fit, "out.txt") # Save the modification indices and the model fit from the miPowerFit function saveFile(fit, "out.txt", "mifit") # Save the parameter estimates saveFile(fit, "out.txt", "coef") # Save the standard errors saveFile(fit, "out.txt", "se") # Save the sample statistics saveFile(fit, "out.txt", "samp") # Save the fit measures saveFile(fit, "out.txt", "fit") } } semTools/man/findRMSEApower.Rd0000644000175100001440000000377513000201061015717 0ustar hornikusers\name{findRMSEApower} \alias{findRMSEApower} \title{ Find the 
statistical power based on population RMSEA } \description{ Find the proportion of the samples from the sampling distribution of RMSEA in the alternative hypothesis rejected by the cutoff derived from the sampling distribution of RMSEA in the null hypothesis. This function can be applied for both the test of close fit and the test of not-close fit (MacCallum, Browne, & Sugawara, 1996) } \usage{ findRMSEApower(rmsea0, rmseaA, df, n, alpha=.05, group=1) } \arguments{ \item{rmsea0}{Null RMSEA} \item{rmseaA}{Alternative RMSEA} \item{df}{Model degrees of freedom} \item{n}{Sample size of a dataset} \item{alpha}{Alpha level used in power calculations} \item{group}{The number of groups that is used to calculate RMSEA.} } \details{ This function finds the proportion of the sampling distribution derived from the alternative RMSEA that is in the critical region derived from the sampling distribution of the null RMSEA. If \code{rmseaA} is greater than \code{rmsea0}, the test of close fit is used and the critical region is in the right hand side of the null sampling distribution. On the other hand, if \code{rmseaA} is less than \code{rmsea0}, the test of not-close fit is used and the critical region is in the left hand side of the null sampling distribution (MacCallum, Browne, & Sugawara, 1996). } \references{ MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1,} 130-149. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{plotRMSEApower}} to plot the statistical power based on population RMSEA given the sample size \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions \item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for a given statistical power based on population RMSEA } } \examples{ findRMSEApower(rmsea0=.05, rmseaA=.08, df=20, n=200) }semTools/man/plotRMSEAdist.Rd0000644000175100001440000000554713000201061015563 0ustar hornikusers\name{plotRMSEAdist} \alias{plotRMSEAdist} \title{ Plot the sampling distributions of RMSEA } \description{ Plots the sampling distributions of RMSEA based on the noncentral chi-square distributions } \usage{ plotRMSEAdist(rmsea, n, df, ptile=NULL, caption=NULL, rmseaScale = TRUE, group=1) } \arguments{ \item{rmsea}{The vector of RMSEA values to be plotted} \item{n}{Sample size of a dataset} \item{df}{Model degrees of freedom} \item{ptile}{The percentile rank of the distribution of the first RMSEA that users wish to plot a vertical line in the resulting graph} \item{caption}{The name vector of each element of \code{rmsea}} \item{rmseaScale}{If \code{TRUE}, the RMSEA scale is used in the x-axis. If \code{FALSE}, the chi-square scale is used in the x-axis.} \item{group}{The number of groups that is used to calculate RMSEA.} } \details{ This function creates overlapping plots of the sampling distribution of RMSEA based on noncentral chi-square distribution (MacCallum, Browne, & Sugawara, 1996). First, the noncentrality parameter (\eqn{\lambda}) is calculated from RMSEA (Steiger, 1998; Dudgeon, 2004) by \deqn{\lambda = (N - 1)d\varepsilon^2 / K,} where \eqn{N} is sample size, \eqn{d} is the model degree of freedom, \eqn{K} is the number of groups, and \eqn{\varepsilon} is the population RMSEA. Next, the noncentral chi-square distribution with a specified degree of freedom and noncentrality parameter is plotted. 
Thus, the x-axis represents the sample chi-square value. The sample chi-square value can be transformed to the sample RMSEA scale (\eqn{\hat{\varepsilon}}) by \deqn{\hat{\varepsilon} = \sqrt{K}\sqrt{\frac{\chi^2 - d}{(N - 1)d}},} where \eqn{\chi^2} is the chi-square value obtained from the noncentral chi-square distribution. } \references{ Dudgeon, P. (2004). A note on extending Steiger's (1998) multiple sample RMSEA adjustment to other noncentrality parameter-based statistic. \emph{Structural Equation Modeling, 11}, 305-319. MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1,} 130-149. Steiger, J. H. (1998). A note on multiple sample extensions of the RMSEA fit index. \emph{Structural Equation Modeling, 5}, 411-419. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{plotRMSEApower}} to plot the statistical power based on population RMSEA given the sample size \item \code{\link{findRMSEApower}} to find the statistical power based on population RMSEA given a sample size \item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for a given statistical power based on population RMSEA } } \examples{ plotRMSEAdist(rmsea=c(.05, .08), n=200, df=20, ptile=0.95, rmseaScale = TRUE) plotRMSEAdist(rmsea=c(.05, .01), n=200, df=20, ptile=0.05, rmseaScale = FALSE) }
} \section{Slots}{ \describe{ \item{\code{time}:}{A list containing 2 \code{difftime} objects (\code{transform} and \code{fit}), indicating the time elapsed for data transformation and for fitting the model to bootstrap data sets, respectively.} \item{\code{transData}:}{Transformed data} \item{\code{bootDist}:}{The vector of chi-square values from Bootstrap data sets fitted by the target model} \item{\code{origChi}:}{The chi-square value from the original data set} \item{\code{df}:}{The degree of freedom of the model} \item{\code{bootP}:}{The p-value comparing the original chi-square with the bootstrap distribution} } } \section{methods}{ \describe{ \item{show}{\code{signature(object = "BootMiss"):} The \code{show} function is used to display the results of the Bollen-Stine bootstrap.} \item{summary}{\code{signature(object = "BootMiss"):} The summary function prints the same information from the \code{show} method, but also provides information about the time elapsed, as well as the expected (theoretical) and observed (bootstrap) mean and variance of the chi-squared distribution.} \item{hist}{\code{signature(x = "BootMiss", ..., alpha = .05, nd = 2, printLegend = TRUE, legendArgs = list(x = "topleft")):} The \code{hist} function provides a histogram for the bootstrap distribution of chi-squared, including observed and critical values from the specified \code{alpha} level. The user can also specify additional graphical parameters to \code{\link[graphics]{hist}} via \code{...}, as well as pass a list of arguments to an optional \code{\link[graphics]{legend}} via \code{legendArgs}. If the user wants more control over customization, \code{hist} returns a list of \code{length == 2}, containing the arguments for the call to \code{hist} and the arguments to the call for \code{legend}, respectively.} } } \author{ Terrence D. 
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \seealso{ \code{\link{bsBootMiss}} } \examples{ # See the example from the bsBootMiss function } semTools/man/reliabilityL2.Rd0000644000175100001440000000730613000201061015633 0ustar hornikusers\name{reliabilityL2} \alias{reliabilityL2} \title{ Calculate the reliability values of a second-order factor } \description{ Calculate the reliability values (coefficient omega) of a second-order factor } \usage{ reliabilityL2(object, secondFactor) } \arguments{ \item{object}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions that has a second-order factor} \item{secondFactor}{The name of the second-order factor} } \details{ The first formula of the coefficient omega (in the \code{\link{reliability}}) will be mainly used in the calculation. The model-implied covariance matrix of a second-order factor model can be separated into three sources: the second-order factor, the uniqueness of the first-order factor, and the measurement error of indicators: \deqn{ \hat{\Sigma} = \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} + \Lambda \Psi_{u} \Lambda^{\prime} + \Theta, } where \eqn{\hat{\Sigma}} is the model-implied covariance matrix, \eqn{\Lambda} is the first-order factor loading, \eqn{\bold{B}} is the second-order factor loading, \eqn{\Phi_2} is the covariance matrix of the second-order factors, \eqn{\Psi_{u}} is the covariance matrix of the unique scores from first-order factors, and \eqn{\Theta} is the covariance matrix of the measurement errors from indicators. 
Thus, the proportion of the second-order factor explaining the total score, or the coefficient omega at Level 1, can be calculated: \deqn{ \omega_{L1} = \frac{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1}}{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B} ^{\prime} \Lambda^{\prime} \bold{1} + \bold{1}^{\prime} \Lambda \Psi_{u} \Lambda^{\prime} \bold{1} + \bold{1}^{\prime} \Theta \bold{1}}, } where \eqn{\bold{1}} is the \emph{k}-dimensional vector of 1 and \emph{k} is the number of observed variables. When model-implied covariance matrix among first-order factors (\eqn{\Phi_1}) can be calculated: \deqn{ \Phi_1 = \bold{B} \Phi_2 \bold{B}^{\prime} + \Psi_{u}, } Thus, the proportion of the second-order factor explaining the varaince at first-order factor level, or the coefficient omega at Level 2, can be calculated: \deqn{ \omega_{L2} = \frac{\bold{1_F}^{\prime} \bold{B} \Phi_2 \bold{B}^{\prime} \bold{1_F}}{\bold{1_F}^{\prime} \bold{B} \Phi_2 \bold{B}^{\prime} \bold{1_F} + \bold{1_F}^{\prime} \Psi_{u} \bold{1_F}}, } where \eqn{\bold{1_F}} is the \emph{F}-dimensional vector of 1 and \emph{F} is the number of first-order factors. The partial coefficient omega at Level 1, or the proportion of observed variance explained by the second-order factor after partialling the uniqueness from the first-order factor, can be calculated: \deqn{ \omega_{L1} = \frac{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1}}{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1} + \bold{1}^{\prime} \Theta \bold{1}}, } Note that if the second-order factor has a direct factor loading on some observed variables, the observed variables will be counted as first-order factors. 
} \value{ Reliability values at Levels 1 and 2 of the second-order factor, as well as the partial reliability value at Level 1 } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \code{\link{reliability}} for the reliability of the first-order factors. } \examples{ library(lavaan) HS.model3 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 higher =~ visual + textual + speed' fit6 <- cfa(HS.model3, data=HolzingerSwineford1939) reliability(fit6) # Should provide a warning for the endogenous variable reliabilityL2(fit6, "higher") } semTools/man/plotProbe.Rd0000644000175100001440000000603413000201061015067 0ustar hornikusers\name{plotProbe} \alias{plotProbe} \title{ Plot the graphs for probing latent interaction } \description{ This function will plot the line graphs representing the simple effect of the independent variable given the values of the moderator. } \usage{ plotProbe(object, xlim, xlab="Indepedent Variable", ylab="Dependent Variable", ...) } \arguments{ \item{object}{ The result of probing latent interaction obtained from \code{\link{probe2WayMC}}, \code{\link{probe2WayRC}}, \code{\link{probe3WayMC}}, or \code{\link{probe3WayRC}} function. } \item{xlim}{ The vector of two numbers: the minimum and maximum values of the independent variable } \item{xlab}{ The label of the x-axis } \item{ylab}{ The label of the y-axis } \item{\dots}{ Any addition argument for the \code{\link{plot}} function } } \value{ None. This function will plot the simple main effect only. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. \item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering, or double-mean centering. 
\item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{probe3WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. } } \examples{ library(lavaan) dat2wayMC <- indProd(dat2way, 1:3, 4:6) model1 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f12 =~ x1.x4 + x2.x5 + x3.x6 f3 =~ x7 + x8 + x9 f3 ~ f1 + f2 + f12 f12 ~~0*f1 f12 ~~ 0*f2 x1 ~ 0*1 x4 ~ 0*1 x1.x4 ~ 0*1 x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f12 ~ NA*1 f3 ~ NA*1 " fitMC2way <- sem(model1, data=dat2wayMC, meanstructure=TRUE, std.lv=FALSE) result2wayMC <- probe2WayMC(fitMC2way, c("f1", "f2", "f12"), "f3", "f2", c(-1, 0, 1)) plotProbe(result2wayMC, xlim=c(-2, 2)) dat3wayMC <- indProd(dat3way, 1:3, 4:6, 7:9) model3 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f3 =~ x7 + x8 + x9 f12 =~ x1.x4 + x2.x5 + x3.x6 f13 =~ x1.x7 + x2.x8 + x3.x9 f23 =~ x4.x7 + x5.x8 + x6.x9 f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9 f4 =~ x10 + x11 + x12 f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123 f1 ~~ 0*f12 f1 ~~ 0*f13 f1 ~~ 0*f123 f2 ~~ 0*f12 f2 ~~ 0*f23 f2 ~~ 0*f123 f3 ~~ 0*f13 f3 ~~ 0*f23 f3 ~~ 0*f123 f12 ~~ 0*f123 f13 ~~ 0*f123 f23 ~~ 0*f123 x1 ~ 0*1 x4 ~ 0*1 x7 ~ 0*1 x10 ~ 0*1 x1.x4 ~ 0*1 x1.x7 ~ 0*1 x4.x7 ~ 0*1 x1.x4.x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f3 ~ NA*1 f12 ~ NA*1 f13 ~ NA*1 f23 ~ NA*1 f123 ~ NA*1 f4 ~ NA*1 " fitMC3way <- sem(model3, data=dat3wayMC, meanstructure=TRUE, std.lv=FALSE) result3wayMC <- probe3WayMC(fitMC3way, c("f1", "f2", "f3", "f12", "f13", "f23", "f123"), "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1)) plotProbe(result3wayMC, xlim=c(-2, 2)) } semTools/man/parcelAllocation.Rd0000644000175100001440000001110313000201061016366 0ustar hornikusers\name{parcelAllocation} \alias{parcelAllocation} \title{ Random Allocation of Items to 
Parcels in a Structural Equation Model } \description{ This function generates a given number of randomly generated item-to-parcel allocations, fits a model to each allocation, and provides averaged results over all allocations. } \usage{ parcelAllocation(nPerPar, facPlc, nAlloc=100, syntax, dataset, names='default', leaveout=0, ...) } \arguments{ \item{nPerPar}{A list in which each element is a vector corresponding to each factor indicating sizes of parcels. If variables are left out of parceling, they should not be accounted for here (there should NOT be parcels of size "1").} \item{facPlc}{A list of vectors, each corresponding to a factor, specifying the variables in that factor (whether included in parceling or not). Either variable names or column numbers. Variables not listed will not be modeled or included in output datasets. } \item{nAlloc}{The number of random allocations of items to parcels to generate.} \item{syntax}{\link{lavaan} syntax. If substituted with a file name, parcelAllocation will print output data sets to a specified folder rather than analyzing using lavaan (note for Windows users: file path must be specified using forward slashes).} \item{dataset}{Data set. Can be file path or R object (matrix or dataframe). If the data has missing values multiple imputation before parceling is recommended.} \item{names}{(Optional) A character vector containing the names of parceled variables.} \item{leaveout}{A vector of variables to be left out of randomized parceling. Either variable names or column numbers are allowed.} \item{\dots}{Additional arguments to be passed to \link{lavaan}} } \details{ This function implements the random item to parcel allocation procedure described in Sterba (2011) and Sterba and MccCallum (2010). 
The function takes a single data set with item level data, randomly assigns items to parcels, fits a structural equation model to the parceled data (using \link{lavaan}), and repeats this process for a user specified number of random allocations. Results from all fitted models are summarized and output. For further details on the benefits of the random allocation of itesm to parcels see Sterba (2011) and Sterba and MccCallum (2010). } \value{ \item{Estimates}{A data frame containing results related to parameter estimates with columns corresponding to parameter names, average parameter estimates across allocations, the standard deviation of parameter estimates across allocations, the minimum parameter estimate across allocations, the maximum parameter estimate across allocations, the range of parameter estimates across allocations, and the proportions of allocations in which the parameter estimate is significant.} \item{SE}{A data frame containing results related to standard errors with columns corresponding to parameter names, average standard errors across allocations, the standard deviation of standard errors across allocations, the minimum standard error across allocations, the maximum standard error across allocations, and the range of standard errors across allocations.} \item{Fit}{A data frame containing results related to model fit with columns corresponding to fit index names, the average of each index across allocations, the standard deviation of each fit index across allocations, the minimum of each fit index across allocations, the maximum of each fit index across allocations, and the range of each fit index across allocations.} } \references{ Sterba, S.K. (2011). Implications of parcel-allocation variability for comparing fit of item-solutions and parcel-solutions. \emph{Structural Equation Modeling, 18,} 554-577. Sterba, S.K. & MacCallum, R.C. (2010). Variability in parameter estimates and model fit across random allocations of items to parcels. 
\emph{Multivariate Behavioral Research, 45,} 322-358. } \seealso{ \code{\link{PAVranking}}, \code{\link{poolMAlloc}} } \author{ Corbin Quick (University of Michigan; \email{corbinq@umich.edu}) Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) } \examples{ #Fit 3 factor CFA to simulated data. #Each factor has 9 indicators that are randomly parceled into 3 parcels #Lavaan syntax for the model to be fit to parceled data library(lavaan) syntax <- 'La =~ V1 + V2 + V3 Lb =~ V4 + V5 + V6 ' #Parcel and fit data 20 times. The actual parcel number should be higher than 20 times. name1 <- colnames(simParcel)[1:9] name2 <- colnames(simParcel)[10:18] parcelAllocation(list(c(3,3,3),c(3,3,3)), list(name1, name2), nAlloc=20, syntax=syntax, dataset=simParcel) } semTools/man/combinequark.Rd0000644000175100001440000000261513000201061015602 0ustar hornikusers\name{combinequark} \alias{combinequark} \title{ Combine the results from the quark function } \description{ This function builds upon the \code{\link{quark}} function to provide a final dataset comprised of the original dataset provided to \code{\link{quark}} and enough principal components to be able to account for a certain level of variance in the data. } \usage{ combinequark(quark, percent) } \arguments{ \item{quark}{Provide the \code{\link{quark}} object that was returned. It should be a list of objects. Make sure to include it in its entirety.} \item{percent}{Provide a percentage of variance that you would like to have explained. That many components (columns) will be extracted and kept with the output dataset. Enter this variable as a number WITHOUT a percentage sign.} } \value{ The output of this function is the original dataset used in quark combined with enough principal component scores to be able to account for the amount of variance that was requested. } \author{ Steven R. 
Chesnut (University of Southern Mississippi \email{Steven.Chesnut@usm.edu}) } \seealso{ \code{\link{quark}} } \examples{ set.seed(123321) dat <- HolzingerSwineford1939[,7:15] misspat <- matrix(runif(nrow(dat) * 9) < 0.3, nrow(dat)) dat[misspat] <- NA dat <- cbind(HolzingerSwineford1939[,1:3], dat) quark.list <- quark(data = dat, id = c(1, 2)) final.data <- combinequark(quark = quark.list, percent = 80) } semTools/man/fmi.Rd0000644000175100001440000000730013000201061013671 0ustar hornikusers\name{fmi} \alias{fmi} \title{ Fraction of Missing Information. } \description{ This function takes a list of imputed data sets and estimates the Fraction of Missing Information of the Variances and Means for each variable. } \usage{ fmi(dat.imp, method="saturated", varnames=NULL, group=NULL, exclude=NULL, digits=3) } \arguments{ \item{dat.imp}{ List of imputed data sets, the function only accept a list of data frames. } \item{method}{ Specified the model used to estimated the variances and means. Can be one of the following: \code{"saturated"} (\code{"sat"}) or \code{"null"}, the default is \code{"saturated"}. See Details for more information. } \item{varnames}{ A vector of variables names. This argument allow the user to get the fmi of a subset of variables. The function by default will estimate the fmi for all the variables. } \item{group}{ A variable name defining the groups. This will give the fmi for each group. } \item{exclude}{ A vector of variables names. These variables will be excluded from the analysis. } \item{digits}{ Number of decimals to print in the results. } } \details{ The function estimates a variance/covariance model for each data set using lavaan. If method = \code{"saturated"} the function will estimate all the variances and covariances, if method = \code{"null"} the function will only estimate the variances. The saturated model gives more reliable estimates. With big data sets using the saturated model could take a lot of time. 
In the case of having problems with big data sets it is helpful to select a subset of variables with \code{varnames} and/or use the \code{"null"} model. The function does not accept character variables. } \value{ fmi returns a list with the Fraction of Missing Information of the Variances and Means for each variable in the data set. \item{Variances}{The estimated variance for each variable, and the respective standard error. Two estimates Fraction of Missing Information of the Variances. The first estimate of fmi (fmi.1) is asymptotic fmi and the second estimate of fmi (fmi.2) is corrected for small numbers of imputations} \item{Means}{The estimated mean for each variable, and the respective standard error. Two estimates Fraction of Missing Information of the Means. The first estimate of fmi (fmi.1) is asymptotic fmi and the second estimate of fmi (fmi.2) is corrected for small numbers of imputations} } \references{ Rubin, D.B. (1987) \emph{Multiple Imputation for Nonresponse in Surveys.} J. Wiley & Sons, New York. Savalei, V. & Rhemtulla, M. (2012) On Obtaining Estimates of the Fraction of Missing Information From Full Information Maximum Likelihood, \emph{Structural Equation Modeling: A Multidisciplinary Journal, 19:3}, 477-494. Wagner, J. (2010) The Fraction of Missing Information as a Tool for Monitoring the Quality of Survey Data, \emph{Public Opinion Quarterly, 74:2}, 223-243. 
} \author{Mauricio Garnier Villarreal (University of Kansas; \email{mgv@ku.edu}) } \examples{ library(Amelia) library(lavaan) modsim <- ' f1 =~ 0.7*y1+0.7*y2+0.7*y3 f2 =~ 0.7*y4+0.7*y5+0.7*y6 f3 =~ 0.7*y7+0.7*y8+0.7*y9' datsim <- simulateData(modsim,model.type="cfa", meanstructure=TRUE, std.lv=TRUE, sample.nobs=c(200,200)) randomMiss2 <- rbinom(prod(dim(datsim)), 1, 0.1) randomMiss2 <- matrix(as.logical(randomMiss2), nrow=nrow(datsim)) randomMiss2[,10] <- FALSE datsim[randomMiss2] <- NA datsimMI <- amelia(datsim,m=3,idvars="group") out1 <- fmi(datsimMI$imputations, exclude="group") out1 out2 <- fmi(datsimMI$imputations, exclude="group", method="null") out2 out3 <- fmi(datsimMI$imputations, varnames=c("y1","y2","y3","y4")) out3 out4 <- fmi(datsimMI$imputations, group="group") out4 } semTools/man/wald.Rd0000644000175100001440000000412213000201061014044 0ustar hornikusers\name{wald} \alias{wald} \title{ Calculate multivariate Wald statistics } \description{ Calculate multivariate Wald statistics based on linear combinations of model parameters } \usage{ wald(object, syntax) } \arguments{ \item{object}{An output from \code{lavaan}} \item{syntax}{Syntax that each line represents one linear constraint. A plus or minus sign is used to separate between each coefficient. An asterisk is used to separate between coefficients and parameters. The coefficient can have a forward slash to represent a division. The parameter names must be matched with the names of lavaan parameters investigated by running the \code{coef} function on a lavaan output. Lines can be separated by semi-colon. A pound sign is allowed for comments. 
Note that the defined parameters (created by ":=") do not work with this function.} } \details{ The formula for multivariate Wald test is \deqn{ \chi^2 = \left(C\hat{b}\right)^\prime\left[C\hat{V}C^\prime\right]^{-1}\left(C\hat{b}\right),} where \eqn{C} is the contrast matrix, \eqn{\hat{b}} is the estimated fixed effect, \eqn{\hat{V}} is the asymptotic covariance matrix among fixed effects. } \value{ Chi-square value with \emph{p} value. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ # Test the difference in factor loadings library(lavaan) HS.model <- ' visual =~ x1 + con1*x2 + con1*x3 textual =~ x4 + x5 + x6 speed =~ x7 + con2*x8 + con2*x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) wald(fit, "con2 - con1") # Simultaneously test the difference in the influences # of x1 and x2 on intercept and slope model.syntax <- ' i =~ 1*t1 + 1*t2 + 1*t3 + 1*t4 s =~ 0*t1 + 1*t2 + 2*t3 + 3*t4 i ~ x1 + x2 s ~ x1 + x2 t1 ~ c1 t2 ~ c2 t3 ~ c3 t4 ~ c4 ' fit2 <- growth(model.syntax, data=Demo.growth) wald.syntax <- ' i~x1 - i~x2 1/2*s~x1 - 1/2*s~x2 ' wald(fit2, wald.syntax) # Mplus example of MODEL TEST model3 <- ' f1 =~ x1 + p2*x2 + p3*x3 + p4*x4 + p5*x5 + p6*x6 p4 == 2*p2' fit3 <- cfa(model3, data=HolzingerSwineford1939) wald(fit3, "p3; p6 - 0.5*p5") } semTools/man/measurementInvariance.Rd0000644000175100001440000000733413000201061017452 0ustar hornikusers\name{measurementInvariance} \alias{measurementInvariance} \alias{measurementinvariance} \title{ Measurement Invariance Tests } \description{ Testing measurement invariance across groups using a typical sequence of model comparison tests. } \usage{ measurementInvariance(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", method = "satorra.bentler.2001") } \arguments{ \item{...}{The same arguments as for any lavaan model. See \code{\link{cfa}} for more information.} \item{std.lv}{If \code{TRUE}, the fixed-factor method of scale identification is used. 
If \code{FALSE}, the first variable for each factor is used as marker variable.} \item{strict}{If \code{TRUE}, the sequence requires `strict' invariance. See details for more information.} \item{quiet}{If \code{FALSE} (default), a summary is printed out containing an overview of the different models that are fitted, together with some model comparison tests. If \code{TRUE}, no summary is printed.} \item{fit.measures}{Fit measures used to calculate the differences between nested models.} \item{method}{The method used to calculate likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} } \details{ If \code{strict = FALSE}, the following four models are tested in order: \enumerate{ \item{Model 1: configural invariance. The same factor structure is imposed on all groups.} \item{Model 2: weak invariance. The factor loadings are constrained to be equal across groups.} \item{Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across groups.} \item{Model 4: The factor loadings, intercepts and means are constrained to be equal across groups.} } Each time a more restricted model is fitted, a chi-square difference test is reported, comparing the current model with the previous one, and comparing the current model to the baseline model (Model 1). In addition, the difference in cfi is also reported (delta.cfi). If \code{strict = TRUE}, the following five models are tested in order: \enumerate{ \item{Model 1: configural invariance. The same factor structure is imposed on all groups.} \item{Model 2: weak invariance. The factor loadings are constrained to be equal across groups.} \item{Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across groups.} \item{Model 4: strict invariance. 
The factor loadings, intercepts and residual variances are constrained to be equal across groups.} \item{Model 5: The factor loadings, intercepts, residual variances and means are constrained to be equal across groups.} } Note that if the chi-square test statistic is scaled (eg. a Satorra-Bentler or Yuan-Bentler test statistic), a special version of the chi-square difference test is used as described in \url{http://www.statmodel.com/chidiff.shtml} } \value{ Invisibly, all model fits in the sequence are returned as a list. } \references{ Vandenberg, R. J., and Lance, C. E. (2000). A review and synthesis of the measurement invariance literature: Suggestions, practices, and recommendations for organizational research. \emph{Organizational Research Methods, 3,} 4-70. } \author{ Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}); Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \code{\link{longInvariance}} for the measurement invariance test within person; \code{partialInvariance} for the automated function for finding partial invariance models } \examples{ HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' measurementInvariance(HW.model, data=HolzingerSwineford1939, group="school") } semTools/man/moreFitIndices.Rd0000644000175100001440000001210113000201061016015 0ustar hornikusers\name{moreFitIndices} \alias{moreFitIndices} \title{ Calculate more fit indices } \description{ Calculate more fit indices that are not already provided in lavaan. } \usage{ moreFitIndices(object, fit.measures = "all", nPrior = 1) } \arguments{ \item{object}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.} \item{fit.measures}{Additional fit measures to be calculated. All additional fit measures are calculated by default} \item{nPrior}{The sample size on which prior is based. 
This argument is used to compute BIC*.} } \details{ Gamma Hat (gammaHat; West, Taylor, & Wu, 2012) is a global fit index which can be computed by \deqn{ gammaHat =\frac{p}{p + 2 \times \frac{\chi^{2}_{k} - df_{k}}{N - 1}},} where \eqn{p} is the number of variables in the model, \eqn{\chi^{2}_{k}} is the chi-square test statistic value of the target model, \eqn{df_{k}} is the degree of freedom when fitting the target model, and \eqn{N} is the sample size. This formula assumes equal number of indicators across groups. Adjusted Gamma Hat (adjGammaHat; West, Taylor, & Wu, 2012) is a global fit index which can be computed by \deqn{ adjGammaHat = \left(1 - \frac{K \times p \times (p + 1)}{2 \times df_{k}} \right) \times \left( 1 - gammaHat \right) ,} where \eqn{K} is the number of groups (please refer to Dudgeon, 2004 for the multiple-group adjustment for agfi*). Corrected Akaike Information Criterion (aic.smallN; Burnham & Anderson, 2003) is the corrected version of aic for small sample size: \deqn{ aic.smallN = f + \frac{2k(k + 1)}{N - k - 1},} where \eqn{f} is the minimized discrepancy function, which is the product of the log likelihood and -2, and \eqn{k} is the number of parameters in the target model. Corrected Bayesian Information Criterion (bic.priorN; Kuha, 2004) is similar to bic but explicitly specifying the sample size on which the prior is based (\eqn{N_{prior}}). \deqn{ bic.priorN = f + k\log{(1 + N/N_{prior})},} Stochastic information criterion (sic; Preacher, 2006) is similar to aic or bic. This index will account for model complexity in the model's function form, in addition to the number of free parameters. This index will be provided only when the chi-squared value is not scaled. The sic can be computed by \deqn{ sic = \frac{1}{2}\left(f - \log{\det{I(\hat{\theta})}}\right),} where \eqn{I(\hat{\theta})} is the information matrix of the parameters. 
Hannan-Quinn Information Criterion (hqc; Hannan & Quinn, 1979) is used for model selection similar to aic or bic. \deqn{ hqc = f + 2k\log{(\log{N})},} Note that if Satorra-Bentler or Yuan-Bentler's method is used, the fit indices using the scaled chi-square values are also provided. See \code{\link{nullRMSEA}} for the further details of the computation of RMSEA of the null model. } \value{ \enumerate{ \item{gammaHat} Gamma Hat \item{adjGammaHat} Adjusted Gamma Hat \item{baseline.rmsea} RMSEA of the Baseline (Null) Model \item{aic.smallN} Corrected (for small sample size) Akaike Information Criterion \item{bic.priorN} Bayesian Information Criterion with specifying the prior sample size \item{sic} Stochastic Information Criterion \item{hqc} Hannan-Quinn Information Criterion \item{gammaHat.scaled} Gamma Hat using Scaled Chi-square \item{adjGammaHat.scaled} Adjusted Gamma Hat using Scaled Chi-square \item{baseline.rmsea.scaled} RMSEA of the Baseline (Null) Model using Scaled Chi-square } } \references{ Burnham, K., & Anderson, D. (2003). \emph{Model selection and multimodel inference: A practical-theoretic approach.} New York, NY: Springer-Verlag. Dudgeon, P. (2004). A note on extending Steiger's (1998) multiple sample RMSEA adjustment to other noncentrality parameter-based statistic. \emph{Structural Equation Modeling, 11}, 305-319. Kuha, J. (2004). AIC and BIC: Comparisons of assumptions and performance. \emph{Sociological Methods Research, 33}, 188-229. Preacher, K. J. (2006). Quantifying parsimony in structural equation modeling. \emph{Multivariate Behavioral Research, 43}, 227-259. West, S. G., Taylor, A. B., & Wu, W. (2012). Model fit and model selection in structural equation modeling. In R. H. Hoyle (Ed.), \emph{Handbook of Structural Equation Modeling.} New York: Guilford. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) Aaron Boulton (University of North Carolina, Chapel Hill; \email{aboulton@email.unc.edu}) Ruben Arslan (Humboldt-University of Berlin, \email{rubenarslan@gmail.com}) Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) } \seealso{ \itemize{ \item \code{\link{miPowerFit}} For the modification indices and their power approach for model fit evaluation \item \code{\link{nullRMSEA}} For RMSEA of the null model } } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) moreFitIndices(fit) fit2 <- cfa(HS.model, data=HolzingerSwineford1939, estimator="mlr") moreFitIndices(fit2) } semTools/man/kurtosis.Rd0000644000175100001440000000305013000201061014777 0ustar hornikusers\name{kurtosis} \alias{kurtosis} \title{ Finding excessive kurtosis } \description{ Finding excessive kurtosis (g2) of an object } \usage{ kurtosis(object, population=FALSE) } \arguments{ \item{object}{ A vector used to find a excessive kurtosis } \item{population}{ \code{TRUE} to compute the parameter formula. \code{FALSE} to compute the sample statistic formula. } } \value{ A value of an excessive kurtosis with a test statistic if the population is specified as \code{FALSE} } \details{ The excessive kurtosis computed is g2. The parameter excessive kurtosis \eqn{\gamma_{2}} formula is \deqn{\gamma_{2} = \frac{\mu_{4}}{\mu^{2}_{2}} - 3,} where \eqn{\mu_{i}} denotes the \eqn{i} order central moment. The excessive kurtosis formula for sample statistic \eqn{g_{2}} is \deqn{g_{2} = \frac{k_{4}}{k^{2}_{2}},} where \eqn{k_{i}} are the \eqn{i} order \emph{k}-statistic. The standard error of the excessive kurtosis is \deqn{Var(\hat{g}_2) = \frac{24}{N}} where \eqn{N} is the sample size. } \references{ Weisstein, Eric W. (n.d.). 
\emph{Kurtosis.} Retrived from MathWorld--A Wolfram Web Resource \url{http://mathworld.wolfram.com/Kurtosis.html} } \seealso{ \itemize{ \item \code{\link{skew}} Find the univariate skewness of a variable \item \code{\link{mardiaSkew}} Find the Mardia's multivariate skewness of a set of variables \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate kurtosis of a set of variables } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ kurtosis(1:5) } semTools/man/reliability.Rd0000644000175100001440000001425113000201061015432 0ustar hornikusers\name{reliability} \alias{reliability} \title{ Calculate reliability values of factors } \description{ Calculate reliability values of factors by coefficient omega } \usage{ reliability(object) } \arguments{ \item{object}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.} } \details{ The coefficient alpha (Cronbach, 1951) can be calculated by \deqn{ \alpha = \frac{k}{k - 1}\left[ 1 - \frac{\sum^{k}_{i = 1} \sigma_{ii}}{\sum^{k}_{i = 1} \sigma_{ii} + 2\sum_{i < j} \sigma_{ij}} \right],} where \eqn{k} is the number of items in a factor, \eqn{\sigma_{ii}} is the item \emph{i} observed variances, \eqn{\sigma_{ij}} is the observed covariance of items \emph{i} and \emph{j}. The coefficient omega (Raykov, 2001) can be calculated by \deqn{ \omega_1 =\frac{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2} Var\left( \psi \right)}{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2} Var\left( \psi \right) + \sum^{k}_{i = 1} \theta_{ii} + 2\sum_{i < j} \theta_{ij} }, } where \eqn{\lambda_i} is the factor loading of item \emph{i}, \eqn{\psi} is the factor variance, \eqn{\theta_{ii}} is the variance of measurement errors of item \emph{i}, and \eqn{\theta_{ij}} is the covariance of measurement errors from item \emph{i} and \emph{j}. 
The second coefficient omega (Bentler, 1972, 2009) can be calculated by \deqn{ \omega_2 = \frac{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2} Var\left( \psi \right)}{\bold{1}^\prime \hat{\Sigma} \bold{1}}, } where \eqn{\hat{\Sigma}} is the model-implied covariance matrix, and \eqn{\bold{1}} is the \eqn{k}-dimensional vector of 1. The first and the second coefficients omega will have different values if there are dual loadings (or the existence of method factors). The first coefficient omega can be viewed as the reliability controlling for the other factors (like partial eta-squared in ANOVA). The second coefficient omega can be viewed as the unconditional reliability (like eta-squared in ANOVA). The third coefficient omega (McDonald, 1999), which is sometimes referred to hierarchical omega, can be calculated by \deqn{ \omega_3 =\frac{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2} Var\left( \psi \right)}{\bold{1}^\prime \Sigma \bold{1}}, } where \eqn{\Sigma} is the observed covariance matrix. If the model fits the data well, the third coefficient omega will be similar to the \eqn{\omega_2}. Note that if there is a directional effect in the model, all coefficients omega will use the total factor variances, which is calculated by \code{\link[lavaan]{lavInspect}(object, "cov.lv")}. In conclusion, \eqn{\omega_1}, \eqn{\omega_2}, and \eqn{\omega_3} are different in the denominator. The denominator of the first formula assumes that a model is congeneric factor model where measurement errors are not correlated. The second formula is accounted for correlated measurement errors. However, these two formulas assume that the model-implied covariance matrix explains item relationships perfectly. The residuals are subject to sampling error. The third formula use observed covariance matrix instead of model-implied covariance matrix to calculate the observed total variance. This formula is the most conservative method in calculating coefficient omega. 
The average variance extracted (AVE) can be calculated by \deqn{ AVE = \frac{\bold{1}^\prime \textrm{diag}\left(\Lambda\Psi\Lambda^\prime\right)\bold{1}}{\bold{1}^\prime \textrm{diag}\left(\hat{\Sigma}\right) \bold{1}}, } Note that this formula is modified from Fornell & Larcker (1981) in the case that factor variances are not 1. The proposed formula from Fornell & Larcker (1981) assumes that the factor variances are 1. Note that AVE will not be provided for factors consisting of items with dual loadings. AVE is the property of items but not the property of factors. Regarding to categorical items, coefficient alpha and AVE are calculated based on polychoric correlations. The coefficient alpha from this function may be not the same as the standard alpha calculation for categorical items. Researchers may check the \code{alpha} function in the \code{psych} package for the standard coefficient alpha calculation. Item thresholds are not accounted for. Coefficient omega for categorical items, however, is calculated by accounting for both item covariances and item thresholds using Green and Yang's (2009, formula 21) approach. Three types of coefficient omega indicate different methods to calculate item total variances. The original formula from Green and Yang is equivalent to \eqn{\omega_3} in this function. } \value{ Reliability values (coefficient alpha, coefficients omega, average variance extracted) of each factor in each group } \references{ Bentler, P. M. (1972). A lower-bound method for the dimension-free measurement of internal consistency. \emph{Social Science Research, 1}, 343-357. Bentler, P. M. (2009). Alpha, dimension-free, and model-based internal consistency reliability. \emph{Psychometrika, 74}, 137-143. Cronbach, L. J. (1951). Coefficient alpha and the internal structure of tests. \emph{Psychometrika, 16}, 297-334. Fornell, C., & Larcker, D. F. (1981). Evaluating structural equation models with unobservable variables and measurement errors. 
\emph{Journal of Marketing Research, 18}, 39-50. Green, S. B., & Yang, Y. (2009). Reliability of summed item scores using structural equation modeling: An alternative to coefficient alpha. \emph{Psychometrika, 74}, 155-167. McDonald, R. P. (1999). Test theory: A unified treatment. Mahwah, NJ: Erlbaum. Raykov, T. (2001). Estimation of congeneric scale reliability using covariance structure analysis with nonlinear constraints \emph{British Journal of Mathematical and Statistical Psychology, 54}, 315-323. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}); Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) } \seealso{ \code{\link{reliabilityL2}} for reliability value of a desired second-order factor, \code{\link{maximalRelia}} for the maximal reliability of weighted composite } \examples{ library(lavaan) HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) reliability(fit) } semTools/man/splitSample.rd0000644000175100001440000000605313000201061015457 0ustar hornikusers\name{splitSample} \alias{splitSample} \title{ Randomly Split a Data Set into Halves } \description{ This function randomly splits a data set into two halves, and saves the resulting data sets to the same folder as the original. } \usage{ splitSample(dataset,path="default", div=2, type="default", name="splitSample") } \arguments{ \item{dataset}{The original data set to be divided. Can be a file path to a .csv or .dat file (headers will automatically be detected) or an R object (matrix or dataframe). (Windows users: file path must be specified using FORWARD SLASHES ONLY.)} \item{path}{File path to folder for output data sets. NOT REQUIRED if dataset is a filename. Specify ONLY if dataset is an R object, or desired output folder is not that of original data set. If path is specified as "object", output data sets will be returned as a list, and not saved to hard drive. 
} \item{div}{Number of output data sets. NOT REQUIRED if default, 2 halves.} \item{type}{Output file format ("dat" or "csv"). NOT REQUIRED unless desired output formatting differs from that of input, or dataset is an R object and csv formatting is desired.} \item{name}{Output file name. NOT REQUIRED unless desired output name differs from that of input, or input dataset is an R object. (If input is an R object and name is not specified, name will be "splitSample".)} } \details{ This function randomly orders the rows of a data set, divides the data set into two halves, and saves the halves to the same folder as the original data set, preserving the original formatting. Data set type (.csv or .dat) and formatting (headers) are automatically detected, and output data sets will preserve input type and formatting unless specified otherwise. Input can be in the form of a file path (.dat or .csv), or an R object (matrix or dataframe). If input is an R object and path is default, output data sets will be returned as a list object. } \value{ \item{dataL}{List of output data sets. ONLY IF dataset is an R object and path is default. 
Otherwise, output will saved to hard drive with the same formatting as input.} } \author{ Corbin Quick (University of Michigan; \email{corbinq@umich.edu}) } \examples{ #### Input is .dat file #splitSample("C:/Users/Default/Desktop/MYDATA.dat") #### Output saved to "C:/Users/Default/Desktop/" in .dat format #### Names are "MYDATA_s1.dat" and "MYDATA_s2.dat" #### Input is R object ##Split C02 dataset from the datasets package library(datasets) splitMyData <- splitSample(CO2, path="object") summary(splitMyData[[1]]) summary(splitMyData[[2]]) #### Output object splitMyData becomes list of output data sets #### Input is .dat file in "C:/" folder #splitSample("C:/testdata.dat", path = "C:/Users/Default/Desktop/", type = "csv") #### Output saved to "C:/Users/Default/Desktop/" in .csv format #### Names are "testdata_s1.csv" and "testdata_s2.csv" #### Input is R object #splitSample(myData, path = "C:/Users/Default/Desktop/", name = "splitdata") #### Output saved to "C:/Users/Default/Desktop/" in .dat format #### Names are "splitdata_s1.dat" and "splitdata_s2.dat" } semTools/man/efaUnrotate.Rd0000644000175100001440000000425613000201061015402 0ustar hornikusers\name{efaUnrotate} \alias{efaUnrotate} \title{ Analyze Unrotated Exploratory Factor Analysis Model } \description{ This function will analyze unrotated exploratory factor analysis model. The unrotated solution can be rotated by the \code{\link{orthRotate}} and \code{\link{oblqRotate}} functions. } \usage{ efaUnrotate(data, nf, varList=NULL, start=TRUE, aux=NULL, ...) } \arguments{ \item{data}{ A target data frame. } \item{nf}{ The desired number of factors } \item{varList}{ Target observed variables. If not specified, all variables in the target data frame will be used. } \item{start}{ Use starting values in the analysis from the \code{\link{factanal}} function. If \code{FALSE}, the starting values from the \code{lavaan} package will be used. } \item{aux}{ The list of auxiliary variables. 
These variables will be included in the model by the saturated-correlates approach to account for missing information. } \item{\dots}{ Other arguments in the \code{\link[lavaan]{cfa}} function in the \code{lavaan} package, such as \code{ordered}, \code{se}, or \code{estimator} } } \details{ This function will generate a lavaan script for unrotated exploratory factor analysis model such that 1) all factor loadings are estimated, 2) factor variances are fixed to 1, 3) factor covariances are fixed to 0, and 4) the dot products of any pairs of columns in the factor loading matrix are fixed to zero (Johnson and Wichern, 2002). The reason for creating this function in addition to the \code{\link{factanal}} function is that users can enjoy some advanced features from the \code{lavaan} package such as scaled chi-square, diagonal weighted least square for ordinal indicators, or full-information maximum likelihood. } \value{ A \code{lavaan} output of unrotated exploratory factor analysis solution. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ unrotated <- efaUnrotate(HolzingerSwineford1939, nf=3, varList=paste0("x", 1:9), estimator="mlr") summary(unrotated, std=TRUE) inspect(unrotated, "std") dat <- data.frame(HolzingerSwineford1939, z=rnorm(nrow(HolzingerSwineford1939), 0, 1)) unrotated2 <- efaUnrotate(dat, nf=2, varList=paste0("x", 1:9), aux="z") } semTools/man/findRMSEApowernested.Rd0000644000175100001440000000325513000201061017113 0ustar hornikusers\name{findRMSEApowernested} \alias{findRMSEApowernested} \title{Find power given a sample size in nested model comparison} \description{ Find the sample size that the power in rejection the samples from the alternative pair of RMSEA is just over the specified power. 
} \usage{ findRMSEApowernested(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, n, alpha=.05, group=1) } \arguments{ \item{rmsea0A}{The H0 baseline RMSEA.} \item{rmsea0B}{The H0 alternative RMSEA (trivial misfit).} \item{rmsea1A}{The H1 baseline RMSEA.} \item{rmsea1B}{The H1 alternative RMSEA (target misfit to be rejected).} \item{dfA}{degree of freedom of the more-restricted model.} \item{dfB}{degree of freedom of the less-restricted model.} \item{n}{Sample size.} \item{alpha}{The alpha level.} \item{group}{The number of group in calculating RMSEA.} } \references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11}, 19-35. } \author{ Bell Clinton; Pavel Panko (Texas Tech University; \email{pavel.panko@ttu.edu}); Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{plotRMSEApowernested}} to plot the statistical power for nested model comparison based on population RMSEA given the sample size \item \code{\link{findRMSEAsamplesizenested}} to find the minium sample size for a given statistical power in nested model comparison based on population RMSEA } } \examples{ findRMSEApowernested(rmsea0A = 0.06, rmsea0B = 0.05, rmsea1A = 0.08, rmsea1B = 0.05, dfA = 22, dfB = 20, n = 200, alpha = 0.05, group = 1) } semTools/man/findRMSEAsamplesize.Rd0000644000175100001440000000346013000201061016726 0ustar hornikusers\name{findRMSEAsamplesize} \alias{findRMSEAsamplesize} \title{ Find the minimum sample size for a given statistical power based on population RMSEA } \description{ Find the minimum sample size for a specified statistical power based on population RMSEA. 
This function can be applied for both test of close fit and test of not-close fit (MacCallum, Browne, & Suguwara, 1996) } \usage{ findRMSEAsamplesize(rmsea0, rmseaA, df, power=0.80, alpha=.05, group=1) } \arguments{ \item{rmsea0}{Null RMSEA} \item{rmseaA}{Alternative RMSEA} \item{df}{Model degrees of freedom} \item{power}{Desired statistical power to reject misspecified model (test of close fit) or retain good model (test of not-close fit)} \item{alpha}{Alpha level used in power calculations} \item{group}{The number of group that is used to calculate RMSEA.} } \details{ This function find the minimum sample size for a specified power based on an iterative routine. The sample size keep increasing until the calculated power from \code{\link{findRMSEApower}} function is just over the specified power. If \code{group} is greater than 1, the resulting sample size is the sample size per group. } \references{ MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1,} 130-149. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{plotRMSEApower}} to plot the statistical power based on population RMSEA given the sample size \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions \item \code{\link{findRMSEApower}} to find the statistical power based on population RMSEA given a sample size } } \examples{ findRMSEAsamplesize(rmsea0=.05, rmseaA=.08, df=20, power=0.80) }semTools/man/residualCovariate.Rd0000644000175100001440000000254513000201061016572 0ustar hornikusers\name{residualCovariate} \alias{residualCovariate} \title{ Residual centered all target indicators by covariates } \description{ This function will regress target variables on the covariate and replace the target variables by the residual of the regression analysis. 
This procedure is useful to control the covariate from the analysis model (Geldhof, Pornprasertmanit, Schoemann, & Little, in press). } \usage{ residualCovariate(data, targetVar, covVar) } \arguments{ \item{data}{ The desired data to be transformed. } \item{targetVar}{ Variable names or the position of indicators that users wish to be residual centered (as dependent variables) } \item{covVar}{ Covariate names or the position of the covariates used for residual centering (as independent variables) onto target variables } } \value{ The data with the target variables replaced by the residuals } \references{ Geldhof, G. J., Pornprasertmanit, S., Schoemann, A. M., & Little, T. D. (2013). Orthogonalizing through residual centering: Applications and caveats. \emph{Educational and Psychological Measurement, 73}, 27-46. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. } \examples{ dat <- residualCovariate(attitude, 2:7, 1) } semTools/man/measurementInvarianceCat.Rd0000644000175100001440000000656213000201061020104 0ustar hornikusers\name{measurementInvarianceCat} \alias{measurementInvarianceCat} \title{ Measurement Invariance Tests for Categorical Items } \description{ Testing measurement invariance across groups using a typical sequence of model comparison tests. } \usage{ measurementInvarianceCat(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", method = "satorra.bentler.2001") } \arguments{ \item{...}{The same arguments as for any lavaan model. See \code{\link{cfa}} for more information.} \item{std.lv}{If \code{TRUE}, the fixed-factor method of scale identification is used. If \code{FALSE}, the first variable for each factor is used as marker variable.} \item{strict}{If \code{TRUE}, the sequence requires `strict' invariance.
See details for more information.} \item{quiet}{If \code{FALSE} (default), a summary is printed out containing an overview of the different models that are fitted, together with some model comparison tests.} \item{fit.measures}{Fit measures used to calculate the differences between nested models.} \item{method}{The method used to calculate the likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} } \details{ Theta parameterization is used to represent SEM for categorical items. That is, residual variances are modeled instead of the total variance of underlying normal variate for each item. Five models can be tested based on different constraints across groups. \enumerate{ \item{Model 1: configural invariance. The same factor structure is imposed on all groups.} \item{Model 2: weak invariance. The factor loadings are constrained to be equal across groups.} \item{Model 3: strong invariance. The factor loadings and thresholds are constrained to be equal across groups.} \item{Model 4: strict invariance. The factor loadings, thresholds and residual variances are constrained to be equal across groups. For categorical variables, all residual variances are fixed as 1.} \item{Model 5: The factor loadings, thresholds, residual variances and means are constrained to be equal across groups.} } However, if all items have only two categories (dichotomous), scalar invariance and weak invariance cannot be separated because thresholds need to be equal across groups for scale identification. Users can specify the \code{strict} option to include the strict invariance model for the invariance testing. See the further details of scale identification and different parameterization in Millsap and Yun-Tein (2004). } \value{ Invisibly, all model fits in the sequence are returned as a list. } \references{ Millsap, R. E., & Yun-Tein, J. (2004). Assessing factorial invariance in ordered-categorical measures. \emph{Multivariate Behavioral Research, 39}, 479-515.
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) } \seealso{ \code{\link{measurementInvariance}} for measurement invariance for continuous variables; \code{\link{longInvariance}} For the measurement invariance test within person with continuous variables; \code{partialInvariance} for the automated function for finding partial invariance models } \examples{ \dontrun{ model <- ' f1 =~ u1 + u2 + u3 + u4' measurementInvarianceCat(model, data = datCat, group = "g", parameterization="theta", estimator="wlsmv", ordered = c("u1", "u2", "u3", "u4")) } } semTools/man/permuteMeasEq-class.Rd0000644000175100001440000001477013000201061017007 0ustar hornikusers\name{permuteMeasEq-class} \docType{class} \alias{permuteMeasEq-class} \alias{show,permuteMeasEq-method} \alias{summary,permuteMeasEq-method} \alias{hist,permuteMeasEq-method} \title{ Class for the Results of Permutation Randomization Tests of Measurement Equivalence and DIF } \description{ This class contains the results of tests of Measurement Equivalence and Differential Item Functioning (DIF). } \section{Objects from the Class}{ Objects can be created via the \code{\link[semTools]{permuteMeasEq}} function. } \section{Slots}{ \describe{ \item{\code{PT}:}{A \code{data.frame} returned by a call to \code{\link[lavaan]{parTable}} on the constrained model} \item{\code{modelType}:}{A character indicating the specified \code{modelType} in the call to \code{permuteMeasEq}} \item{\code{ANOVA}:}{A vector indicating the results of the observed chi-squared (difference) test, based on the central chi-squared distribution} \item{\code{AFI.obs}:}{A vector of observed (changes in) user-selected fit measures} \item{\code{AFI.dist}:}{The permutation distribution(s) of user-selected fit measures. 
A \code{data.frame} with \code{n.Permutations} rows and one column for each \code{AFI.obs}.} \item{\code{AFI.pval}:}{A vector of \emph{p} values (one for each element in slot \code{AFI.obs}) calculated using slot \code{AFI.dist}, indicating the probability of observing a change at least as extreme as \code{AFI.obs} if the null hypothesis were true} \item{\code{MI.obs}:}{A \code{data.frame} of observed Lagrange Multipliers (modification indices) associated with the equality constraints or fixed parameters specified in the \code{param} argument. This is a subset of the output returned by a call to \code{\link[lavaan]{lavTestScore}} on the constrained model.} \item{\code{MI.dist}:}{The permutation distribution of the maximum modification index (among those seen in slot \code{MI.obs$X2}) at each permutation of group assignment or of \code{covariates}} \item{\code{extra.obs}:}{If \code{permuteMeasEq} was called with an \code{extra} function, the output when applied to the original data is concatenated into this vector} \item{\code{extra.dist}:}{A \code{data.frame}, each column of which contains the permutation distribution of the corresponding statistic in slot \code{extra.obs}} \item{\code{n.Permutations}:}{An integer indicating the number of permutations requested by the user} \item{\code{n.Converged}:}{An integer indicating the number of permuation iterations which yielded a converged solution} \item{\code{n.nonConverged}:}{A vector of length \code{n.Permutations} indicating how many times group assignment was randomly permuted (at each iteration) before converging on a solution} \item{\code{n.Sparse}:}{Only relevant with \code{ordered} indicators when \code{modelType == "mgcfa"}. 
A vector of length \code{n.Permutations} indicating how many times group assignment was randomly permuted (at each iteration) before obtaining a sample with all categories observed in all groups} \item{\code{oldSeed}:}{An integer vector storing the value of \code{.Random.seed} before running \code{permuteMeasEq}. Only relevant when using a parallel/multicore option and the original \code{RNGkind() != "L'Ecuyer-CMRG"}. This enables users to restore their previous \code{.Random.seed} state, if desired, by running: \code{.Random.seed[-1] <- permutedResults@oldSeed[-1]}} } } \section{Methods}{ \describe{ \item{show}{\code{signature(object = "permuteMeasEq"):} The \code{show} function is used to summarize the results of the multiparameter omnibus test of measurement equivalence, using the user-specified AFIs. The parametric chi-squared (difference) test is also displayed.} \item{summary}{\code{signature(object = "permuteMeasEq", alpha = .05, nd = 3, extra = FALSE):} The summary function prints the same information from the \code{show} method, but when \code{extra = FALSE} (the default) it also provides a table summarizing any requested follow-up tests of DIF using modification indices in slot \code{MI.obs}. The user can also specify an \code{alpha} level for flagging modification indices as significant, as well as \code{nd} (the number of digits displayed). For each modification index, the \emph{p} value is displayed using a central chi-squared distribution with the \emph{df} shown in that column. Additionally, a \emph{p} value is displayed using the permutation distribution of the maximum index, which controls the familywise Type I error rate in a manner similar to Tukey's studentized range test. If any indices are flagged as significant using the \code{tukey.p.value}, then a message is displayed for each flagged index. 
The invisibly returned \code{data.frame} is the displayed table of modification indices, unless \code{\link[semTools]{permuteMeasEq}} was called with \code{param = NULL}, in which case the invisibly returned object is \code{object}. If \code{extra = TRUE}, the permutation-based \emph{p} values for each statistic returned by the \code{extra} function are displayed and returned in a \code{data.frame} instead of the modification indices requested in the \code{param} argument.} \item{hist}{\code{signature(x = "permuteMeasEq", ..., AFI, alpha = .05, nd = 3, printLegend = TRUE, legendArgs = list(x = "topleft")):} The \code{hist} function provides a histogram for the permutation distribution of the specified \code{AFI}, including observed and critical values from the specified \code{alpha} level. Distributions of modification indices and any extra output are not available with this method, but they can be created manually by accessing the distributions in slot \code{MI.dist} or \code{extra.dist}. The user can also specify additional graphical parameters to \code{\link[graphics]{hist}} via \code{...}, as well as pass a list of arguments to an optional \code{\link[graphics]{legend}} via \code{legendArgs}. If \code{AFI = "chisq"}, then the probability density and critical value from the central chi-squared distribution are also included in the plot. If the user wants more control over customization, \code{hist} returns a list of \code{length == 2}, containing the arguments for the call to \code{hist} and the arguments to the call for \code{legend}, respectively. This list may facilitate creating a customized histogram of \code{AFI.dist}, \code{MI.dist}, or \code{extra.dist}.} } } \author{ Terrence D. 
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \seealso{ \code{\link[semTools]{permuteMeasEq}} } \examples{ # See the example from the permuteMeasEq function } semTools/man/twostage-class.Rd0000644000175100001440000001153613001167756016113 0ustar hornikusers\name{twostage-class} \docType{class} \alias{twostage-class} \alias{show,twostage-method} \alias{summary,twostage-method} \alias{anova,twostage-method} \alias{vcov,twostage-method} \alias{coef,twostage-method} \alias{fitted.values,twostage-method} \alias{fitted,twostage-method} \alias{residuals,twostage-method} \alias{resid,twostage-method} \alias{nobs,twostage-method} \title{ Class for the Results of 2-Stage Maximum Likelihood (TSML) Estimation for Missing Data } \description{ This class contains the results of 2-Stage Maximum Likelihood (TSML) estimation for missing data. The \code{summary}, \code{anova}, \code{vcov} methods return corrected \emph{SE}s and test statistics. Other methods are simply wrappers around the corresponding \code{\linkS4class{lavaan}} methods. } \section{Objects from the Class}{ Objects can be created via the \code{\link{twostage}} function. 
} \section{Slots}{ \describe{ \item{\code{saturated}:}{A fitted \code{\linkS4class{lavaan}} object containing the saturated model results.} \item{\code{target}:}{A fitted \code{\linkS4class{lavaan}} object containing the target/hypothesized model results.} \item{\code{baseline}:}{A fitted \code{\linkS4class{lavaan}} object containing the baseline/null model results.} \item{\code{auxNames}:}{A character string (potentially of \code{length == 0}) of any auxiliary variable names, if used.} } } \section{methods}{ \describe{ \item{anova}{\code{signature(object = "twostage", h1 = NULL, baseline = FALSE:} The \code{anova} function returns the residual-based chi-squared test statistic result, as well as the scaled chi-squared test statistic result, for the model in the \code{target} slot, or for the model in the \code{baseline} slot if \code{baseline = TRUE}. The user can also provide a single additional \code{twostage} object to the \code{h1} argument, in which case \code{anova} returns residual-based and scaled chi-squared difference test results, under the assumption that the models are nested. The models will be automatically sorted according their degrees of freedom.} \item{show}{\code{signature(object = "twostage"):} The \code{show} function is used to display the results of the \code{anova} method, as well as the header of the (uncorrected) target model results.} \item{summary}{\code{signature(object = "twostage", ...):} The summary function prints the same information from the \code{show} method, but also provides (and returns) the output of \code{\link[lavaan]{parameterEstimates}(object@target, ...)} with corrected \emph{SE}s, test statistics, and confidence intervals. 
Additional arguments can be passed to \code{\link[lavaan]{parameterEstimates}}, including \code{fmi = TRUE} to provide an estimate of the fraction of missing information.} \item{vcov}{\code{signature(object = "twostage", baseline = FALSE:} Returns the asymptotic covariance matrix of the estimated parameters (corrected for additional uncertainty due to missing data) for the model in the \code{target} slot, or for the model in the \code{baseline} slot if \code{baseline = TRUE}.} \item{nobs}{\code{signature(object = "twostage", type = c("ntotal", "ngroups", "n.per.group", "norig", "patterns", "coverage")):} The \code{nobs} function will return the total sample sized used in the analysis by default. Also available are the number of groups or the sample size per group, the original sample size (if any rows were deleted because all variables were missing), the missing data patterns, and the matrix of coverage (diagonal is the proportion of sample observed on each variable, and off-diagonal is the proportion observed for both of each pair of variables).} \item{coef}{\code{signature(object = "twostage", type = c("free", "user"):} This is simply a wrapper around the corresponding \code{\linkS4class{lavaan}} method, providing point estimates from the \code{target} slot.} \item{fitted.values}{\code{signature(object = "twostage", model = c("target", "saturated", "baseline"):} This is simply a wrapper around the corresponding \code{\linkS4class{lavaan}} method, providing model-implied sample moments from the slot specified in the \code{model} argument.} \item{fitted}{\code{signature(object = "twostage", model = c("target", "saturated", "baseline"):} an alias for \code{fitted.values}.} \item{residuals}{\code{signature(object = "twostage", type = c("raw", "cor", "normalized", "standardized"):} This is simply a wrapper around the corresponding \code{\linkS4class{lavaan}} method, providing residuals of the specified \code{type} from the \code{target} slot.} 
\item{resid}{\code{signature(object = "twostage", model = c("raw", "cor", "normalized", "standardized"):} an alias for \code{residuals}.} } } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \seealso{ \code{\link{twostage}} } \examples{ # See the example from the twostage function } semTools/man/kd.Rd0000644000175100001440000000447313000201061013524 0ustar hornikusers\name{kd} \alias{kd} \title{ Generate data via the Kaiser-Dickman (1962) algorithm. } \description{ Given a covariance matrix and sample size, generate raw data that correspond to the covariance matrix. Data can be generated to match the covariance matrix exactly, or to be a sample from the population covariance matrix. } \usage{ kd(covmat, n, type = c("exact", "sample")) } \arguments{ \item{covmat}{a symmetric, positive definite covariance matrix} \item{n}{the sample size for the data that will be generated} \item{type}{type of data generation. \code{exact} generates data that exactly correspond to \code{covmat}. \code{sample} treats \code{covmat} as a poulation covariance matrix, generating a sample of size \code{n}.} } \details{ By default, R's \code{cov()} function divides by \code{n}-1. The data generated by this algorithm result in a covariance matrix that matches \code{covmat}, but you must divide by \code{n} instead of \code{n}-1. } \value{ \code{kd} returns a data matrix of dimension \code{n} by \code{nrow(covmat)}. } \references{ Kaiser, H. F. and Dickman, K. (1962). Sample and population score matrices and sample correlation matrices from an arbitrary population correlation matrix. \emph{Psychometrika, 27}, 179-182. 
} \author{ Ed Merkle (University of Missouri; \email{merklee@missouri.edu}) } \examples{ #### First Example ## Get data dat <- HolzingerSwineford1939[,7:15] hs.n <- nrow(dat) ## Covariance matrix divided by n hscov <- ((hs.n-1)/hs.n) * cov(dat) ## Generate new, raw data corresponding to hscov newdat <- kd(hscov, hs.n) ## Difference between new covariance matrix and hscov is minimal newcov <- (hs.n-1)/hs.n * cov(newdat) summary(as.numeric(hscov - newcov)) ## Generate sample data, treating hscov as population matrix newdat2 <- kd(hscov, hs.n, type="sample") #### Another example ## Define a covariance matrix covmat <- matrix(0, 3, 3); diag(covmat) <- 1.5; covmat[2:3,1] <- c(1.3, 1.7); covmat[3,2] <- 2.1 covmat <- covmat + t(covmat) ## Generate data of size 300 that have this covariance matrix rawdat <- kd(covmat, 300) ## Covariances are exact if we compute sample covariance matrix by ## dividing by n (vs by n-1) summary(as.numeric((299/300)*cov(rawdat) - covmat)) ## Generate data of size 300 where covmat is the population covariance matrix rawdat2 <- kd(covmat, 300) } semTools/man/Net-class.Rd0000644000175100001440000000155413000201061014754 0ustar hornikusers\name{Net-class} \docType{class} \alias{Net-class} \alias{show,Net-method} \alias{summary,Net-method} \title{ Class For the Result of Nesting and Equivalence Testing } \description{ This class contains the results of nesting and equivalence testing among multiple models } \section{Objects from the Class}{ Objects can be created via the \code{\link{net}} function. } \section{Slots}{ \describe{ \item{\code{test}:}{Logical matrix of results of nesting and equivalence testing across models} \item{\code{df}:}{The degrees of freedom of tested models} } } \section{methods}{ \itemize{ \item \code{summary} The summary function is used to provide the results in narrative. } } \author{ Terrence D. 
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \seealso{ \code{\link{net}} } \examples{ # See the example in the net function. } semTools/man/lisrel2lavaan.Rd0000644000175100001440000001200113000201061015657 0ustar hornikusers\name{lisrel2lavaan} \alias{lisrel2lavaan} \title{ Latent variable modeling in \code{\linkS4class{lavaan}} using LISREL syntax } \description{ This function can be used to estimate a structural equation model in \code{\linkS4class{lavaan}} using LISREL syntax. Data are automatically imported from the LISREL syntax file, or, if data file names are provided within LISREL syntax, from the same directory as the syntax itself, as per standard LISREL data importation. } \usage{ lisrel2lavaan(filename = NULL, analyze = TRUE, silent = FALSE, ...) } \arguments{ \item{filename}{ Filename of the LISREL syntax file. If the \code{filename} argument is not specified, the user will be prompted with a file browser with which LISREL syntax file can be selected (recommended). } \item{analyze}{ Logical. If \code{analyze==TRUE} (default), data will be automatically imported and analyzed; \code{\linkS4class{lavaan}} summary output displayed and fit object will be returned silently. If \code{analyze==FALSE}, data will not be imported or analyzed; instead, a \code{\linkS4class{lavaan}} parameter table containing the model specifications will be returned. } \item{silent}{ Logical. If false (default) the data will be analyzed and output displayed. If true, a fit object will be returned and summary output will not be displayed. } \item{\dots}{ Additional arguments to be passed to \code{\link[lavaan]{lavaan}}. } } \value{ Output summary is printed to screen and \code{\linkS4class{lavaan}} fit object is returned. } \note{ \code{lisrel2lavaan} is still in development, and not all LISREL commands are currently functional. A number of known limitations are outlined below.
If an error is encountered that is not listed, please contact \email{corbinq@ku.edu}. \enumerate{ \item{data importation}{ \code{lisrel2lavaan} currently supports .csv, .dat, and most other delimited data formats. However, formats that are specific to LISREL or PRELIS (e.g., the .PSF file format) cannot be imported. \code{lisrel2lavaan} supports raw data, covariance matrices, and correlation matrices (accompanied by a variance vector). Symmetric matrices can either contain lower triangle or full matrix. For MACS structure models, either raw data or summary statistics (that include a mean vector) are supported. } \item{variable labels}{ Certain variable labels that are permitted in LISREL cannot be supported in \code{lisrel2lavaan}. \item{duplicate labels}{ Most importantly, no two variables of any kind (including phantom variables) should be given the same label when using \code{lisrel2lavaan}. If multiple variables are given the same label, \code{\link[lavaan]{lavaan}} will estimate an incorrect model. } \item{numeric character labels}{ All variable labels are recommended to include non-numeric characters. In addition, the first character in each variable label is recommended to be non-numeric. } \item{labels not specified}{ If variable labels are not provided by the user, names will be generated reflecting variable assignment (e.g. 'eta1', 'ksi1'); manifest variables will be in lower case and latent variables in upper case. } } \item{OU paragraph}{ Not all commands in the OU paragraph are presently supported in \code{lisrel2lavaan}. The ME command can be used to specify estimation method; however, not all estimations available in LISREL are currently supported by \code{\link[lavaan]{lavaan}}. If the specified ME is unsupported, \code{lisrel2lavaan} will revert to default estimation. The AD, EP, IT, ND and NP keywords will be ignored. Requests for text files containing starting values (e.g., \code{OU BE}) will also be ignored. 
} \item{starting values}{ Certain functionalities related to starting values in LISREL are not yet operational in \code{lisrel2lavaan}. Note that due to differences in estimation, starting values are not as important in \code{\link[lavaan]{lavaan}} model estimation as in LISREL. \item{text file output}{ Requests for text files containing starting values for individual matrices in the in the \code{OU} command (e.g., \code{OU BE}) are not currently supported. These requests will be ignored. } \item{MA paragraph}{ Specification of matrix starting values using the MA command is permitted by providing starting values within syntax directly. However, \code{lisrel2lavaan} has sometimes encountered problems with importation when files are specified following the MA paragraph. } } } } \author{ Corbin Quick (University of Michigan; \email{corbinq@umich.edu}) } \examples{ \dontrun{ ## calling lisrel2lavaan without specifying the filename argument will ## open a file browser window with which LISREL syntax can be selected. ## any additional arguments to be passed to lavaan for data analysis can ## be specified normally. lisrel2lavaan(se="standard") ## lavaan output summary printed to screen ## lavaan fit object returned silently ## manual file specification lisrel2lavaan(filename="myFile.LS8", se="standard") ## lavaan output summary printed to screen ## lavaan fit object returned silently } } semTools/man/exLong.Rd0000644000175100001440000000140013000201061014345 0ustar hornikusers\name{exLong} \alias{exLong} \title{ Simulated Data set to Demonstrate Longitudinal Measurement Invariance } \description{ A simulated data set with 1 factors with 3 indicators in three timepoints } \usage{ data(exLong) } \format{ A data frame with 200 observations of 10 variables. 
\describe{ \item{sex}{Sex of respondents} \item{y1t1}{Indicator 1 in Time 1} \item{y2t1}{Indicator 2 in Time 1} \item{y3t1}{Indicator 3 in Time 1} \item{y1t2}{Indicator 1 in Time 2} \item{y2t2}{Indicator 2 in Time 2} \item{y3t2}{Indicator 3 in Time 2} \item{y1t3}{Indicator 1 in Time 3} \item{y2t3}{Indicator 2 in Time 3} \item{y3t3}{Indicator 3 in Time 3} } } \source{ Data was generated using the \code{simsem} package. } \examples{ head(exLong) }semTools/man/imposeStart.Rd0000644000175100001440000000636713000201061015444 0ustar hornikusers\name{imposeStart} \alias{imposeStart} \title{ Specify starting values from a lavaan output } \description{ This function will save the parameter estimates of a lavaan output and impose those parameter estimates as starting values for another analysis model. The free parameters with the same names or the same labels across two models will be imposed the new starting values. This function may help to increase the chance of convergence in a complex model (e.g., multitrait-multimethod model or complex longitudinal invariance model). } \usage{ imposeStart(out, expr, silent = TRUE) } \arguments{ \item{out}{ The \code{lavaan} output that users wish to use the parameter estimates as staring values for an analysis model } \item{expr}{ The original code that users use to run a lavaan model } \item{silent}{ Logical to print the parameter table with new starting values } } \value{ A fitted lavaan model } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ # The following example show that the longitudinal weak invariance model # using effect coding was not convergent with three time points but convergent # with two time points. Thus, the parameter estimates from the model with # two time points are used as starting values of the three time points. # The model with new starting values is convergent properly. 
weak2time <- ' # Loadings f1t1 =~ LOAD1*y1t1 + LOAD2*y2t1 + LOAD3*y3t1 f1t2 =~ LOAD1*y1t2 + LOAD2*y2t2 + LOAD3*y3t2 # Factor Variances f1t1 ~~ f1t1 f1t2 ~~ f1t2 # Factor Covariances f1t1 ~~ f1t2 # Error Variances y1t1 ~~ y1t1 y2t1 ~~ y2t1 y3t1 ~~ y3t1 y1t2 ~~ y1t2 y2t2 ~~ y2t2 y3t2 ~~ y3t2 # Error Covariances y1t1 ~~ y1t2 y2t1 ~~ y2t2 y3t1 ~~ y3t2 # Factor Means f1t1 ~ NA*1 f1t2 ~ NA*1 # Measurement Intercepts y1t1 ~ INT1*1 y2t1 ~ INT2*1 y3t1 ~ INT3*1 y1t2 ~ INT4*1 y2t2 ~ INT5*1 y3t2 ~ INT6*1 # Constraints for Effect-coding Identification LOAD1 == 3 - LOAD2 - LOAD3 INT1 == 0 - INT2 - INT3 INT4 == 0 - INT5 - INT6 ' model2time <- lavaan(weak2time, data = exLong) weak3time <- ' # Loadings f1t1 =~ LOAD1*y1t1 + LOAD2*y2t1 + LOAD3*y3t1 f1t2 =~ LOAD1*y1t2 + LOAD2*y2t2 + LOAD3*y3t2 f1t3 =~ LOAD1*y1t3 + LOAD2*y2t3 + LOAD3*y3t3 # Factor Variances f1t1 ~~ f1t1 f1t2 ~~ f1t2 f1t3 ~~ f1t3 # Factor Covariances f1t1 ~~ f1t2 + f1t3 f1t2 ~~ f1t3 # Error Variances y1t1 ~~ y1t1 y2t1 ~~ y2t1 y3t1 ~~ y3t1 y1t2 ~~ y1t2 y2t2 ~~ y2t2 y3t2 ~~ y3t2 y1t3 ~~ y1t3 y2t3 ~~ y2t3 y3t3 ~~ y3t3 # Error Covariances y1t1 ~~ y1t2 y2t1 ~~ y2t2 y3t1 ~~ y3t2 y1t1 ~~ y1t3 y2t1 ~~ y2t3 y3t1 ~~ y3t3 y1t2 ~~ y1t3 y2t2 ~~ y2t3 y3t2 ~~ y3t3 # Factor Means f1t1 ~ NA*1 f1t2 ~ NA*1 f1t3 ~ NA*1 # Measurement Intercepts y1t1 ~ INT1*1 y2t1 ~ INT2*1 y3t1 ~ INT3*1 y1t2 ~ INT4*1 y2t2 ~ INT5*1 y3t2 ~ INT6*1 y1t3 ~ INT7*1 y2t3 ~ INT8*1 y3t3 ~ INT9*1 # Constraints for Effect-coding Identification LOAD1 == 3 - LOAD2 - LOAD3 INT1 == 0 - INT2 - INT3 INT4 == 0 - INT5 - INT6 INT7 == 0 - INT8 - INT9 ' ### The following command does not provide convergent result # model3time <- lavaan(weak3time, data = exLong) ### Use starting values from the model with two time points model3time <- imposeStart(model2time, lavaan(weak3time, data = exLong)) summary(model3time) } semTools/man/bsBootMiss.Rd0000644000175100001440000001370213000201061015205 0ustar hornikusers\name{bsBootMiss} \alias{bsBootMiss} \title{ Bollen-Stine Bootstrap with the 
Existence of Missing Data } \description{ Implement the Bollen and Stine's (1992) Bootstrap when missing observations exist. The implemented method is proposed by Savalei and Yuan (2009). This can be used in two ways. The first and easiest option is to fit the model to incomplete data in \code{lavaan} using the FIML estimator, then pass that \code{lavaan} object to \code{bsBootMis}. The second is designed for users of other software packages (e.g., LISREL, EQS, Amos, or Mplus). Users can import their data, chi-squared value, and model-implied moments from another package, and they have the option of saving (or writing to a file) either the transformed data or bootstrapped samples of that data, which can be analyzed in other programs. In order to analyze the bootstrapped samples and return a p value, users of other programs must still specify their model using lavaan syntax. } \usage{ bsBootMiss(x, transformation = 2, nBoot = 500, model, rawData, Sigma, Mu, group, ChiSquared, EMcov, writeTransData = FALSE, transDataOnly = FALSE, writeBootData = FALSE, bootSamplesOnly = FALSE, writeArgs, seed = NULL, suppressWarn = TRUE, showProgress = TRUE, ...) } \arguments{ \item{x}{ A target \code{lavaan} object used in the Bollen-Stine bootstrap } \item{transformation}{ The transformation methods in Savalei and Yuan (2009). There are three methods in the article, but only the first two are currently implemented here. Use transformation = 1 when there are few missing data patterns, each of which has a large size, such as in a planned-missing-data design. Use transformation = 2 when there are more missing data patterns. The currently unavailable transformation = 3 would be used when several missing data patterns have n = 1. } \item{nBoot}{ The number of bootstrap samples. } \item{model}{ Optional. The target model if \code{x} is not provided. } \item{rawData}{ Optional. The target raw data set if \code{x} is not provided. } \item{Sigma}{ Optional. 
The model-implied covariance matrix if \code{x} is not provided. } \item{Mu}{ Optional. The model-implied mean vector if \code{x} is not provided. } \item{group}{ Optional character string specifying the name of the grouping variable in \code{rawData} if \code{x} is not provided. } \item{ChiSquared}{ Optional. The model's chi-squared test statistic, if \code{x} is not provided. } \item{EMcov}{ Optional, if \code{x} is not provided. The EM (or Two-Stage ML) estimated covariance matrix used to speed up Transformation 2 algorithm. } \item{transDataOnly}{ Logical. If \code{TRUE}, the result will provide the transformed data only. } \item{writeTransData}{ Logical. If \code{TRUE}, the transformed data set is written to a text file, \code{transDataOnly} is set to \code{TRUE}, and the transformed data is returned invisibly. } \item{bootSamplesOnly}{ Logical. If \code{TRUE}, the result will provide bootstrap data sets only. } \item{writeBootData}{ Logical. If \code{TRUE}, the stacked bootstrap data sets are written to a text file, \code{bootSamplesOnly} is set to \code{TRUE}, and the list of bootstrap data sets are returned invisibly. } \item{writeArgs}{ Optional \code{list}. If \code{writeTransData = TRUE} or \code{writeBootData = TRUE}, user can pass arguments to the \code{\link[utils]{write.table}} function as a list. Some default values are provided: \code{file} = "bootstrappedSamples.dat", \code{row.names} = \code{FALSE}, and \code{na} = "-999", but the user can override all of these by providing other values for those arguments in the \code{writeArgs} list. } \item{seed}{ The seed number used in randomly drawing bootstrap samples. } \item{suppressWarn}{ Logical. If \code{TRUE}, warnings from \code{lavaan} function will be suppressed when fitting the model to each bootstrap sample. } \item{showProgress}{ Logical. Indicating whether to display a progress bar while fitting models to bootstrap samples.
} \item{\dots}{ The additional arguments in the \code{\link[lavaan]{lavaan}} function. } } \value{ As a default, this function returns a \code{\linkS4class{BootMiss}} object containing the results of the bootstrap samples. Use \code{show}, \code{summary}, or \code{hist} to examine the results. Optionally, the transformed data set is returned if \code{transDataOnly = TRUE}. Optionally, the bootstrap data sets are returned if \code{bootSamplesOnly = TRUE}. } \references{ Bollen, K. A., \& Stine, R. A. (1992). Bootstrapping goodness-of-fit measures in structural equation models. \emph{Sociological Methods \& Research, 21}, 205-229. doi:10.1177/0049124192021002004 Savalei, V., \& Yuan, K.-H. (2009). On the model-based bootstrap with missing data: Obtaining a p-value for a test of exact fit. \emph{Multivariate Behavioral Research, 44}, 741-763. doi:10.1080/00273170903333590 } \seealso{ \code{\linkS4class{BootMiss}} } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \examples{ \dontrun{ dat1 <- HolzingerSwineford1939 dat1$x5 <- ifelse(dat1$x1 <= quantile(dat1$x1, .3), NA, dat1$x5) dat1$x9 <- ifelse(is.na(dat1$x5), NA, dat1$x9) targetModel <- " visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 " targetFit <- sem(targetModel, dat1, meanstructure = TRUE, std.lv = TRUE, missing = "fiml", group = "school") summary(targetFit, fit = TRUE, standardized = TRUE) # The number of bootstrap samples should be much higher. temp <- bsBootMiss(targetFit, transformation = 1, nBoot = 10, seed = 31415) temp summary(temp) hist(temp) hist(temp, printLegend = FALSE) # suppress the legend ## user can specify alpha level (default: alpha = 0.05), and the number of ## digits to display (default: nd = 2). 
Pass other arguments to hist(...), ## or a list of arguments to legend() via "legendArgs" hist(temp, alpha = .01, nd = 3, xlab = "something else", breaks = 25, legendArgs = list("bottomleft", box.lty = 2)) } } semTools/man/indProd.Rd0000644000175100001440000001003213000201061014511 0ustar hornikusers\name{indProd} \alias{indProd} \alias{orthogonalize} \title{ Make products of indicators using no centering, mean centering, double-mean centering, or residual centering } \description{ The \code{indProd} function will make products of indicators using no centering, mean centering, double-mean centering, or residual centering. The \code{orthogonalize} function is the shortcut of the \code{indProd} function to make the residual-centered indicators products. } \usage{ indProd(data, var1, var2, var3=NULL, match = TRUE, meanC = TRUE, residualC = FALSE, doubleMC = TRUE, namesProd = NULL) orthogonalize(data, var1, var2, var3=NULL, match=TRUE, namesProd=NULL) } \arguments{ \item{data}{ The desired data to be transformed. } \item{var1}{ Names or indices of the variables loaded on the first factor } \item{var2}{ Names or indices of the variables loaded on the second factor } \item{var3}{ Names or indices of the variables loaded on the third factor (for three-way interaction) } \item{match}{ Specify \code{TRUE} to use match-paired approach (Marsh, Wen, & Hau, 2004). If \code{FALSE}, the resulting products are all possible products. } \item{meanC}{ Specify \code{TRUE} for mean centering the main effect indicator before making the products } \item{residualC}{ Specify \code{TRUE} for residual centering the products by the main effect indicators (Little, Bovaird, & Widaman, 2006). } \item{doubleMC}{ Specify \code{TRUE} for centering the resulting products (Lin et. al., 2010) } \item{namesProd}{ The names of resulting products } } \value{ The original data attached with the products. } \references{ Marsh, H. W., Wen, Z. & Hau, K. T. (2004). 
Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9,} 275-300. Lin, G. C., Wen, Z., Marsh, H. W., & Lin, H. S. (2010). Structural equation models of latent interactions: Clarification of orthogonalizing and double-mean-centering strategies. \emph{Structural Equation Modeling, 17}, 374-391. Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of orthogonalizing powered and product terms: Implications for modeling interactions among latent variables. \emph{Structural Equation Modeling, 13}, 497-519. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Alexander Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) } \seealso{ \itemize{ \item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{probe3WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. 
} } \examples{ # Mean centering / two-way interaction / match-paired dat <- indProd(attitude[,-1], var1=1:3, var2=4:6) # Residual centering / two-way interaction / match-paired dat2 <- indProd(attitude[,-1], var1=1:3, var2=4:6, match=FALSE, meanC=FALSE, residualC=TRUE, doubleMC=FALSE) # Double-mean centering / two-way interaction / match-paired dat3 <- indProd(attitude[,-1], var1=1:3, var2=4:6, match=FALSE, meanC=TRUE, residualC=FALSE, doubleMC=TRUE) # Mean centering / three-way interaction / match-paired dat4 <- indProd(attitude[,-1], var1=1:2, var2=3:4, var3=5:6) # Residual centering / three-way interaction / match-paired dat5 <- indProd(attitude[,-1], var1=1:2, var2=3:4, var3=5:6, match=FALSE, meanC=FALSE, residualC=TRUE, doubleMC=FALSE) # Double-mean centering / three-way interaction / match-paired dat6 <- indProd(attitude[,-1], var1=1:2, var2=3:4, var3=5:6, match=FALSE, meanC=TRUE, residualC=TRUE, doubleMC=TRUE) } semTools/man/EFA-class.Rd0000644000175100001440000000352213000201061014616 0ustar hornikusers\name{EFA-class} \docType{class} \alias{EFA-class} \alias{show,EFA-method} \alias{summary,EFA-method} \title{ Class For Rotated Results from EFA } \description{ This class contains the results of rotated exploratory factor analysis } \section{Objects from the Class}{ Objects can be created via the \code{\link{orthRotate}} or \code{\link{oblqRotate}} function. } \section{Slots}{ \describe{ \item{\code{loading}:}{Rotated standardized factor loading matrix} \item{\code{rotate}:}{Rotation matrix} \item{\code{gradRotate}:}{The gradient of the objective function at the rotated loadings} \item{\code{convergence}:}{Convergence status} \item{\code{phi}:}{Factor correlation. 
Will be an identity matrix if orthogonal rotation is used.} \item{\code{se}:}{Standard errors of the rotated standardized factor loading matrix} \item{\code{method}:}{Method of rotation} \item{\code{call}:}{The command used to generate this object} } } \section{methods}{ \itemize{ \item \code{summary} The \code{summary} function shows the detailed results of the rotated solution. This function has two arguments: \code{suppress} and \code{sort}. The \code{suppress} argument is used to not show the standardized loading values that less than the specified value. The default is 0.1. The \code{sort} is used to sort the factor loadings by the sizes of factor loadings in each factor. The default is \code{TRUE}. } } \seealso{ \code{\link{efaUnrotate}}; \code{\link{orthRotate}}; \code{\link{oblqRotate}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ library(lavaan) unrotated <- efaUnrotate(HolzingerSwineford1939, nf=3, varList=paste0("x", 1:9), estimator="mlr") summary(unrotated, std=TRUE) inspect(unrotated, "std") # Rotated by Quartimin rotated <- oblqRotate(unrotated, method="quartimin") summary(rotated) } semTools/man/quark.Rd0000644000175100001440000001154613000201061014250 0ustar hornikusers\name{quark} \alias{quark} \title{ Quark } \description{ The \code{quark} function provides researchers with the ability to calculate and include component scores calculated by taking into account the variance in the original dataset and all of the interaction and polynomial effects of the data in the dataset. } \usage{ quark(data, id, order = 1, silent = FALSE) } \arguments{ \item{data}{ The data frame is a required component for \code{quark}. In order for \code{quark} to process a data frame, it must not contain any factors or text-based variables. All variables must be in numeric format. Identifiers and dates can be left in the data; however, they will need to be identified under the \code{id} argument. 
} \item{id}{ Identifiers and dates within the dataset will need to be acknowledged as \code{quark} cannot process these. By acknowledging the identifiers and dates as a vector of column numbers or variable names, \code{quark} will remove them from the data temporarily to complete its main processes. Among many potential issues of not acknowledging identifiers and dates are issues involved with imputation, product and polynomial effects, and principal component analysis. } \item{order}{ Order is an optional argument provided by quark that can be used when the imputation procedures in mice fail. Under some circumstances, mice cannot calculate missing values due to issues with extreme missingness. Should an error present itself stating a failure due to not having any columns selected, incorporate the argument order=2 into the quark function in order to reorder the imputation method procedure. Otherwise, the order is defaulted to 1. Example to rerun quark after imputation failure: quark.list <- quark(data=yourdataframe,id=vectorofIDs,order=2). } \item{silent}{ If \code{FALSE}, the details of the \code{quark} process are printed. } } \details{ The \code{quark} function calculates these component scores by first filling in the data by means of multiple imputation methods and then expanding the dataset by aggregating the non-overlapping interaction effects between variables by calculating the mean of the interactions and polynomial effects. The multiple imputation methods include one of iterative sampling and group mean substitution and multiple imputation using a polytomous regression algorithm (mice). During the expansion process, the dataset is expanded to three times its normal size (in width). The first third of the dataset contains all of the original data post imputation, the second third contains the means of the polynomial effects (squares and cubes), and the final third contains the means of the non-overlapping interaction effects.
A full principal componenent analysis is conducted and the individual components are retained. The subsequent \code{\link{combinequark}} function provides researchers the control in determining how many components to extract and retain. The function returns the dataset as submitted (with missing values) and the component scores as requested for a more accurate multiple imputation in subsequent steps. } \value{ The output value from using the quark function is a list. It will return a list with 7 components. \item{ID Columns}{Is a vector of the identifier columns entered when running quark.} \item{ID Variables}{Is a subset of the dataset that contains the identifiers as acknowledged when running quark.} \item{Used Data}{Is a matrix / dataframe of the data provided by user as the basis for quark to process.} \item{Imputed Data}{Is a matrix / dataframe of the data after the multiple method imputation process.} \item{Big Matrix}{Is the expanded product and polynomial matrix.} \item{Principal Components}{Is the entire dataframe of principal components for the dataset. This dataset will have the same number of rows of the big matrix, but will have 1 less column (as is the case with principal component analyses).} \item{Percent Variance Explained}{Is a vector of the percent variance explained with each column of principal components.} } \references{ Howard, W. J., Little, T. D., & Rhemtulla, M. (in press). Using principal component analysis (PCA) to obtain auxiliary variables for missing data estimation in large data sets. \emph{Multivariate Behavioral Research}. } \author{ Steven R. Chesnut (University of Southern Mississippi; \email{Steven.Chesnut@usm.edu}), Danny Squire (Texas Tech University). The PCA code is copied and modified from the \code{FactoMineR} package. The function to print correlation matrix is copied from the \code{psych} package. 
} \seealso{ \code{\link{combinequark}} } \examples{ set.seed(123321) library(lavaan) dat <- HolzingerSwineford1939[,7:15] misspat <- matrix(runif(nrow(dat) * 9) < 0.3, nrow(dat)) dat[misspat] <- NA dat <- cbind(HolzingerSwineford1939[,1:3], dat) quark.list <- quark(data = dat, id = c(1, 2)) final.data <- combinequark(quark = quark.list, percent = 80) }semTools/man/miPowerFit.Rd0000644000175100001440000001704013000201061015205 0ustar hornikusers\name{miPowerFit} \alias{miPowerFit} \alias{miPowerFit} \title{ Modification indices and their power approach for model fit evaluation } \description{ The model fit evaluation approach using modification indices and expected parameter changes. } \usage{ miPowerFit(lavaanObj, stdLoad=0.4, cor=0.1, stdBeta=0.1, intcept=0.2, stdDelta=NULL, delta=NULL, cilevel = 0.90) } \arguments{ \item{lavaanObj}{The lavaan model object used to evaluate model fit} \item{stdLoad}{The amount of standardized factor loading that one would like to be detected (rejected). The default value is 0.4, which is suggested by Saris and colleagues (2009, p. 571).} \item{cor}{The amount of factor or error correlations that one would like to be detected (rejected). The default value is 0.1, which is suggested by Saris and colleagues (2009, p. 571).} \item{stdBeta}{The amount of standardized regression coefficients that one would like to be detected (rejected). The default value is 0.1, which is suggested by Saris and colleagues (2009, p. 571).} \item{intcept}{The amount of standardized intercept (similar to Cohen's \emph{d} that one would like to be detected (rejected). The default value is 0.2, which is equivalent to a low effect size proposed by Cohen (1988, 1992).} \item{stdDelta}{The vector of the standardized parameters that one would like to be detected (rejected). If this argument is specified, the value here will overwrite the other arguments above. 
The order of the vector must be the same as the row order from modification indices from the \code{lavaan} object. If a single value is specified, the value will be applied to all parameters.} \item{delta}{The vector of the unstandardized parameters that one would like to be detected (rejected). If this argument is specified, the value here will overwrite the other arguments above. The order of the vector must be the same as the row order from modification indices from the \code{lavaan} object. If a single value is specified, the value will be applied to all parameters.} \item{cilevel}{The confidence level of the confidence interval of expected parameter changes. The confidence intervals are used in the equivalence testing.} } \details{ In the lavaan object, one can inspect the modification indices and expected parameter changes. Those values can be used to evaluate model fit by two methods. First, Saris, Satorra, and van der Veld (2009, pp. 570-573) used the power to detect modification indices and expected parameter changes to evaluate model fit. First, one should evaluate whether the modification index of each parameter is significant. Second, one should evaluate whether the power to detect a target expected parameter change is high enough. If the modification index is not significant and the power is high, there is no misspecification. If the modification index is significant and the power is low, the fixed parameter is misspecified. If the modification index is significant and the power is high, the expected parameter change is investigated. If the expected parameter change is large (greater than the the target expected parameter change), the parameter is misspecified. If the expected parameter change is low (lower than the target expected parameter change), the parameter is not misspecificied. If the modification index is not significant and the power is low, the decision is inconclusive. 
Second, the confidence intervals of the expected parameter changes are formed. These confidence intervals are compared with the range of trivial misspecification, which could be (-\code{delta}, \code{delta}) or (0, \code{delta}) for nonnegative parameters. If the confidence intervals are outside of the range of trivial misspecification, the fixed parameters are severely misspecified. If the confidence intervals are inside the range of trivial misspecification, the fixed parameters are trivially misspecified. If confidence intervals are overlapped the range of trivial misspecification, the decision is inconclusive. } \value{ A data frame with these variables: \enumerate{ \item{lhs} The left-hand side variable (with respect to the lavaan operator) \item{op} The lavaan syntax operator: "~~" represents covariance, "=~" represents factor loading, "~" represents regression, and "~1" represents intercept. \item{rhs} The right-hand side variable (with respect to the lavaan operator) \item{group} The group of the parameter \item{mi} The modification index of the fixed parameter \item{epc} The expected parameter change if the parameter is freely estimated \item{target.epc} The target expected parameter change that represents the minimum size of misspecification that one would like to be detected by the test with a high power \item{std.epc} The standardized expected parameter change if the parameter is freely estimated \item{std.target.epc} The standardized target expected parameter change \item{significant.mi} Represents whether the modification index value is significant \item{high.power} Represents whether the power is enough to detect the target expected parameter change \item{decision.pow} The decision whether the parameter is misspecified or not based on Saris et al's method: \code{"M"} represents the parameter is misspecified, \code{"NM"} represents the parameter is not misspecified, \code{"EPC:M"} represents the parameter is misspecified decided by checking the 
expected parameter change value, \code{"EPC:NM"} represents the parameter is not misspecified decided by checking the expected parameter change value, and \code{"I"} represents the decision is inconclusive. \item{se.epc} The standard errors of the expected parameter changes. \item{lower.epc} The lower bound of the confidence interval of expected parameter changes. \item{upper.epc} The upper bound of the confidence interval of expected parameter changes. \item{lower.std.epc} The lower bound of the confidence interval of standardized expected parameter changes. \item{upper.std.epc} The upper bound of the confidence interval of standardized expected parameter changes. \item{decision.ci} The decision whether the parameter is misspecified or not based on the confidence interval method: \code{"M"} represents the parameter is misspecified, \code{"NM"} represents the parameter is not misspecified, and \code{"I"} represents the decision is inconclusive. } The row numbers matches with the results obtained from the \code{inspect(object, "mi")} function. } \references{ Cohen, J. (1988). \emph{Statistical power analysis for the behavioral sciences} (2nd ed.). Hillsdale, NJ: Erlbaum. Cohen, J. (1992). A power primer. \emph{Psychological Bulletin, 112}, 155-159. Saris, W. E., Satorra, A., & van der Veld, W. M. (2009). Testing structural equation models or detection of misspecifications? \emph{Structural Equation Modeling, 16}, 561-582. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \code{\link{moreFitIndices}} For the additional fit indices information } \examples{ library(lavaan) HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939, group="sex", meanstructure=TRUE) miPowerFit(fit) model <- ' # latent variable definitions ind60 =~ x1 + x2 + x3 dem60 =~ y1 + a*y2 + b*y3 + c*y4 dem65 =~ y5 + a*y6 + b*y7 + c*y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual correlations y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 ' fit2 <- sem(model, data=PoliticalDemocracy, meanstructure=TRUE) miPowerFit(fit2, stdLoad=0.3, cor=0.2, stdBeta=0.2, intcept=0.5) } semTools/man/fitMeasuresMx.Rd0000644000175100001440000000305113000201061015711 0ustar hornikusers\name{fitMeasuresMx} \alias{fitMeasuresMx} \title{ Find fit measures from an MxModel result } \description{ Find fit measures from MxModel result. The saturate and null models are analyzed in the function and fit measures are calculated based on the comparison with the null and saturate models. The function is adjusted from the \code{fitMeasures} function in the lavaan package. } \usage{ fitMeasuresMx(object, fit.measures="all") } \arguments{ \item{object}{ The target \code{MxModel} object } \item{fit.measures}{ Target fit measures } } \value{ A vector of fit measures } \seealso{ \code{\link{nullMx}}, \code{\link{saturateMx}}, \code{\link{standardizeMx}} } \author{ The original function is the \code{fitMeasures} function written by Yves Rosseel in the \code{lavaan} package. 
The function is adjusted for an \code{MxModel} object by Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ \dontrun{ library(OpenMx) data(demoOneFactor) manifests <- names(demoOneFactor) latents <- c("G") factorModel <- mxModel("One Factor", type="RAM", manifestVars=manifests, latentVars=latents, mxPath(from=latents, to=manifests), mxPath(from=manifests, arrows=2), mxPath(from=latents, arrows=2, free=FALSE, values=1.0), mxData(observed=cov(demoOneFactor), type="cov", numObs=500) ) factorFit <- mxRun(factorModel) round(fitMeasuresMx(factorFit), 3) # Compare with lavaan library(lavaan) script <- "f1 =~ x1 + x2 + x3 + x4 + x5" fitMeasures(cfa(script, sample.cov = cov(demoOneFactor), sample.nobs = 500, std.lv = TRUE)) } } semTools/man/compareFit.Rd0000644000175100001440000000276213000201061015216 0ustar hornikusers\name{compareFit} \alias{compareFit} \title{ Build an object summarizing fit indices across multiple models } \description{ This function will create the template that compare fit indices across multiple lavaan outputs. The results can be exported to a clipboard or a file later. } \usage{ compareFit(..., nested = TRUE) } \arguments{ \item{...}{ \code{lavaan} outputs or lists of \code{lavaan} outputs } \item{nested}{ Logical whether the specified models are nested } } \value{ A \code{\linkS4class{FitDiff}} object that saves model fit comparisons across multiple models. If the output is not assigned as an object, the output is printed in two parts: 1) nested model comparison (if models are nested) and 2) fit indices summaries. In the fit indices summaries, daggers are tagged to the model with the best fit for each fit index. 
} \seealso{ \code{\linkS4class{FitDiff}}, \code{\link{clipboard}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ m1 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit1 <- cfa(m1, data=HolzingerSwineford1939) m2 <- ' f1 =~ x1 + x2 + x3 + x4 f2 =~ x5 + x6 + x7 + x8 + x9 ' fit2 <- cfa(m2, data=HolzingerSwineford1939) compareFit(fit1, fit2, nested=FALSE) HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' out <- measurementInvariance(HW.model, data=HolzingerSwineford1939, group="school", quiet=TRUE) compareFit(out) } semTools/man/simParcel.Rd0000644000175100001440000000230513000201061015035 0ustar hornikusers\name{simParcel} \alias{simParcel} \title{ Simulated Data set to Demonstrate Random Allocations of Parcels } \description{ A simulated data set with 2 factors with 9 indicators for each factor } \usage{ data(simParcel) } \format{ A data frame with 800 observations of 18 variables. \describe{ \item{f1item1}{Item 1 loading on factor 1} \item{f1item2}{Item 2 loading on factor 1} \item{f1item3}{Item 3 loading on factor 1} \item{f1item4}{Item 4 loading on factor 1} \item{f1item5}{Item 5 loading on factor 1} \item{f1item6}{Item 6 loading on factor 1} \item{f1item7}{Item 7 loading on factor 1} \item{f1item8}{Item 8 loading on factor 1} \item{f1item9}{Item 9 loading on factor 1} \item{f2item1}{Item 1 loading on factor 2} \item{f2item2}{Item 2 loading on factor 2} \item{f2item3}{Item 3 loading on factor 2} \item{f2item4}{Item 4 loading on factor 2} \item{f2item5}{Item 5 loading on factor 2} \item{f2item6}{Item 6 loading on factor 2} \item{f2item7}{Item 7 loading on factor 2} \item{f2item8}{Item 8 loading on factor 2} \item{f2item9}{Item 9 loading on factor 2} } } \source{ Data was generated using the \code{simsem} package. 
} \examples{ head(simParcel) }semTools/man/net.Rd0000644000175100001440000000442613000201061013712 0ustar hornikusers\name{net} \alias{net} \title{ Nesting and Equivalence Testing } \description{ This test examines whether models are nested or equivalent based on Bentler and Satorra's (2010) procedure. } \usage{ net(..., crit = .0001) } \arguments{ \item{\dots}{ The \code{lavaan} objects used for test of nesting and equivalence } \item{crit}{ The upper-bound criterion for testing the equivalence of models. Models are considered nested (or equivalent) if the difference between their chi-squared fit statistics is less than this criterion. } } \details{ The concept of nesting/equivalence should be the same regardless of estimation method. However, the particular method of testing nesting/equivalence (as described in Bentler & Satorra, 2010) employed by the net function is based on a limited-information estimator (analyzing model-implied means and covariance matrices, not raw data). In the case of robust methods like MLR, the raw data is only utilized for the robust adjustment to SE and chi-sq, and the net function only checks the unadjusted chi-sq for the purposes of testing nesting/equivalence. This method does not apply to models that estimate thresholds for categorical data, so an error message will be issued if such a model is provided. } \value{ The \linkS4class{Net} object representing the outputs for nesting and equivalent testing, including a logical matrix of test results and a vector of degrees of freedom for each model. } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \references{ Bentler, P. M., & Satorra, A. (2010). Testing model nesting and equivalence. \emph{Psychological Methods, 15}, 111-123. 
doi:10.1037/a0019625 } \examples{ \dontrun{ m1 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' m2 <- ' f1 =~ x1 + x2 + x3 + x4 f2 =~ x5 + x6 + x7 + x8 + x9 ' m3 <- ' visual =~ x1 + x2 + x3 textual =~ eq*x4 + eq*x5 + eq*x6 speed =~ x7 + x8 + x9 ' fit1 <- cfa(m1, data = HolzingerSwineford1939) fit1a <- cfa(m1, data = HolzingerSwineford1939, std.lv = TRUE) # Equivalent to fit1 fit2 <- cfa(m2, data = HolzingerSwineford1939) # Not equivalent to or nested in fit1 fit3 <- cfa(m3, data = HolzingerSwineford1939) # Nested in fit1 and fit1a tests <- net(fit1, fit1a, fit2, fit3) tests summary(tests) } } semTools/man/twostage.Rd0000644000175100001440000001245413001142753014776 0ustar hornikusers\name{twostage} \alias{twostage} \alias{cfa.2stage} \alias{sem.2stage} \alias{growth.2stage} \alias{lavaan.2stage} \title{ Fit a lavaan model using 2-Stage Maximum Likelihood (TSML) estimation for missing data. } \description{ This function automates 2-Stage Maximum Likelihood (TSML) estimation, optionally with auxiliary variables. Step 1 involves fitting a saturated model to the partially observed data set (to variables in the hypothesized model as well as auxiliary variables related to missingness). Step 2 involves fitting the hypothesized model to the model-implied means and covariance matrix (also called the "EM" means and covariance matrix) as if they were complete data. Step 3 involves correcting the Step-2 standard errors (\emph{SE}s) and chi-squared statistic to account for additional uncertainty due to missing data (using information from Step 1; see References section for sources with formulas). 
All variables (including auxiliary variables) are treated as endogenous variables in the Step-1 saturated model (\code{fixed.x = FALSE}), so data are assumed continuous, although not necessarily multivariate normal (dummy-coded auxiliary variables may be included in Step 1, but categorical endogenous variables in the Step-2 hypothesized model are not allowed). To avoid assuming multivariate normality, request \code{se = "robust.huber.white"}. CAUTION: In addition to setting \code{fixed.x = FALSE} and \code{conditional.x = FALSE} in \code{\link[lavaan]{lavaan}}, this function will automatically set \code{meanstructure = TRUE}, \code{estimator = "ML"}, \code{missing = "fiml"}, and \code{test = "standard"}. \code{\link[lavaan]{lavaan}}'s \code{se} option can only be set to \code{"standard"} to assume multivariate normality or to \code{"robust.huber.white"} to relax that assumption. } \usage{ twostage(..., aux, fun, baseline.model = NULL) cfa.2stage(..., aux = NULL, baseline.model = NULL) sem.2stage(..., aux = NULL, baseline.model = NULL) growth.2stage(..., aux = NULL, baseline.model = NULL) lavaan.2stage(..., aux = NULL, baseline.model = NULL) } \arguments{ \item{\dots}{ Arguments passed to the \code{\link[lavaan]{lavaan}} function specified in the \code{fun} argument. At a minimum, the user must supply the first two named arguments to \code{\link[lavaan]{lavaan}} (i.e., \code{model} and \code{data}). } \item{aux}{ An optional character vector naming auxiliary variable(s) in \code{data} } \item{fun}{ The character string naming the lavaan function used to fit the Step-2 hypothesized model (\code{"cfa"}, \code{"sem"}, \code{"growth"}, or \code{"lavaan"}). } \item{baseline.model}{ An optional character string, specifying the lavaan \code{\link[lavaan]{model.syntax}} for a user-specified baseline model.
Interested users can use the fitted baseline model to calculate incremental fit indices (e.g., CFI and TLI) using the corrected chi-squared values (see the \code{anova} method in \code{\linkS4class{twostage}}). If \code{NULL}, the default "independence model" (i.e., freely estimated means and variances, but all covariances constrained to zero) will be specified internally. } } \value{ The \code{\linkS4class{twostage}} object contains 3 fitted lavaan models (saturated, target/hypothesized, and baseline) as well as the names of auxiliary variables. None of the individual models provide the correct model results (except the point estimates in the target model are unbiased). Use the methods in \code{\linkS4class{twostage}} to extract corrected \emph{SE}s and test statistics. } \references{ Savalei, V., \& Bentler, P. M. (2009). A two-stage approach to missing data: Theory and application to auxiliary variables. \emph{Structural Equation Modeling, 16}(3), 477-497. doi:10.1080/10705510903008238 Savalei, V., \& Falk, C. F. (2014). Robust two-stage approach outperforms robust full information maximum likelihood with incomplete nonnormal data. \emph{Structural Equation Modeling, 21}(2), 280-302. doi:10.1080/10705511.2014.882692 } \seealso{ \code{\linkS4class{twostage}} } \author{ Terrence D. 
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \examples{ ## set some example data missing at random dat1 <- HolzingerSwineford1939 dat1$x5 <- ifelse(dat1$x1 <= quantile(dat1$x1, .3), NA, dat1$x5) dat1$age <- dat1$ageyr + dat1$agemo/12 dat1$x9 <- ifelse(dat1$age <= quantile(dat1$age, .3), NA, dat1$x9) ## fit CFA model from lavaan's ?cfa help page model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' ## use ageyr and agemo as auxiliary variables out <- cfa.2stage(model = model, data = dat1, aux = c("ageyr","agemo")) ## two versions of a corrected chi-squared test results are shown out ## see Savalei & Bentler (2009) and Savalei & Falk (2014) for details ## the summary additionally provides the parameter estimates with corrected ## standard errors, test statistics, and confidence intervals, along with ## any other options that can be passed to parameterEstimates() summary(out, standardized = TRUE) ## use parameter labels to fit a more constrained model modc <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + a*x8 + a*x9 ' outc <- cfa.2stage(model = modc, data = dat1, aux = c("ageyr","agemo")) ## use the anova() method to test this constraint anova(out, outc) ## like for a single model, two corrected statistics are provided } semTools/man/longInvariance.Rd0000644000175100001440000001314513000201061016061 0ustar hornikusers\name{longInvariance} \alias{longInvariance} \alias{longInvariance} \title{ Measurement Invariance Tests Within Person } \description{ Testing measurement invariance across timepoints (longitudinal) or any context involving the use of the same scale in one case (e.g., a dyad case with husband and wife answering the same scale). The measurement invariance uses a typical sequence of model comparison tests. This function currently works with only one scale. 
} \usage{ longInvariance(model, varList, auto = "all", constrainAuto = FALSE, fixed.x = TRUE, std.lv = FALSE, group=NULL, group.equal="", group.partial="", warn=TRUE, debug=FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", method = "satorra.bentler.2001", ...) } \arguments{ \item{model}{lavaan syntax or parameter table} \item{varList}{A list containing indicator names of factors used in the invariance testing, such as the list that the first element is the vector of indicator names in the first timepoint and the second element is the vector of indicator names in the second timepoint. The order of indicator names should be the same (but measured in different times or different units).} \item{auto}{The order of autocorrelation on the measurement errors on the similar items across factor (e.g., Item 1 in Time 1 and Time 2). If 0 is specified, the autocorrelation will be not imposed. If 1 is specified, the autocorrelation will imposed for the adjacent factor listed in \code{varList}. The maximum number can be specified is the number of factors specified minus 1. If \code{"all"} is specified, the maximum number of order will be used.} \item{constrainAuto}{If \code{TRUE}, the function will equate the auto-\emph{covariance} to be equal within the same item across factors. For example, the covariance of item 1 in time 1 and time 2 is equal to the covariance of item 1 in time 2 and time 3.} \item{fixed.x}{See \code{\link[lavaan]{lavaan}.}} \item{std.lv}{See \code{\link[lavaan]{lavaan}.}} \item{group}{See \code{\link[lavaan]{lavaan}.}} \item{group.equal}{See \code{\link[lavaan]{lavaan}.}} \item{group.partial}{See \code{\link[lavaan]{lavaan}.}} \item{warn}{See \code{\link[lavaan]{lavaan}.}} \item{debug}{See \code{\link[lavaan]{lavaan}.}} \item{strict}{If \code{TRUE}, the sequence requires `strict' invariance. 
See details for more information.} \item{quiet}{If \code{TRUE}, a summary is printed out containing an overview of the different models that are fitted, together with some model comparison tests.} \item{fit.measures}{Fit measures used to calculate the differences between nested models.} \item{method}{The method used to calculate likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} \item{...}{Additional arguments in the \code{\link[lavaan]{lavaan}} function.} } \details{ If \code{strict = FALSE}, the following four models are tested in order: \enumerate{ \item{Model 1: configural invariance. The same factor structure is imposed on all units.} \item{Model 2: weak invariance. The factor loadings are constrained to be equal across units.} \item{Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across units.} \item{Model 4: The factor loadings, intercepts and means are constrained to be equal across units.} } Each time a more restricted model is fitted, a chi-square difference test is reported, comparing the current model with the previous one, and comparing the current model to the baseline model (Model 1). In addition, the difference in cfi is also reported (delta.cfi). If \code{strict = TRUE}, the following five models are tested in order: \enumerate{ \item{Model 1: configural invariance. The same factor structure is imposed on all units.} \item{Model 2: weak invariance. The factor loadings are constrained to be equal across units.} \item{Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across units.} \item{Model 4: strict invariance. The factor loadings, intercepts and residual variances are constrained to be equal across units.} \item{Model 5: The factor loadings, intercepts, residual variances and means are constrained to be equal across units.} } Note that if the chi-square test statistic is scaled (eg. 
a Satorra-Bentler or Yuan-Bentler test statistic), a special version of the chi-square difference test is used as described in \url{http://www.statmodel.com/chidiff.shtml} } \value{ Invisibly, all model fits in the sequence are returned as a list. } \references{ Vandenberg, R. J., and Lance, C. E. (2000). A review and synthesis of the measurement invariance literature: Suggestions, practices, and recommendations for organizational research. \emph{Organizational Research Methods, 3,} 4-70. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}); Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) } \seealso{ \code{\link{measurementInvariance}} For the measurement invariance test between groups } \examples{ model <- ' f1t1 =~ y1t1 + y2t1 + y3t1 f1t2 =~ y1t2 + y2t2 + y3t2 f1t3 =~ y1t3 + y2t3 + y3t3' # Create list of variables var1 <- c("y1t1", "y2t1", "y3t1") var2 <- c("y1t2", "y2t2", "y3t2") var3 <- c("y1t3", "y2t3", "y3t3") constrainedVar <- list(var1, var2, var3) # Invariance of the same factor across timepoints longInvariance(model, auto=1, constrainAuto=TRUE, varList=constrainedVar, data=exLong) # Invariance of the same factor across timepoints and groups longInvariance(model, auto=1, constrainAuto=TRUE, varList=constrainedVar, data=exLong, group="sex", group.equal=c("loadings", "intercepts")) } semTools/man/probe2WayMC.Rd0000644000175100001440000001335113000201061015213 0ustar hornikusers\name{probe2WayMC} \alias{probe2WayMC} \title{ Probing two-way interaction on the no-centered or mean-centered latent interaction } \description{ Probing interaction for simple intercept and simple slope for the no-centered or mean-centered latent two-way interaction } \usage{ probe2WayMC(fit, nameX, nameY, modVar, valProbe) } \arguments{ \item{fit}{The lavaan model object used to evaluate model fit} \item{nameX}{The vector of the factor names used as the predictors. The first-order factor will be listed first.
The last name must be the name representing the interaction term.} \item{nameY}{The name of factor that is used as the dependent variable.} \item{modVar}{The name of factor that is used as a moderator. The effect of the other independent factor on each moderator variable value will be probed.} \item{valProbe}{The values of the moderator that will be used to probe the effect of the other independent factor.} } \details{ Before using this function, researchers need to make the products of the indicators between the first-order factors using mean centering (Marsh, Wen, & Hau, 2004). Note that the double-mean centering may not be appropriate for probing interaction if researchers are interested in simple intercepts. The mean or double-mean centering can be done by the \code{\link{indProd}} function. The indicator products can be made for all possible combinations or the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the regression with latent interaction will be used to fit all original indicators and the product terms. See the example for how to fit the product term below. Once the lavaan result is obtained, this function will be used to probe the interaction. Let the latent interaction model regressing the dependent variable (\eqn{Y}) on the independent variable (\eqn{X}) and the moderator (\eqn{Z}) be \deqn{ Y = b_0 + b_1X + b_2Z + b_3XZ + r, } where \eqn{b_0} is the estimated intercept or the expected value of \eqn{Y} when both \eqn{X} and \eqn{Z} are 0, \eqn{b_1} is the effect of \eqn{X} when \eqn{Z} is 0, \eqn{b_2} is the effect of \eqn{Z} when \eqn{X} is 0, \eqn{b_3} is the interaction effect between \eqn{X} and \eqn{Z}, and \eqn{r} is the residual term. For probing two-way interaction, the simple intercept of the independent variable at each value of the moderator (Aiken & West, 1991; Cohen, Cohen, West, & Aiken, 2003; Preacher, Curran, & Bauer, 2006) can be obtained by \deqn{ b_{0|X = 0, Z} = b_0 + b_2Z.
} The simple slope of the independent varaible at each value of the moderator can be obtained by \deqn{ b_{X|Z} = b_1 + b_3Z. } The variance of the simple intercept formula is \deqn{ Var\left(b_{0|X = 0, Z}\right) = Var\left(b_0\right) + 2ZCov\left(b_0, b_2\right) + Z^2Var\left(b_2\right) } where \eqn{Var} denotes the variance of a parameter estimate and \eqn{Cov} denotes the covariance of two parameter estimates. The variance of the simple slope formula is \deqn{ Var\left(b_{X|Z}\right) = Var\left(b_1\right) + 2ZCov\left(b_1, b_3\right) + Z^2Var\left(b_3\right) } Wald statistic is used for test statistic. } \value{ A list with two elements: \enumerate{ \item{SimpleIntercept} The intercepts given each value of the moderator. This element will be shown only if the factor intercept is estimated (e.g., not fixed as 0). \item{SimpleSlope} The slopes given each value of the moderator. } In each element, the first column represents the values of the moderators specified in the \code{valProbe} argument. The second column is the simple intercept or simple slope. The third column is the standard error of the simple intercept or simple slope. The fourth column is the Wald (\emph{z}) statistic. The fifth column is the \emph{p}-value testing whether the simple intercepts or slopes are different from 0. } \references{ Aiken, L. S., & West, S. G. (1991). Multiple regression: Testing and interpreting interactions. Newbury Park, CA: Sage. Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). Applied multiple regression/correlation analysis for the behavioral sciences (3rd ed.). New York: Routledge. Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}, 275-300. Preacher, K. J., Curran, P. J., & Bauer, D. J. (2006). 
Computational tools for probing interactions in multiple linear regression, multilevel modeling, and latent curve analysis. \emph{Journal of Educational and Behavioral Statistics, 31}, 437-448. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. \item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{probe3WayRC}} For probing the three-way latent interaction when the results are obtained from residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. } } \examples{ library(lavaan) dat2wayMC <- indProd(dat2way, 1:3, 4:6) model1 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f12 =~ x1.x4 + x2.x5 + x3.x6 f3 =~ x7 + x8 + x9 f3 ~ f1 + f2 + f12 f12 ~~0*f1 f12 ~~ 0*f2 x1 ~ 0*1 x4 ~ 0*1 x1.x4 ~ 0*1 x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f12 ~ NA*1 f3 ~ NA*1 " fitMC2way <- sem(model1, data=dat2wayMC, meanstructure=TRUE, std.lv=FALSE) summary(fitMC2way) result2wayMC <- probe2WayMC(fitMC2way, c("f1", "f2", "f12"), "f3", "f2", c(-1, 0, 1)) result2wayMC } semTools/man/plotRMSEApower.Rd0000644000175100001440000000622013000201061015745 0ustar hornikusers\name{plotRMSEApower} \alias{plotRMSEApower} \title{ Plot power curves for RMSEA } \description{ Plots power of RMSEA over a range of sample sizes } \usage{ plotRMSEApower(rmsea0, rmseaA, df, nlow, nhigh, steps=1, alpha=.05, group=1, ...) 
} \arguments{ \item{rmsea0}{Null RMSEA} \item{rmseaA}{Alternative RMSEA} \item{df}{Model degrees of freedom} \item{nlow}{Lower sample size} \item{nhigh}{Upper sample size} \item{steps}{Increase in sample size for each iteration. Smaller values of steps will lead to more precise plots. However, smaller step sizes means a longer run time.} \item{alpha}{Alpha level used in power calculations} \item{group}{The number of group that is used to calculate RMSEA.} \item{\dots}{The additional arguments for the plot function.} } \details{ This function creates plot of power for RMSEA against a range of sample sizes. The plot places sample size on the horizontal axis and power on the vertical axis. The user should indicate the lower and upper values for sample size and the sample size between each estimate ("step size") We strongly urge the user to read the sources below (see References) before proceeding. A web version of this function is available at: \url{http://quantpsy.org/rmsea/rmseaplot.htm}. } \value{ \enumerate{ \item{plot} Plot of power for RMSEA against a range of sample sizes } } \references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11,} 19-35. MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1,} 130-149. MacCallum, R. C., Lee, T., & Browne, M. W. (2010). The issue of isopower in power analysis for tests of structural equation models. \emph{Structural Equation Modeling, 17,} 23-41. Preacher, K. J., Cai, L., & MacCallum, R. C. (2007). Alternatives to traditional model comparison strategies for covariance structure models. In T. D. Little, J. A. Bovaird, & N. A. Card (Eds.), \emph{Modeling contextual effects in longitudinal studies} (pp. 33-62). Mahwah, NJ: Lawrence Erlbaum Associates. Steiger, J. H. (1998). 
A note on multiple sample extensions of the RMSEA fit index. \emph{Structural Equation Modeling, 5,} 411-419. Steiger, J. H., & Lind, J. C. (1980, June). \emph{Statistically based tests for the number of factors.} Paper presented at the annual meeting of the Psychometric Society, Iowa City, IA. } \author{ Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) Kristopher J. Preacher (Vanderbilt University; \email{kris.preacher@vanderbilt.edu}) Donna L. Coffman (Pennsylvania State University; \email{dlc30@psu.edu.}) } \seealso{ \itemize{ \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions \item \code{\link{findRMSEApower}} to find the statistical power based on population RMSEA given a sample size \item \code{\link{findRMSEAsamplesize}} to find the minium sample size for a given statistical power based on population RMSEA } } \examples{ plotRMSEApower(.025, .075, 23, 100, 500, 10) }semTools/man/mardiaSkew.Rd0000644000175100001440000000335113000201061015207 0ustar hornikusers\name{mardiaSkew} \alias{mardiaSkew} \title{ Finding Mardia's multivariate skewness } \description{ Finding Mardia's multivariate skewness of multiple variables } \usage{ mardiaSkew(dat, use = "everything") } \arguments{ \item{dat}{ The target matrix or data frame with multiple variables } \item{use}{ Missing data handling method from the \code{\link[stats]{cov}} function. 
} } \value{ A value of a Mardia's multivariate skewness with a test statistic } \details{ The Mardia's multivariate skewness formula (Mardia, 1970) is \deqn{ b_{1, d} = \frac{1}{n^2}\sum^n_{i=1}\sum^n_{j=1}\left[ \left(\bold{X}_i - \bold{\bar{X}} \right)^{'} \bold{S}^{-1} \left(\bold{X}_j - \bold{\bar{X}} \right) \right]^3, } where \eqn{d} is the number of variables, \eqn{X} is the target dataset with multiple variables, \eqn{n} is the sample size, \eqn{\bold{S}} is the sample covariance matrix of the target dataset, and \eqn{\bold{\bar{X}}} is the mean vectors of the target dataset binded in \eqn{n} rows. When the population multivariate skewness is normal, the \eqn{\frac{n}{6}b_{1,d}} is asymptotically distributed as chi-square distribution with \eqn{d(d + 1)(d + 2)/6} degrees of freedom. } \references{ Mardia, K. V. (1970). Measures of multivariate skewness and kurtosis with applications. \emph{Biometrika, 57}, 519-530. } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{skew}} Find the univariate skewness of a variable \item \code{\link{kurtosis}} Find the univariate excessive kurtosis of a variable \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate kurtosis of a set of variables } } \examples{ library(lavaan) mardiaSkew(HolzingerSwineford1939[,paste("x", 1:9, sep="")]) } semTools/man/nullRmsea.Rd0000644000175100001440000000423313001164727015104 0ustar hornikusers\name{nullRMSEA} \alias{nullRMSEA} \title{ Calculate the RMSEA of the null model } \description{ Calculate the RMSEA of the null (baseline) model } \usage{ nullRMSEA(object, scaled = FALSE, silent=FALSE) } \arguments{ \item{object}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.} \item{scaled}{If \code{TRUE}, calculate the null model from the scaled test.} \item{silent}{If \code{TRUE}, do not print anything on the screen.} } \details{ RMSEA of the null model is 
calculated similar to the formula provided in the \code{lavaan} package. The standard formula of RMSEA is \deqn{ RMSEA =\sqrt{\frac{\chi^{2}}{N \times df} - \frac{1}{N}} \times \sqrt{G} } where \eqn{\chi^{2}} is the chi-square test statistic value of the target model, \eqn{N} is the total sample size, \eqn{df} is the degree of freedom of the hypothesized model, \eqn{G} is the number of groups. Kenny proposed in his website that "A reasonable rule of thumb is to examine the RMSEA for the null model and make sure that is no smaller than 0.158. An RMSEA for the model of 0.05 and a TLI of .90, implies that the RMSEA of the null model is 0.158. If the RMSEA for the null model is less than 0.158, an incremental measure of fit may not be that informative." See \url{http://davidakenny.net/cm/fit.htm}. } \value{ A value of RMSEA of the null model. This value is hidden. Users may be assigned the output of this function to any object for further usage. } \references{ Kenny, D. A., Kaniskan, B., & McCoach, D. B. (2015). The performance of RMSEA in models with small degrees of freedom. \emph{Sociological Methods Research, 44}(3), 486-507. 
doi:10.1177/0049124114543236 } \author{ Ruben Arslan (Humboldt-University of Berlin, \email{rubenarslan@gmail.com}) } \seealso{ \itemize{ \item \code{\link{miPowerFit}} For the modification indices and their power approach for model fit evaluation \item \code{\link{moreFitIndices}} For other fit indices } } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) nullRMSEA(fit) } semTools/man/mardiaKurtosis.Rd0000644000175100001440000000335213000201061016122 0ustar hornikusers\name{mardiaKurtosis} \alias{mardiaKurtosis} \title{ Finding Mardia's multivariate kurtosis } \description{ Finding Mardia's multivariate kurtosis of multiple variables } \usage{ mardiaKurtosis(dat, use = "everything") } \arguments{ \item{dat}{ The target matrix or data frame with multiple variables } \item{use}{ Missing data handling method from the \code{\link[stats]{cov}} function. } } \value{ A value of a Mardia's multivariate kurtosis with a test statistic } \details{ The Mardia's multivariate kurtosis formula (Mardia, 1970) is \deqn{ b_{2, d} = \frac{1}{n}\sum^n_{i=1}\left[ \left(\bold{X}_i - \bold{\bar{X}} \right)^{'} \bold{S}^{-1} \left(\bold{X}_i - \bold{\bar{X}} \right) \right]^2, } where \eqn{d} is the number of variables, \eqn{X} is the target dataset with multiple variables, \eqn{n} is the sample size, \eqn{\bold{S}} is the sample covariance matrix of the target dataset, and \eqn{\bold{\bar{X}}} is the mean vectors of the target dataset binded in \eqn{n} rows. When the population multivariate kurtosis is normal, the \eqn{b_{2,d}} is asymptotically distributed as normal distribution with the mean of \eqn{d(d + 2)} and variance of \eqn{8d(d + 2)/n}. } \references{ Mardia, K. V. (1970). Measures of multivariate skewness and kurtosis with applications. \emph{Biometrika, 57}, 519-530. 
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{skew}} Find the univariate skewness of a variable \item \code{\link{kurtosis}} Find the univariate excessive kurtosis of a variable \item \code{\link{mardiaSkew}} Find the Mardia's multivariate skewness of a set of variables } } \examples{ library(lavaan) mardiaKurtosis(HolzingerSwineford1939[,paste("x", 1:9, sep="")]) } semTools/man/loadingFromAlpha.Rd0000644000175100001440000000115213000201061016324 0ustar hornikusers\name{loadingFromAlpha} \alias{loadingFromAlpha} \title{Find standardized factor loading from coefficient alpha} \description{ Find standardized factor loading from coefficient alpha assuming that all items have equal loadings. } \usage{ loadingFromAlpha(alpha, ni) } \arguments{ \item{alpha}{A desired coefficient alpha value.} \item{ni}{A desired number of items.} } \value{ \item{result}{The standardized factor loadings that make desired coefficient alpha with specified number of items.} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \examples{ loadingFromAlpha(0.8, 4) } semTools/man/findRMSEAsamplesizenested.Rd0000644000175100001440000000327613000201061020136 0ustar hornikusers\name{findRMSEAsamplesizenested} \alias{findRMSEAsamplesizenested} \title{Find sample size given a power in nested model comparison} \description{ Find the sample size that the power in rejection the samples from the alternative pair of RMSEA is just over the specified power. 
} \usage{ findRMSEAsamplesizenested(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, power=0.80, alpha=.05, group=1) } \arguments{ \item{rmsea0A}{The H0 baseline RMSEA.} \item{rmsea0B}{The H0 alternative RMSEA (trivial misfit).} \item{rmsea1A}{The H1 baseline RMSEA.} \item{rmsea1B}{The H1 alternative RMSEA (target misfit to be rejected).} \item{dfA}{degree of freedom of the more-restricted model.} \item{dfB}{degree of freedom of the less-restricted model.} \item{power}{The desired statistical power.} \item{alpha}{The alpha level.} \item{group}{The number of group in calculating RMSEA.} } \references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11}, 19-35. } \author{ Bell Clinton; Pavel Panko (Texas Tech University; \email{pavel.panko@ttu.edu}); Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } \seealso{ \itemize{ \item \code{\link{plotRMSEApowernested}} to plot the statistical power for nested model comparison based on population RMSEA given the sample size \item \code{\link{findRMSEApowernested}} to find the power for a given sample size in nested model comparison based on population RMSEA } } \examples{ findRMSEAsamplesizenested(rmsea0A = 0, rmsea0B = 0, rmsea1A = 0.06, rmsea1B = 0.05, dfA = 22, dfB = 20, power=0.80, alpha=.05, group=1) } semTools/man/tukeySEM.rd0000644000175100001440000000404513000201061014667 0ustar hornikusers\name{tukeySEM} \alias{tukeySEM} \title{ Tukey's WSD post-hoc test of means for unequal variance and sample size } \description{ This function computes Tukey's WSD post-hoc test of means when variances and sample sizes are not equal across groups. It can be used as a post-hoc test when comparing latent means in multiple group SEM. 
} \usage{ tukeySEM(m1, m2, var1, var2, n1, n2, ng) } \arguments{ \item{m1}{Mean of group 1.} \item{m2}{Mean of group 2.} \item{var1}{Variance of group 1.} \item{var2}{Variance of group 2.} \item{n1}{Sample size of group 1.} \item{n2}{Sample size of group 2.} \item{ng}{Total number of groups to be compared (i.e., the number of groups compared in the omnibus test).} } \details{ After conducting an omnibus test of means across three or more groups, researchers often wish to know which sets of means differ at a particular Type I error rate. Tukey's WSD test holds the error rate stable across multiple comparisons of means. This function implements an adaptation of Tukey's WSD test from Maxwell & Delaney (2004) that allows variances and sample sizes to differ across groups. } \value{ A vector with three elements: \enumerate{ \item{q} The q statistic \item{df} The degrees of freedom for the q statistic \item{p} A p value based on the q statistic, degrees of freedom and the total number of groups to be compared } } \references{ Maxwell, S. E., & Delaney, H. D. (2004). \emph{Designing experiments and analyzing data: A model comparison perspective} (2nd ed.). Mahwah, NJ.: Lawrence Erlbaum Associates. } \author{ Alexander M. 
Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) } \examples{ ##For a case where three groups have been compared: ##Group 1: mean = 3.91, var = 0.46, n = 246 ##Group 2: mean = 3.96, var = 0.62, n = 465 ##Group 3: mean = 2.94, var = 1.07, n = 64 #compare group 1 and group 2 tukeySEM(3.91, 3.96, 0.46, 0.62, 246, 465, 3) #compare group 1 and group 3 tukeySEM(3.91, 2.94, 0.46, 1.07, 246, 64, 3) #compare group 2 and group 3 tukeySEM(3.96, 2.94, 0.62, 1.07, 465, 64, 3) } semTools/man/datCat.Rd0000644000175100001440000000120113000201061014310 0ustar hornikusers\name{datCat} \alias{datCat} \title{ Simulated Data set to Demonstrate Categorical Measurement Invariance } \description{ A simulated data set with 2 factors with 4 indicators each separated into two groups } \usage{ data(datCat) } \format{ A data frame with 200 observations of 9 variables. \describe{ \item{g}{Sex of respondents} \item{u1}{Indicator 1} \item{u2}{Indicator 2} \item{u3}{Indicator 3} \item{u4}{Indicator 4} \item{u5}{Indicator 5} \item{u6}{Indicator 6} \item{u7}{Indicator 7} \item{u8}{Indicator 8} } } \source{ Data was generated using the \code{lavaan} package. } \examples{ head(datCat) }semTools/man/dat2way.Rd0000644000175100001440000000200613000201061014472 0ustar hornikusers\name{dat2way} \alias{dat2way} \title{ Simulated Dataset to Demonstrate Two-way Latent Interaction } \description{ A simulated data set with 2 independent factors and 1 dependent factor where each factor has three indicators } \usage{ data(dat2way) } \format{ A data frame with 500 observations of 9 variables. 
\describe{ \item{x1}{The first indicator of the first independent factor} \item{x2}{The second indicator of the first independent factor} \item{x3}{The third indicator of the first independent factor} \item{x4}{The first indicator of the second independent factor} \item{x5}{The second indicator of the second independent factor} \item{x6}{The third indicator of the second independent factor} \item{x7}{The first indicator of the dependent factor} \item{x8}{The second indicator of the dependent factor} \item{x9}{The third indicator of the dependent factor} } } \source{ Data was generated by the \link[MASS]{mvrnorm} function in the \code{MASS} package. } \examples{ head(dat2way) }