semTools/NAMESPACE

# Generated by roxygen2: do not edit by hand

export(PAVranking)
export(SSpower)
export(auxiliary)
export(bsBootMiss)
export(calculate.D2)
export(cfa.2stage)
export(cfa.auxiliary)
export(cfa.mi)
export(chisqSmallN)
export(clipboard)
export(combinequark)
export(compareFit)
export(efa.ekc)
export(efaUnrotate)
export(findRMSEApower)
export(findRMSEApowernested)
export(findRMSEAsamplesize)
export(findRMSEAsamplesizenested)
export(fmi)
export(funRotate)
export(growth.2stage)
export(growth.auxiliary)
export(growth.mi)
export(htmt)
export(imposeStart)
export(indProd)
export(kd)
export(kurtosis)
export(lavTestLRT.mi)
export(lavTestScore.mi)
export(lavTestWald.mi)
export(lavaan.2stage)
export(lavaan.auxiliary)
export(lavaan.mi)
export(loadingFromAlpha)
export(longInvariance)
export(mardiaKurtosis)
export(mardiaSkew)
export(maximalRelia)
export(measEq.syntax)
export(measurementInvariance)
export(measurementInvarianceCat)
export(miPowerFit)
export(modificationIndices.mi)
export(modindices.mi)
export(monteCarloMed)
export(moreFitIndices)
export(mvrnonnorm)
export(net)
export(nullRMSEA)
export(oblqRotate)
export(orthRotate)
export(orthogonalize)
export(parcelAllocation)
export(partialInvariance)
export(partialInvarianceCat)
export(permuteMeasEq)
export(plausibleValues)
export(plotProbe)
export(plotRMSEAdist)
export(plotRMSEApower)
export(plotRMSEApowernested)
export(poolMAlloc)
export(probe2WayMC)
export(probe2WayRC)
export(probe3WayMC)
export(probe3WayRC)
export(quark)
export(reliability)
export(reliabilityL2)
export(residualCovariate)
export(runMI)
export(saveFile)
export(sem.2stage)
export(sem.auxiliary)
export(sem.mi)
export(singleParamTest)
export(skew)
export(splitSample)
export(tukeySEM)
export(twostage)
exportMethods(anova)
exportMethods(as.character)
exportMethods(coef)
exportMethods(fitMeasures)
exportMethods(fitmeasures)
exportMethods(fitted)
exportMethods(fitted.values)
exportMethods(hist)
exportMethods(nobs)
exportMethods(resid)
exportMethods(residuals)
exportMethods(show)
exportMethods(summary)
exportMethods(update)
exportMethods(vcov)
importClassesFrom(lavaan,lavaanList)
importFrom(graphics,abline)
importFrom(graphics,hist)
importFrom(graphics,legend)
importFrom(graphics,lines)
importFrom(graphics,par)
importFrom(graphics,plot)
importFrom(lavaan,cfa)
importFrom(lavaan,fitMeasures)
importFrom(lavaan,fitmeasures)
importFrom(lavaan,lavInspect)
importFrom(lavaan,lavListInspect)
importFrom(lavaan,lavNames)
importFrom(lavaan,lavPredict)
importFrom(lavaan,lavTestLRT)
importFrom(lavaan,lavaan)
importFrom(lavaan,lavaanList)
importFrom(lavaan,lavaanify)
importFrom(lavaan,parTable)
importFrom(methods,as)
importFrom(methods,getMethod)
importFrom(methods,hasArg)
importFrom(methods,is)
importFrom(methods,new)
importFrom(methods,setClass)
importFrom(methods,setMethod)
importFrom(methods,show)
importFrom(methods,slot)
importFrom(stats,anova)
importFrom(stats,coef)
importFrom(stats,cor)
importFrom(stats,cov)
importFrom(stats,cov2cor)
importFrom(stats,dchisq)
importFrom(stats,factanal)
importFrom(stats,fitted)
importFrom(stats,fitted.values)
importFrom(stats,lm)
importFrom(stats,nlminb)
importFrom(stats,nobs)
importFrom(stats,pchisq)
importFrom(stats,pf)
importFrom(stats,pnorm)
importFrom(stats,pt)
importFrom(stats,ptukey)
importFrom(stats,qchisq)
importFrom(stats,qnorm)
importFrom(stats,qt)
importFrom(stats,quantile)
importFrom(stats,resid)
importFrom(stats,residuals)
importFrom(stats,rnorm)
importFrom(stats,runif)
importFrom(stats,sd)
importFrom(stats,uniroot)
importFrom(stats,update)
importFrom(stats,var)
importFrom(stats,vcov)
importMethodsFrom(lavaan,fitMeasures)

semTools/README.md

# `semTools`

Useful tools for structural equation modeling.

This is an R package whose primary purpose is to extend the functionality of the R package `lavaan`. There are several *suites* of tools in the package, each organized around a common theme. To browse these suites, open the help page at the Console:

```
?semTools::`semTools-package`
```

Additional tools are available that do not require users to rely on any R packages for SEM (e.g., `lavaan`, `OpenMx`, or `sem`), as long as their other software provides the information they need. Examples:

- `monteCarloMed()` to calculate Monte Carlo confidence intervals for functions of parameters, such as indirect effects in mediation models (a usage sketch appears at the end of this README)
- `calculate.D2()` to pool *z* or chi-squared statistics across multiple imputations of missing data
- `indProd()` for creating product indicators of latent interactions
- `SSpower()` provides analytically derived power estimates for SEMs
- `tukeySEM()` for Tukey's WSD post-hoc test of mean differences under unequal variances and sample sizes
- `bsBootMiss()` to transform incomplete data to be consistent with the null-hypothesized model, appropriate for model-based (a.k.a. "Bollen--Stine") bootstrapping

All users of R (or SEM) are invited to submit functions or ideas for functions by contacting the maintainer, Terrence Jorgensen (TJorgensen314 at gmail dot com). Contributors are encouraged to use **Roxygen** comments to document their contributed code, which is consistent with the rest of `semTools`. Read the vignette from the `roxygen2` package for details:

```
vignette("rd", package = "roxygen2")
```
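For instance, a contributed function could be documented as below. This is a minimal, hypothetical sketch — `avgItemCor()` is invented for illustration and is not part of `semTools` — showing how the `@export` tag is translated by `roxygen2` into the `export()` directives listed in the `NAMESPACE` above (`@importFrom` tags yield the `importFrom()` lines analogously):

```
#' Average inter-item correlation implied by coefficient alpha
#'
#' A hypothetical helper (not part of semTools), shown only to illustrate
#' Roxygen documentation tags.
#'
#' @param alpha Numeric. Coefficient alpha of a scale (standardized items).
#' @param k Integer. Number of items on the scale.
#' @return The average inter-item correlation implied by alpha.
#' @export
avgItemCor <- function(alpha, k) {
  ## rearranged from the standardized-alpha formula: alpha = k*r / (1 + (k-1)*r)
  alpha / (k - alpha * (k - 1))
}
```

Running `roxygen2::roxygenise()` on a package containing this file would add `export(avgItemCor)` to the generated `NAMESPACE`, alongside entries like those shown above.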
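Finally, the usage sketch promised in the tool list above. This is a hedged example, not output from a real analysis: the point estimates and their asymptotic covariance matrix (`ACM`) are invented, and it assumes `monteCarloMed()` matches the supplied estimates to the labels in the quoted expression in the order given:

```
## Hypothetical path estimates for a mediation model (X -> M -> Y),
## e.g., transcribed from the output of non-R SEM software:
a <- 0.25  # X -> M path (invented value)
b <- 0.40  # M -> Y path (invented value)

## Hypothetical asymptotic covariance matrix of the two estimates:
ACM <- matrix(c(0.007,   0.00002,
                0.00002, 0.006), nrow = 2)

## Default 95% Monte Carlo confidence interval for the indirect effect a*b:
semTools::monteCarloMed("a*b", a, b, ACM = ACM)
```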
semTools/data/

(Binary R data files; compressed .rda contents omitted: datCat.rda, exLong.rda, simParcel.rda, dat2way.rda)
0XJ [ć`~u]J呻x Xgc2Oi=uB3NVLB+ [Xcd=,DɍViڃSo۱M qy(M CWrG;{^mf` Ģ8Rb"<}sؘU@7i~.El ٽtOE᫂\_I ?~Y}n6oΟpɇ /g瓏,oF!=xqʼn7pg5PziO%OM?I{mH|vּ)ct.eqh].,||_BLJ'qy84OsT!P5}# z}SkwId8;tL1@ ;/^ ;jFk'o2{N$t-XƱQOS ҇);`l!cYk\J u)[U`2}gyj]2"%5vѷ-(0&c)qx|e(.:@crտc)c?`]<[&m%Hm<*؇]%T@#Ւo\춅8mÂ[05ZOݽ$&PΌJ~dj=nx*PŻ/ƭoK>:9?:^K|,:AպWqLEیklbY{U(P&xfKo<ɾ o$l u_|^cRV]Mku!,? _R!OHnf6珺/ 9-Z㰳إ~0c9e0;|w"Yc_?ʉQVٖ߬??\~~SJ~z_XM7ٿN|PZ)G8+anV#At]|~z\zHM +eye`.sYR}?pz|eEӖdB5IozSC{;,2ڇZFp87<"-[j\:Ȏ{Hg*6xi &c`G~2/Ks -ؙ''V~=PQ1"='``[M#`*p^ӌY7H˫Nq7k^[Ywm_ڸ%e X6C湊Xllɺz8lj7{f'z~ .?jt{"@5`^o@iQ'E sgaUl58O۱|cw4 'u0%. ܯb#a̎kԿVaeOQ@Ĵ٩.'ҞV  QE ,H,t]_۬Sr"9F^P™`8>){Nrmޘ|"T_WR'}ݜ9O0}sv ={ 4u?νec<[<_A× +ǾeH,Cor~*R-jzMyTirz5~o"eE/.À_4 w6)B]0%|֝T&3BΟ2nS+^֩n"NjuX)CHL ؾ+kvnƺ%Ӯ8/&ix{3B+q|tG8s^ɜKf}ct<Z#aK0Qou-C۽o%lqp'8rVsMۓ# ؎cGǡZ5VS/z<ߑdfoVc!.}W1δbJea&E+6U=X82 qwI ).~j'>T=}>tkk1ܮ# KImyTpoxh1fgԷ8y[7;%ok$ /?ivF9Iv퇂CGUWc. %3ͷc_Sal\gBg+9uZ{gj$K3@BE݄(et~]ZE`#oYJыKkG+ٞ͢XҫRg7(ġ)9_-N}Vu=;Kd`CCAУwPC$G8\='hovcD|ch Uڣ/_%#ɥa, ^)YŇc8m-l-P2⦔G,`:^RFwEs'KL,)2lvG3Ңwg `]JlDR$,y4Ɖ&{ ⊲P< 7"XcVK3 44KᢛcN&L>C#1%8{ơw,dʫL+y9fz;h`#%Yk eX^Úo8q$"/a08Џ/ZM'sIwfhr )$S)E.kLI*8I?.lĪv"`Ee ayɳrmiy.[S'e9g/'G&檝LkztgߐƝgwF;Cx@ufW+h~//N&(:dk|upxErڈdrwګ/^!.lzhKqBynNīV9z/1#4I5w~87"ؼr9ZSZz@}"ԉCvOt`uخ7$(@~t}:s,z=n߁y}Υ@g#GW;,jxt [>kma .nKZWlj8G;L0WvJB ]=g悳`6p,zKnNHؑI5%9Ql7)G27L|j Kt1ԧ?Z l#:FARaf?>fl+cqe~x'brO$eQA%g|ɻ+Yxubأ{: bнؠZYXKثLmU}gNͨ:[A}[I'ca$Yx]he_8UVu;yu=ssȧ5k: ⵍ$'HTus)UxB_@ [6LN9__Ŧu&lDy f4`QjI>imm>5B+pHtgdtEz6s齁Mޅ™55waF]$SEU7߫ܛ #g);uƉ FR8Pz֯_  'R?hiIH >Xx.G6`oӭ(F;{ ]HbiuW HuK6t+Sb\43K۝QT]PR '$qֱ4y3 S\~-j1=xtfdOӼӏü/0qW!xhH?+;ЃdNl&қΌu q͍WIGNP^G0qmopxm "ra ^7Vyƴ'xořo.b˖mqCRA[b YhXzd 4x]E+9SػPM~PRe#-Cx,E~/VpaͨGJRI.MÎ!}DE\+yڟ7 #۟l~Gsy.NnQ'v5߰'ήPf[tKK'>YC71?fʏ X-))}=DtF/5bOt\]&\I6ݧ#mBx:\\%iuSg_ƊV EVWG`0wt:L)4VNaZsz~0sOe <.zg)| cv iC]qKU ?ծ/,T/B/H.i"֖wP(Ўq#~tyߘVoRݺ-qSt8P -g\':>U;'nf?Yѧ?v+5`lP9ssĀ0cM~L lU$\yCW_I8/оB?r& :\xA9kS7L_}%N㼜:0ةRJ 61p^ȯ)+\̩,-pZS#w0xJꦹBVt 呃3UHUu:NݨNPN 9~iX2) 6885Hwdq͝.l?#7rz<}A#gxv>/Ilr.ۍwKhOCD@=34Խ4łlx\k]:#`/x _Ůڭ\5Z]tљB.0}Jf8_Z^J.Nvg0Ci{Hfζ `(y-I2LdG2%р&C`X?x)xyuGOn>9t97(KҚlaqsj6noQ, NvqfېdXb*"7 ڵ%y޺Qx2&Qpw Z(;?1]{dk(VI}NpS4=y/@o|$XJ G6j*θAmKUUn4&n ՎHu{yp 2tk( 0n]51v1d %K*]}c-gK;9wZVNC>]?]0Aԧ.qt_C oC2p[~=_ڷͯ[Aorfs\hᚷ _=?\^:^yU(4m.I%WwK3fH/є]í\8*ߞ-I%d'U_3Pk; SP+&Nz Sw5.߿N(iΜ򱳯I eQ}0,hq!q[,B=a/:݅7~2V~ L$\~֫8w*Tǘj!^C8]yW}/Tpܲ7/06W9Q1?O4TCa5$Dm: w1lx/Eɿ~Hۆ/ٹ ˆ{,m:{g|xh$ }>Ptw;#HcMOclNg7|} 9^%wTG0}2җ"I t:]/H9s c6Nߗ۹`:Xy F> 8<΋#cQ0X/ Z{Yo<|v;A-}LX'I92s3W8ɼl8jpj҄wl J}աg/C*FN҃9BoY8;:+`/ݟw>-G2ļ<8r<޻*oWg\ {T bIr0z b}HpIm>\Aɔrw7zҧF% prќL v[PNXݟfIdoPK gV;% S? s9;j;aL$ƋRmܾ9Լ {N X8C9v]wgqY‹ # uh'g>넥qXT-W`9$ ޝ\a1hCQ>[o>{V+>e=V 7=sT_fFe02.-&9w_l`[!y} VBߪw$ʙ&84V*Sq,]~VpwC !f@“%o^ yq$?s-oslkN.Lmg&hӉ |z1ρzy\}[ߎ‡V:MCE$.j܍[9(¡ sWg'$5Jǘey j]Ŵ'P*>f\Gom0/#]){Dq|S¿ʸ- |vHa[IQ}NUh#BXαtڭCu(Ry8T5yyD;|Vg 񗏾CjmͦУi,[Ե8H}!횤 0)V"qpw;Fj={tdgNҠf1 7&aNEMWHD'y,5!aL ?;䖕N|LRGFފsnp<ǰt)1)YKbO?1ɿ~XMq[?)ot:.F8$`s`γm T@~A*v+m7LT0+EGt*To(^ȇC%Pkxӻ[_>%6c \4Gll(a5I` +ɸh|>Ο[iސJa.8brt_;&y`C_|/N jΩ4MjAV뽝w8S~ cj?*}ɧXDV|}E<̙dv,4U= i~41u3aՏa}X=)fG7և GNXm?Rk&P]95|Y^é%l.܀ųFN]apʳAhiGYU6l'X ux3BH]g]ulCϧ}B'ߙ°!y]iOpw?$SKlXdŽ ,>H"iS WCz0uF^UjƉ~lǿu,geq9ܐ>u-kk7\'׽L⇼Ρp.kFn؋H`mY,تӑ`9]sReڝ e\`M9oMp=N{ܹ -anChU\q i~y-gNdJLnSYLoIbbxfckqU*<6s&V䚯*S@򟜘5Dn_{c7iҳ4%y'J?T>žE; 코[sf 3GWT˷9@s3h&&,2 A0Âu^j8$e6f==%wca MR? ?y>@@? 
ޑzG RolDV1 {tcxEo~&NŤM'aKw6 Ĝ6rEq=L[m3MGU θ *3Zkc}® .bAeJ\4̐]oO7'0K6uʼn7šBmdҳжmz۰Hp0kzz0}PK l5P v, U}pJ0>]gI)>g%sF=81rfm%V+_ur~K]3ᴮ3{'۳@Z0c2MÂ'+U-#}F_n"MHĖ8ή8վd%?xb̛ gMe2=Gaxo`Mq&网Ǡ_B}vGa 1f`j=$Ol@ê8;s;u  l=_~@719?n„^2āƫt1d>DcAIzu 6C xYl֫VXw7;EoDEl|pa!sF?k[86F#xN55oA eCSt=.us lڊS efB`soQm0V)hQn#V ;SėώJ߹ *Ik-{I-?v_Y#Et*\!bK40(O]wzV53샠u[?bwg=r? G-~0j.X,aw8yKߞ/ZV?nF4Nm'&1WFIpl;(Jލ'*$0Xp2|<|sc!YcoG_~l#{V}2VOE:R _zջkiK|Q\1?Z0vO>[F˴رSdH*3.Uh|nn e;ؐ~d8X1gAdcB`1'ٜ73<[y0';_qz"xw}\1;Whr;=(S~=Lh'=pƾIZ׈7Bޏ>j{\RFv)2uĐٱy6 ֙V(L=W {7RGR6 ^)$󴛩/˖i`s{.qaڟHJzJNJ\# hX-=r%߶PݡbC c#R~mdkxt +{nDA_I[fz[ݻyR|bҋ߮K*W;r; r=rnY5?腑06u $չw==q'reaAUy&>V VY3+-_XsvZ! ߳ rvdSap/-5|G wʢ}\TS0jМ{տPH0b <vCeP<`iN՛"WyTYT nŸ4FšLzdܪ~]õy0#c3:eGI}Jo?o%4w%AɯԜ@)\,Z-7|7|i7] nV`rԏ0CV@ˋvjs; e۷<oʅheP˳0ܤ8Uc#\&tM;N`l;Z#bqwp\W0pf 7L^;No8 RvZfr2`gF㕕)E5Ь(8F`zוkAXh|XCk<^.b#GUx  j$I<7'8sҍyWgx!MwluԚ Y9ɗuX3aAֻPuf V>hEĂ7,։d37ۑj,~>]6 {3m o횴*R~)rM@oG\4憐_ϞS2ҫZ\~.3,<~O?Kg:w2"cݺ@R5Wy}n !Sqt}lh=0e=zVML/7ݐ ۿ8ޒ694oFUS~qlSA+:$ű ؘOZLPzʑ nCqLJsXi0b.0R-uCyyPV"y*-%vTBΆm1/믦噸(Keށ?pR@sj*oҕ.ǂJapodlŁ[ X`|_ӟ\vؼQǶ2QvC]ưAɩ:"Ň⏃$DZ=Ԧ6b-F09՞xǤbJNEpLk 隙Fu & glΘ=b MaYEffNg; zgYW.oKW%3X.SzsɷYf|r +ظa/4 +LLm˿^ؙGsяjoE ҲB  L!vk\<+tj(U0?$pZėPA]`AY?oƙˆ#KiW VqqfŃdapJ7,2Jc xÄc0g]H{eC ז9KJ/*HFn9\x=uisox;yx\ƉG'K=5$jpskzIQ(,=΃#3"vZA]mR27+%CMNOsHvͷ=^z.IH\gˁKql9'Zdhr۹-b[=8&,#,:*6<\Hm~$#B*46\HcǧU=ɔzVЦL3ݰ.\0 d=3fpۘf@)S&,y?&Nuot?iEgU<8׋i+vTyysssѢռ3/4`!R졖Yo!9wӷﺥA D+3b}&fsdNVNG:}qpGmɂ/ƛ̢洠^Q vv!0ЕS7ՒJ:_dHV|,,,ZϖF,#ٿ>IJjҼ|`dhc/+N,ACNCs$p< r5|2-HW4rRGXo4v]QL\^unnwm:uAO2̐28C#V RA0~bKr],:g$bc|֎jm c;'u*)xEIۇmW'XȤ)l}Dpzs.mPgg7Gn٭9qz[P#ƃLW5|g? aMۅu_3ȣJv&Y?߼7~ =G抹8u !=^:KAڰBa ?EWkcw"8eמZ3ÑU89k%uǮ7C̄_#*h5O2+ǀ'^cgVd?H 0~N4}6nRu~:ܦ&R9yݚ'ʞ gD# 6͚3aH[檲#̊Y<)bd5{+/d:P2L2\Ѐy/Ws˓wtێHgTjl{[xz"ҡO7߸:~w=YHq-Tbѻ"90i&\:Z= I>q0>w1O5 +ijF xHG.L$] '[Nnɇ!i[uǦTbϤ~̍#{0/zfiF$7ސ͍;Iy"ʹ8Kn5cɗ&o s q#)6Nr11x}oבnjԛK8`y8HD_:v!2O=,o̥47!c?n?c'>g=H7s쫈C;[Gc{d\v4 ^PDf&|dC$cR}LC c~1+h-$GyG(]2ː!V Sh-Lpi[cS>ursK_cAAYlS?pw hAuw9ꥍvdfJm5/xM`KjUt=S~vþ'3(Cgx><7rN[IJݱf0%"S>f1Kk,l75P`J|\^Tۂ(G`YvP˱}H͇ꓶCH^ :w:#xvB; 7Hه/6=MCdk[a5wmaCD oͷǿzaM~^*F:?(r_.$#XhMRNzs -~ I8ac6-q}a&󞡔)߸u>`a@ BkKpR7.>lMHRt?_KrUR B;{} bM_#EFuҰRd(4Nz}:N@ue͝I:g4sr#6=V ŖX6;1Iu>&}G^!^bμct,ׅ? odE>1\q<(uF?h0%{Jعĸ,3vrevfMK5f25rNz6m[V1!ƷGG:B29L5|/&(7A0ޥ6t]f~jF6gI֊1!qwt:q( mC/jQHf泹ꭖW8% 9U93Edp'K0 K F뽦.qUE`[ xdoYoׁԲiIREo{JgG 쫊tqt՟`Zu8>ygz"&}^9f(E0e uB:OGvҜ{a:#6A*PeVMHԞӡL;恤qjyx'z=9  +a팋/^ˮPezuI5zU.+. -3+v6N88>+sRKыz8!=vY.:Fh!D)n8U|.?tz:lj;$/TFY!U,u1h4? _iB!s8X9t+.ń*)u`UX 7>u#r:<42$Ԛ(|4fZȅ k;I.Y>Yz<߇hlla\/XHsUoe|Q q8@Pț%4b;1mg`φlo6%{k]?$b.+@ݽ^8j}dǂ=RUXG/M ؘkWd/=n2EZ@iiq%${`bcG"1]"e ÐQObW/Nt^aO$i2Q_Zr㩞нC;Y˪E?}5>O`N=W>EOх[۴.&)B1rG2&*=bzU:Wǩ&d-'hC8^V^[⾢PTC2~|.Ӈ#e.Bǃ]2pq'qKv7ˆ/ۘR0N!c IY%}1;7[K?u-N{3@0gGۿ$ťC#l?U4.r}fGh3|#C0.Z.Z3N/DqڋwXLgC_cH*kf HXۦ!Sjia:=qYY[dֳ78/ilr{7otk[^=<`nr&ΆZ?,}ĎS< TqQsunF"Ԃj!@j; 5B;V?r3'pɾvXC1*i$ECmZ q^ r]Y{#qh..K,x숂sVH|Jy KLaSЩ&:I0>4O̠%R\ .~HSV۟_ cbbn3Qa<6qJ˜z&tg@C8A:_(NɎj=0&(M4GIg)ҥy}_c$)Crϣ,ᗟW4)%8ʲ{op8bwbt|4w^ĕ- ?̣Зg'{VXrc>.s@V:qF k$rc`JG//_UтVç#Nk/.r@JK|Neߤk7b}(sx_8r/ f2}JHz",J, YN+}E69'-Yðҥq6|#]*ɨu]D<>RۭCz vPaxeKp8IXV0bx#k 44%ߍW|d8'osA0ˉɸR_9ܹB`Q:_2biPw@v6Y| RAV&ǥ_P&N5 MIUnLyLYOjJ9m[a%_0N7?<@FK,pfPz/O__  ر,,'ogmOM*aМhs6"ODXny= nJY "q]}ԯI>Cd X@;V of}IR$A@P:_t7'za4C`o:dIKbؤrKEgzB'.4w6"7ai"賚|{nQ"+qCݟ0%E <[= N+8Ӓs8=Kj?[[?ivowg#iVى7辠WhD/ X\ӏ^gc 3;wmLjnW1c=,­% 'jmd9ݞ`1cԽmZҔ_aEїUļ'.U[Mޢ8ï笷&)P_߆v-yHL]zoB^@!O:$өtM|m<|S&߅_cA2R}*TC/FBGp:YN)F@d@ *yHt\9S>kG_[`׎=_|ցqC@|?Cs-\v_&Xnlz2 K6vZ0(T523vSvc1nPR'LP艄i6%6kYf+Q]SA3I/F"6ۺwtEjw!tGߠ)eo Ă!K*fk'BmEЖu/xe{nb.ZF=ɥ6~uq0{Mo-#Y"e&HQ\1~v0֗2W}xTe%3ydϛ۳"%k9DŽxHAM7Eɫi;LUT򓔌@=r|\V*u{egG2xggp? 
k2e|+y*qX;Lr$t~Wl(rx>N!9C I6y~2>I0lmyRxF 4 .\:_=Wغ.Lw~śU'?caڻ:W1oYS]7(@XwpQ! tӯ0Kc.@U\h,QI!eVEgѾč% `Cn0XtSڨ vLjTC\eUL[gS~܇5y~V*z/=0{gtT.TVdئQZWUXd_x{{+c`+!+E7>; NPaזTFjHH~ =Շu@߻LχoWTr̷ "kqfЉlCWǐ&%#aTc06 9qn&>' 5"];p.8V3~.c+}%͝$ˈڏ XW8}OY =K zz\;m󛠦:ʒvRaT&9޻X?*2w%7C?m۪BB4dd>IJYLrdX&),Wna4JúNV4g.Ķ{,98m7DU:9=0#nG~:7}9g0԰#$.}-g=S*fucPR)^T SΌqqrFfl/: VA Pرc-=g_IlՔІԋ֜ b8ze"p=S= )), ƙr\uhr)}yz _q`imk%XPi6KpҤ~! 0ؽM*=%pv]q|iR$W|dj Є~,[ :vwX0( y v7tvDR?\e^`;j XYEu-&j>Hq(U4*r$H@{iVr.GuH&YsHVm/R8s:k֘`@Au:'8 oRJXqJN-Y_0va|4u5tAR _ڦ> {z>wEx]ߙ; rp_@/Ţr>Ñm;5|>snP"r>U?xvvjϤJ֮m$r=UX]!)Wn[]NگۏX‚UW$$>H^9߅JxnA[Gr2n#Ӧ縄>#^(9۵PU*tY 4`~.߽>9i=ʂU[(/UOoc+qBeJ=,dYUØ6-7ISʌT葥 CI[qؗμV7;;|^3@ϖj%Yv;iuŊ%t |m1/[F2\sSơ+JvX?g>q ޸Yչ̩\kLkrm|^A[a>{{Ng6 3nm_îO;_9 S`OFY~vz+ɴ]Ӓ=;5h}<W"Wavͧ͞icbrt^W4+#j60fg5CPئ6^A3MXbޮL G[^]ڌe )+'lE ,ҝGŠܧd$΋ ZKaD׾ ;2TlԩU)l^tm`0cu1ʢΟND0]}P˼뜒oJ2$ z{~&<=cFZ߈#Kq;Ny%tUM'/"$jN0rFL8yRjI.gdmf? `HЍ}IR ,3thދ95/dauoDg;,y|#[>luə]؉o &5qfn ##(2;f&+0+v@ $gj[b2LMI$ɭ(g7 gyYx) =w@!SƁrlڑyRX< #teql92%Z˃UKġ>1aC9Ǣ_aa󭿹p+ƐsVDΰ:O-fu> Jg ϟ޻+HzcaS +A;tԀ &'vQyqBc:Uȉ\ՁF]oݬj\ gT\e#ڣ {QyI`-7RQ +&߱SuS"jmt!aU O0\8TbXSң7rgle( ,yՌd+ͅA 98Sh3B 1u'G0Rі.M8h{Eɷڊø&BŸqRGq~©bwJ*"Ie=8!irq$:tfώC=$y'j-Mrjڈc\OnbeO M 8ꇭ[`^Ў l Bꕤ'=O#L `O۫/ԗA?(G2?$+Z~XbvC[Uar#s+ݎh*9ݲiPZ#Kw=*rbyP\_%/j!{`3"3T1CtYlA= =o ̒އ߰_9^;pGp4ތۊ8^=xъH>?Ǿ+Xs&Xq.-rꦟ"li-G ;%$JAZn9nD[Z>Xs|\M:)i &[LcFIL1އGu`N&n څxO,{T†G d`mꘊ!ҢFŵp.st7?'YMvV bI2&Bگ+8e&&kbksGu 0t~ N0^a9N`Ux9IНz8L0ۈ۸TXSx&NSW30IyzT%\ zLshҋ? qc .еTjf)&`SOŖBiKU06qp*8@{ ljc/gs:3t%vmt܁@i?8QPXTyj$s{ɐ"dƯH {5/c˧<`,{-_̒R90q)4I 틽O2*h A@w  /M/R `( (:p'J `#Jll,(9(s{oTBu, nFBcgAXR3'(G+׳xag"fm3ɵ> 3wR:tPQ_\~t6K /gǩХ6("Tϳ{ogqjJl?TB8?5rt#v$ÃҘqiU$w=)obN!></lSKadkvN7wm}+âi0_]% h I8ޅl^xgd{thI&LV6k I&}&G^<ƁOGc۽ ??FaI[dy9[:O,`H^Lf!B℣fFzrS/f_8~6d0;*/_V>ƇM]Gvcn؇O@su$/p "Ef)_mcK ˲An.gۉ˻9l}fq^qHZne(VudCpWgK2gt H=Gu?r:Oh 4ò 2Wy>l061^4>#<3PK?zۿs)1"orutpG=cCI2wN?{d ܶ8i\z<)QC| ⍮%ˑӑiW@2Xů ePCfQI2NO}{ć}m;^IU8)n="z,.yDb-G.<)hG(r J)j6|0+v׏dpz!$?xF L16)KFȷ*a2lˏPl?y?JT~="΍X0'Fyq#j>cWKx s"IxafQHlVr)wi]3vw->QWIȱ*͵[Ir˃xE⪾Jnj!C[lanv~a m<L) 7_E SuwGZa\拓YvǦ&悜m;5KE"&q"^rXjp 3-~3 v[Tr )k͒4x~Y8~<[T/Ȉ $R4[$8p{SA](;MRM\e`2l?~v~$TezVԞ ;WD׹8>*od$V-?;C;֋w` UjP&lǘJ EEf6A95gTIfPs0hc*AZ@w*dy7$mㅖ}4"q S:pر"C +J=lЦzhM}̴NRwb!׶Rπ5y8ȯ*; 皻075 AX9yd=WWZGp!nGȼdn㪵ycVV^$%'Wͷk9|̕WkmMNQ o^/`5'u%j#\1Mn6l Cq?.âQϭ0(sg<~ i*li;*OKk_('f >]{Ƣ?*ʬ*a$4{/Xx lXji1G~5VD CcmM\VBS\N6J-1DwRo>9 sRoYhG -axn?\O\vPąo1KĄA?u hq>etN;M=A_؀kK¹j3#+oiOAn{J~_LQs7?& ]9a}@ Xb(tH}< "[E112.m#3K=7źB3'TcNI'ɞ f۟`@!2pYE lhg'Do?e6f'PrDVz/tbC"9Jz~;V9D2sZ\$KUq~hdt ~q'`v#І!d(S[ja.WcMM4{[y+ND·>:ihMrO???朖#x1*o^:B{ὝՇy$m w\쉵qg)\9K vjᔻ<06Lw5xT $3UeF)~bF^9mh>l,f]]KRbJO=jP]S ~=/+{=ݪET*YA*`~( YƙD:?ŕRa`_hk<JKBʎZYk$s+>8} .V> /Unk;=6*.qZ$B+FKSH!D.k 'pv쩫j̢tl,CÓxj% !vF2 +ʪVszP`YEU?%4ℌD0Iihiڻ_7*m\'_p˻;i,̫(FGc&O.`cP^ .q8BeG&Z΋#Z>ę.%a #' ?޼LSn+̂&>S]L'1^/<70h{ുY;NݶWn8yxgr[2pn?ϾK1$|3PF~ ɞo!rLǥ>'Ղn\Gs[qUΗݭ$hc7uz*zCѝTik.7wu:v`}ĪN j\~%J绅|b~eSe fd }@a7@Q{߮v O/_o$L29V3VK’Beh t\1 oQ$'}c$u3\a1{Kۜh{3l랾sh</{yx| 9s+F0%!8СyV Ȫ >0O5ʤwzNh.&ý3`i 7Z쾓%c;E|8f:YpgtoqP pŒuV!8-ذFh|$ 5|—3BҜض d ,Ca`#Qj˟?OPĩdǞ~Ģʑu0pYDE-++?01M6C7⛽2::naBx2_4}bXr jo-ZaD|I7ږyۉBw@MLTSÚ |o(VlvU̸Z~$T6}}¿9gR8N{eW.+eLDAeE,;w\)vym鎓o3ŝp[ϟ.3m.Uz}Xq"X[yzoiL% %*T:Oy)__68icҼP#N^r|y~?:PZy}gI?O~ 3tM=K$dֱ֞&^csŽ˱Gh/ ,;Ծ?y |i Eou!&@Eʆ –4Hv;^a_/_pqI4O9V8i3-WTy .7(4=82X&A9oW'#Q7xO&ko}f']\ZaZc%.lw[*#Ϧ;=;a?Thry(ebTzI' <`x|P=~z9={Y( ~Ϧ= )\ ڟ;P@_R#έ.T?WaбL5?AP1gl2o&I2W1/ύ4s oQ-$S~0NY+w D vvX3':Yh]k/Y;Fx !Zak7^Nc*=R8ݪlPS#7_6AaHܝdGsW  b'_ :Fz۱o3jVHifmSaWH gnP'Q(Alh|QSBes?0ap5(?v]{qWE?l憓GOG)B擢oxq^p>qu# >03$jk+oqKPuC+t|v*hl[=h{OY? 
RNɎqlŮ_@ ?|.b訞bܠNÞC쵅L^Ja!Uz7w3Eې~ o" @ku#~*"LDcXy}2=Jwt=L҆:{pʹTqqTZxTCƏވ; )L9MlÁ>s_K魛9S-Y韗':8 K lTH#6{J2{<;k1#̋:񪹃\8:`v4c.p}' N] zV i׽!LYްA* (8S )^:/ūL}޾MwXO^`;%d>unP2VOV۝RW)AG>WG$ۃww wl#RzZHu?q=k ]dx t3U.V 8ҫ>YF:`<_:KA /oޗi@SpZnoֽ}f 7V `ק͒V\w%%}M$a6 \v,=o#`cY un!B01eg{l%ncA ͻUU2=."Y6\ 3N9rw&%qWѝ$S G;G\csP\eԽ@5ڞ.4s``7ߏeo0-V[2slMHUTez@eoU[P2/JRVjaٰ'8cqQݬKr6ϻ<:RqBvw_ t;SOV`EoR(5^^;nq%uKAA @mKCw޼;g/tg#{/G4py 00+W=gxKeHrݛ%8pfL]Ҁ),:$ȞO?I&'L8cA ]Z_OsWG /zAlio ?%ז0uGaPfs} oax8 #O,bs{YOIO܋\GzUݟc/TGÀߕ8~h+i#;1E8nZPJO0}޻KrFI,pSkzH~o(7ިZCGEg3HB]H+0{N2'w"wX~ UE2K^h/S`{w >)(Yoi0Fzo(Z8OY, "o;.Z$EؕP9f&ary:ͭSũ *i^ wKqkL٣u O$)lA`Ep )uud"6WсVaBTriuF'XnJ9ז'Hփ'5zb/[;-@eJ>Y/NxתJ+@!6*hk|J,pG=v|=b, :K0]rrqj{.byj6_I!eg O&ix&+ o״9% ʃ[&.e?& 3Oڮo4q#@`3?ݑsO&x`F|$]Vg.j3iBFBqug>*͐ҌNIe%O2@~dT8;ChЕ+HJfÍ@Q74DHەTl03gIr\"X>? e=ڿ >8 {˵0R¾7?'N8 9"Mݔ΃cS~Ⲱcu >=6~#ۯPG*}K*[q*Qư8/¿aݕ~AdH(&i{3 .*~wKHFէ72 okQvI[(J0wE *ۋcen;V}ЍtB^7-H~؟'޾FP =:NܮNy)Z^\1:N0}PV^К]P h? !;c׋t Ozc3Y|8χK3Hƈn-t5i3;an.'-A7jv*>0ܧ< sҗtwwh"dSG̰?/@W|'^` .s pًUU3+_mьå[ S ~vl/깪d|/4- ̠^P~ߙ%7by3~P2z\gft=p;% sMcކ71|~jrpk"@Sʽ 4~jW>c\;0B~˶o_2smRl2wvgKVscӵP I8g(qmY?-NȷB_`ޑQX#YUZ ɰV۲xoRƿpMI*u? [= D@򓄧׹c{@3sLNcϘsgFv=ɶ#Ð|΋el4׺r~<8yOtY[y`D{SR Ww*̥).HZ<㯴zE0PflPߟ| ^ۣ{q׿LqY[|-wvX JFٔ; H&`iDkeC3y1m{0]+~wG)6+׹aWT ,t%J~_`<'@o< z-gloq O) >Pr,VTf~F(ڹzf V.i {a>'>ź:vg3>guԋ_Zt湘?`ޑͫ.xOߜ`wvo$`avtS@l$<\>`q[q(!3i8F1oWmju[cXp>%/Nc1Nj" QD~MfH'd(ug6.N1)GfŶBx㧳ýI~-gߎߏI<۝m03=V cĎAkj_4qƂX-_\tB/dfp!aQ )lٵ8 _u:@^c< :z+$>1HǑ幘s>'+{6Uמh@39nMcj> 9fݣyTIEa! -7L߇Ss:ɾێi[ƶLL%귚 y`unQ*2O0>wu$Y<$Hx1&+fIyײ/]|}FeSޢױ z_pa_geFںٓM- oO]TK[W6?g匛eM{U-oCp,@Cj),zإF }} a56>48nF꣓$顣FaR k,% ^S;qm̳rN˒ ƴةK.NTvBzɊU z~Z*_|u#f*$ǰX G?R7[V6ӦCv'Zwoz'c?רass϶f{-B.=.!56دsg1Tl<iofr&oU#3ō^ }j|'De| [mؿvI\>dAıfԳ78fBxvK>a+NPo$WO~+ E̺nc>S?H t"̊#`#9g$5CF-:cm`uֹ ̔/AmBpg4 3ckZ #΋Ljj}D`]̙(Y`u8U$Ss]Η$GEm{59`jn@\yARlS@sq[,яp E]{.Qw>ת{O^Y3>s°Q6.:{*,d?\TdV^3GO{u[o=OP,|xŏH]oZ6hGK2>B8cdwy54n@gaHR߽8:q[`3ɗl`{?17B㭽=b's䉧 Y7\^$ ̏4A?+(3Z7S\Y7[Y(@Z_v')s/m%-~ݡɁAS}NItjVN|tj INw#%Y&OR'uRc ݥ$E1!.sKI3X 5ٯs}ӝI0.fZ8Eny0ƴ1*g4}KjP#};7O2k).1NU\2a57yk {CRIkfЬ$Yrr>HOSA`N\. mgF 0M/p6s2l 쭌u@vh>^v 6/ܙzsea\ ؖ!;+5Q H ytV}M>@2rDj"bwccyzde+Ku y뇺z9vJH(sAI;!JQn ̝/W*| U|isY(k'pIenS?z,NIDY4!x7XKr:./yE *Wccmr]qܩGpHYӟXɛ8?|uŊ6;qu3> ,; Żkcf+^n'vU1oN!&#UX{| L%*_;I@b53@eWI{7Zny(?= x) 92X_)w06+6*s < L#=pf&蛼Ux 0ۇ?QK:( U@뺵tds^xGg|J+mc %%%v3ʼnϗ™bjްı-=g1I|f.3:o0lC2ac2ZfMm 1;$ hHAx<ļ:/=WNK}C4Csvy IijdyIBp^2D]^ռKxQ `rsVAiEs/Fs;oTo އZ|j.bq|,DDa=KN3kW:O$cOa܁cX6$6 Io%rLJqk=ma6CoɇM{ ̺]_q'=,o2vdtN.xa V̙~8},G1|jaOͱ rAsmah"}J92{l*PY/'^d|'\|OgFp=o?B |;D3HBz'HU8G>{i@0YgԔ4^žyOد535enVq {prc^ӬnHkz|Y)%oܥjq Ք8boɟ-NXc%" %#M,c_Kp7tT |¬4iH\\;xX)æy8#k4^Zx-#<W?Sep][\3uʩ݉O}Rql5YѼsQ gc?̜My|JpFAb4}* V )~˨ *]QvhT<^OWB]ФՄs`g ]ܲ U[`o͒)(ىݚq+C_ sl_C)h sLūKy17jRވg/JZ?$}V_`H}8y`=OÝ]$fkE*@ҧya~uژ8tz*mZX,ff)E2ylI[tـw ͮl%Gۇ=B0-!  $GD>3P}ܤzF־0iv;iŠHTIL^?ߔ$ )~d@2%Q1V-.X@ uwv;\[ᅱVz]Ǚ7 I6:;$vmǰ_Kιl~0yzn5pblmf.n[Wp`_2<~ve'U8}Nhb W O%fOEPT|UƢmU߻GIe %TTq[:wu=Evb}۠.Ye.o~Ar^L- ]NRu ܿRPɨ/,[mqK ~O89V Ig3')7RٸLTG%Loƞ\n@j78<9uYSN2{IXV{adѥ:? W%"vؑ zljC0hųiNJX W6r7^^lƆ?6ɵ8UylVcVkn)CcW8[ } ,^0Lb}i6T1%58^D f!Na3>kW)W+jb+;+w;1q@κI\nM)D0h8 5 w%ҿxű$RxzۦY:3lXUDf6OR!OgG.W=8=Kr}ڒ K=Oomm8GV@iZoY#?ŏB/7Ilk22O0Tnc- ;Sw)$ݻO+#H|`P;;zeun(>5c#wz!CwcژD}٪"ngNf,n0]\R&}S#|W%"~b+8Nb~F5 |]YܲlS&؈cJ^@\6 ,;%@IAx p$i[!:(gx%4q>ǸlLS8pB[==V]//Ɇd9?MmR>0rJ"=$=UKHWep#茘bEn삹w{qO3đ\&6[USoͼ974dnp'2uH n5& VɔзX7Bq2dk)>;s>TCjLH:q: Yd.%[W`i^]Flr/ Wj [`;!_䴖iW~{aDŽXiAiƨDmqֆ0Kg T4=ؽێs)l搡j7GX1y9Ad!.m=';,`߷ڠmCL ~ەM ِ$NY?pVd#x%[j܇q&8u'R>|[3#_ǥm8Qiv$)n$s98PVxH=|ī('kfNB8:zLQ%1㺈<גwAyLr/G=75/}Nӽw|iP{@S͊`0X~$E9. 
zTȶ?X`Tx$(6&m1U^#>9Lb𷛡NOM}MqԨv SbCM`!>V0wnGؗnY|L@w"gOhʯ~wt6xF\*5כ"N=PS`wgc7vueIU,- /4q>X՝+$X?LU`I2 ݖ{ Dx?͉rj$hTJ0$LR7V\jzY('$ o:mХخ3<m҆سE}|>ɝ 2NX +qK]:@vsO "Ӈ&ʋ85rk5?R ?o")L/BO䰁 7- eKHB"n`ɑW ƙr%:&&zj܆/͐zNz¥ggR^"5"[\8yO3t2/q ꗾYFsxQcq?bĞZ5W^S5Nb}+w*%$[!9(nI2Nݺ$uY[cAxclK B*ZKʇ-@-0aShDҷj:/JJ!$kGNbeŁ7n&R-ͿV)z{}I;dGmHJ5[FإDZhA躈ӥ;luBoÙoc;q*yaDWʿkd~ ?wpEŔ 0}\km1Loumg dǩ/^@ͅ@,N>ϐ#^yV{ۭ] mcY{\諩WAQsss}Fr^M2 pVj*&~ 7%qy8saLébBzaD=6-jp|{ҝTnVua*mrrK\7Ɠ:#umP/ ֡. /1,WU1GO-)~K&,S-'$Nmfib`懌U :)?a+j"lӶl,CRe1\^2WK1;psT?9}``ɗ&a]d)vQ w583mǕ4^$xzMh.jONUjq ᮕF1q0lbO?YW9アR@++ a2E0:`]._ k Y l:{H75߷0 + ׆SpZ:?83wJ4ܛ<8f7wHR~% ӷXs$+sl-|2Jpm͸i3t(8#O(+V -kg䆩Cg_bh[Z-]tc;A1B9UZaJq;]LH]GlkiY,zqXAQ81ֽ#o} ibSI_=;ȕHx4=IvӃ+<+ "Bߧc\T7+]aqjI3l>{x| &g,];OҙW^)5iuְ-̔`j֜NƼS;Sгr}X:5U[A[#qʕZz0~H0UT\^5ƄFPfyc*oMa'쪬RN2_V gw;݁7^C%k*~>5ͬQ`~?Ȫn'o9=G$-`k7FXpbQ抝j~OoFB!Z8(19wHs\(l.D vjTqc]alu@0s#ɿKJXyWlGq,п9,+1VDrїwwUaKzN] Wr?+>a. VSp= r\e{j+x/˹.zc㸪:zy*"]+㉶޿p"DY}q#dݟ!w%筗CU\L_z/PRFn 8ۧLOz`$b 5b=gPWAn'Mo1tS텗ƹqf==3or{ C]z݅?u2GV!Uan{/Xnvur6Yކ؛bON5Iy 1]GUEy~~̐?7&%88èPI5pmT@5A7М/\,N0?)[znk2IOvg)*6kGG90MÛ8:w[lڟg?%٩4\ܖ ќly$(L炁:@SU;ұ70” rZU8%8* uIW+$ήg%Уy?saEwna]p]?г˝c p]V=d̷Š_Hџ~KOHa''{+f*)놝'q<a[J>AN A3(2umqțpbݜ/0Zg;Tq.tGu\qsD*c-q6O6XH =T[ƈ9{uwڽ]3vX8 v/ B'a&!Ibd?P] s" JsP\h`W,jmvaJ4=DgE_4 sKe3}H-s8}83Sy,f+NN ߜvYkZUz1gk|*Mǣ :?Gm JE8Ssҗ tQIp!#;,kfƙ2%y [a*fL+0sEӁɫւQ8:FT854ГXefi򥄄ga f+e5v=\F*-8Pi0YO?uvƊĦr;Y!vsNZ~IEݟvmL8n?KO{򵇀&C@ 8F\qJ%m'WE,ll۾? ˴Ta68ٳ'5x9" ; R%i sMXK-5-\BiP>F/m}H]]8v'/\fUNdEE"9eSmBIJd1u/=y`&5BD¸df PsR14`[}~oPm B5+.:ZѰ ?rϤ)ÿ ?89{>γra=c}U\V?߱}B uFlg"b`0ۢl*7>{Y3N01h[K!sm :mNlfo 쎲|V|9Gp)6Z ĜԤee{aNA,(=|葖*|8~;# IRR_IBJDC"2ZP  {=c<^<}ݟ׹;tnNb/OHdy]}MIJWe>^s|gV LA[88XG[R96^qY6 a‹v s"}5aNO/~_ɒΣhca /=)βJ;ڬ/ Rf˳Oѽ ؝N>@킍v$=)0bY8YK>M'X;NBHug`LeyӀ٩G뷼7~REcwNl,90*sZ .;)-9 |~úzN)nXA:W] Hw=Ӹk3[E.|*IMʵ7L7AVN,C1|klT,Ce&lN&-@Fu?A_׷} y>C{_ -\/m:ג&#fA#N~_xߟ2B8l>Q~UF"ҷqŽZ'pMЕeuCvB +f."+n/y8]Y:,ٝKG?.x߭OqS_]dp႗[ siSrFce.r9nߟr:*mL~w/Cr z C/:&Vx`U͕Hٸml';֭%+P&6*i3Zk+8\,hJ yvt-4hwE`ޫSWyFw3v 1ԧ_QSj'+%|sגXD+e|뀙'Dj<[1+[=PoqhEO xF\bs*pժΚ'8wo]\J:gW M,k%k̅맦Rױ\\޷.Y+j^I6S˷/z&DhZקxxWڒ1W zxޝb}Y@)ԺIaK!O=+;\U[/+?AZuQP " ,e?ҟXORtP)$4:Ey$oxQBmvǺ-fwl >[u up3~wk!.g>Seߓt kC\;ѭo- r n?r?bq 1yR9?LP~@R,k\Ӆ|ǣ7ϸU%Hm_0& 御&X[*MW(<.X:d/g|}px^TST@8.q⿷uYg8]ZT721k撚 ]7F7"j߬ ,g0Tb7 ~tž v}Ҫ`8؈;S'U.j Z.MCTvhP|N^^wפNgg̐v-Q +9ɾr(0ȹ986^;-Fιu!ѐ(tߖuh|33vgrMFBoJ*Rh)};865i+U){k-FThWo ]'>qH@lSb R &ZEl^# 1.sRJq$WX$Od!+e4Ʒ!.P9S89tCt[7+l:!%Z@R嬂[,e/lZVh-;~Gʮ&tKD}AsxĜ/]-~u Eroo[%k\`.zXOӌQeQ-0-qz\l,DŮ͎?an2+7fa `X$ezޑ8#0.A}z 69%Ip}udLV*n$@Ob.lRJsVZ(/$uߧ˱v0ircNF-_9ACS\a+nKz|z;J zb0Yagƀ,:zI&q?:$N<fbs":-WPTD=+`"o9W4SO$/\՝>4gOCW#5__+^x:6SGpz@SN=4o8jށn*0mupE <.kcOzNKA=3Qb?8C瑤}BhL;zo@vEcaiS ~3Re2F-mIW|/`G 8K>†p楨>A[˱,kI!qJr JwVz4HGb3(Ǖl /~Jƽu8!w^de/;ONa?X sI/bQ]U$%UC+{r T}We󑚉 FǾAt-BYn~VŻj=zAH `w'/Ȟucğ 2wZ1,ǸzdC,at oxGÀbqQU!mc'_~T%Q ՗uF&5{r&B`U=o˺_0;t,ZEz!;.of' \I|Igdܨ{DyKtT[bG0H̄ _g^HdĝԹMܜlƫae2˔H 9~:GrIۖB =eJAAΫ7UV;7[p2KJEfO8~턗8T =U]jQ$׷zKpdRgI ;LϰXo /]LBZtzj  TܡNnQ†HRf[!>e s%l}1`{f.MpTGi>̋-}/C¹fп^y}"5mu8 ?%OǬ8.h/Ww| zG?XF .O9!ٸ<)AVT6ݸ8mvdVmmmKw\_i5佲/a y\1.E\(_CM[}q/4q{cS3X/` C6-_e s_nCW5X{{"ٌ/ !u!"PIכi Q60ÞY"j?ݿGg('" _+ˁx-(8ѷ6?(i6ƩU!8f׫ȿn Ӹ~X7oH`ڵw #<~z +qrڣwQ X+Cqk].!~O, Z({$ȴR@WXb9jmI'sXky .+z2Py؁tG`ִ<~<:i 5%9W!>9rW`˪ kb{`Ӧص"64HPVTPB]u#׵maZdv/)$Uݰy/)yZ]~5HJiC̝J;x0){,'i jaw <ο +4?WatZ:.m5g9orİIYW̵u>MCf̣AlsbN)jMl8 ME (gsg.HmoJ#5@~w;uh|QXJ|Ɂϸ/eȻ[vXWn" SnqU>r0+5+ i[d` g;|$צxzXnBL7$?c\ [yێ,+ծm")sTnPnkTY͓߆whYh{ dPӶ8-5.&+Sw7+#w4/ߒ (d *I*?݇epYR^4(,`]]n4u۝efɒC% OUI=ZܚJoKVN +Qq]U'{-ݔ}aK릇}v~pÜi7s^zɠT-Clv-!aShVq ;mƗ Pl}ĵM,N_Uvi0@{fH4[B/cFXonP4 GWAƌ[\ҏ}u#GqdM3b4R;0Eus} -'aD*,.ݍbkae=M!HKK3^߳O_/vMkS0lJ]NSզ!Ctf3lK)^^|B6"bT\RmWdrN 
_cH'O}bCdHz~1>جX2#'O9"fm*6(}x~_dasqPiv$H+Kζ9В}T ߾p V[8qd@3:upd=c9͠y yLx7"0cQ>Id|/N.!&x F^?s.| ǁX`+:e]mE2BWߧv0!1љIr};sc3'ͫ:0+Te|=Jaj-߷Ъz >gLv>gd!{b"صE!:-@ps'<\9+] uײ&94"yxh&V>P=%m g(H{d?S C㮋Oא+֛t 7NkvN;ʼsO ۉXjUIi|¹-SmKJ&cxƶwةU8myHMghW<*>nS{Sq#+/`u9N6_s `ܴr QWH-2S?44~EdN.6c/L tƳ;qjTzs 0kCUK"ilq~) =_7`=Xbo \K /*V @FI̱`e]|1AqPA_Z9Įf[o͕k^`K[UFIUȞ lk柂YsGp|y(?ᘀ{1fB{-,5zCvr%ߔlg_Az2h~[/sF!,mXJ$r̷ݴC &c5d 1ZYm`oc SK Wh2&<>v6>a|f^JMB9FzAv_A;eJ Cxj),5~L4*v"@?#oȞN $wǥ83z[`oH6G֠ƷAыA`-:ݶ<xJ wYiPdSҕwsy7cFjf8!6Yrad~cp fVRƙ{NA8H?Zzm\PGtZ#Y`VkUX;c#wt m=npy8o5t}xW}R'~g`o0#lֺ vYeob!x -8)wmI 66ԙ%js0,pǷf -néɮJ0rh_Bf,؁D8}0*%;,K'Obu[M"֐le+5<沛ɳ9sGvV 0DU.#wwޓHx-T:+7O¼2\s zS#`Fič{or 8B seۘ8֮ ]VM*v1"πw;h^i;JJbmӇ:g1m=1*zFgY''tZ}YWhwˎ(]/)夢͐Y㇮t=6sYL{z(nN*׍oV'!n–W<C퐋h:adpI]5b =/Cb"a 8zlFz+˖1)tO5no9}t Iy bNCͧG<hHg;P y)e?Is#ٳ8O*&9O[Yq"PO h KH.Z|ڔ0#R 8*3j#aqйqolf2aG7ԕ.SԬ3%UDWoy ÒICg;б&$u$\4u?)]b8LhESsqw";P.ZSx; )F9Yi;GBьFE>xP'lߝ8pሽ#(8 ۅC_ξ})ΈQpQtN tq\\:'Z ?{\_)O2p?V])r;N&ۚ?~n(5^N`ާkB:&잍<+āX";v\H*tuDZS$ 6 ?!ueB֮禰x5*uI) ٬ [qBȪM~ : #)oڗ//#>l27 yM漟<^rS]aou5vT]3/z5l?*~C{`en<:Qչ[_ ~KdE?Vу,T矫W^B kGyO$۟ Nv0zV-7U8rL!9d Ŷe6)럋dYpk{'hPv9խ0Vg6n ֓&rAVj>s^QqHNʱK1Ğͬ/ é#69/~_'(w/j5U (#5۷qmd;m6߲^)h- ,;ϡoũWuU輛f\I`~[^9 1SHb? E]uF? 3_B45woOk(N172"nyR!Wt{#c()Y "@&噬w2i܃s$X޽h$W߯mq$˹ghuD v)| 7w0|߰T{d-r:*GTviTn_齷`~\L F5t6%ٛegu-E.؍ o]0k8rn+7O1WX!Kɢ&qqvv1d9}T/N ZGtaldv, a +(\?Vށ_h*2}yU܋?wgEMu[u {Ļ: ˮϺd(8>_d\pS:KXn~pcoUb\qŻ ~DTkHxgZMl`ַÄ6: JLWOPkEr/%{.31CSpEr4+zPsqGen8J\|;Hv7q)S8P>SW7]>D͓G7.5A.se-;YA'Q Zl+vf}c54/hԔVF5IwJ.%ߚz8Y3= ^GbzZwFO,V |o :`ƈW Ej4+y~Z +nH%V uZF]l9'ࠇ||[%W =|M}j7~t>BX׵*}p:8f/]_uJcgW^{}l }ϖIJ 9v^3pf-i[b׊ؗ1<@x2E֝6º"R{苃.Pwi3moMoՊX!w/xLQ 'ꎉ-X{+Vߜz[)Cj'vd Zof@-' @jjos׽DwExX[K4ڮbN #)C\A'70է8tQXƤ2 VdN <_ sq\h٭{w|<f3In:%ar*e""Vr63dw> FX+=Bmahw(08 qq+ܭLt?_{外m%oM UܹrH<;GH)<) C9x'kPWt5hʿS_Ja#dB1\逐̾ C8xoHj]?d3n. 8<9i1G䒴[)KS`6cL} U?T-tgp;ڳvJ<{j^`)Tm.ѿC^Nyr?`uE{BO`;<}{aԹ#riw&mM엲0ӷ!>NN:! ^WIq&\[u}L9k>FYLXk1dlxu#ܙ)0LJGVѡt3uzf}+P?r5ƒ4-57k~n.?,2[34hU&U {JCr?{7Lx(ZĎ 0B۵~݂c?g`Fc P ]90dj9ys: #KϚ c鷱a6W/L=5}>ò9` LsJ 2UT 4$ʺaKz# QA2T~M6+&KoF/_a2yYwgۤl!o>!umދ#Vsרsuspg ʛeM ~"݆έNL,=P+}qc.z<,`8'b1.04Sڋ v-}A<57"k۬?f?  _h}|+j 9Ԁ鮚Jx3Ε֭_.WIU.{u;=$ Vxa.$AAϣOp;l.1ąë&XPs;K0>6S vQN4K>sS#J|glNЮ[.NJ{ۡORh)OW?,>~g3j1NmvU]A_z۳I$%F֗9+@[׺Xfh?|2~}dہ}_v+̗ ^_vSZȌ;|aZ,|lZoSBFmOskcp 4m?q2='l6~0O\z!4g!LkKHK8I$G3'6Hi3iiy+"L9m@TL:/`mK[ϝ[RbiII5S]5 X#rH;o ߘ60f4%ve߷zsc$nk8=;wZ];߭R)'B{ٚbIǜ*7LL  #[)uq^]H^g5oϹY 1Xpn^-'Oe TIhy&Z>ݣ@Ըsm0eJ>|iWE$ݚ)X%4)qB͠T[)C%.NU4gpC;1!*mF['>Pdu/6&|/tЙZa|nj{7 6GZPw%]rG6„WN@$w95|΂˳{JA'41݁Y%Gdcd&)[|1LuU7:o?}Kۻpj.jvFl$ /{&kۇ` !qCvM2M?ʗ~ule[(HP4YJBcJװ,LJV`LV[wm2&cμZop.coJw,sgFka&9?rem3 xrc2Ыfy/m͏Դ&ثwk`-j <;;sitPsRv}\Sy ձI;5x:v^uUWM Hk"I%]8tPCxqlrߝ۹/z~w ;iԮ.,3bd:aB!ro{*zuDOv<'pyݕz%"ٷ leǂ.ĤIN%_'3NC߼}{1 KG n?s])Z>Q; aaA Iw ]?l⯭+u_m|3WUn \\=M"iޯx%{V;H/k,@}Xm@N L$9,Sn%XGS]jԦJy0:M_X4Çz/CQf-+XSg`#/_E:do_k]0uSr6wt[|ˏ̘?*0SgE^Ě&x/xh;l_,qT.ղWFC<9ϋ_R4iG~dEv*kODC]GqɥBʡ%۵+ni2Hs3m-,N6$6j(3۳Hi){:1A-wl&h9+.zI`9)#'͂^HoKՏ8Lj\3L{ !bՉf9Vo=Xxk9 M/=ީa_IZ|!ٶH̓*uu&m/[*9"-L~XFZqzn` &8?⭃ x8&LJbCK75׮Hp=vf/$(?mjW+aR u7XSN'Q +}x\9~ݫcǍᷛ"| ||b$KiC8V6Ws =͎UTZ+  tT̖б.bȣ{lŋ7[~̕q?c`3 "|eZ)s@ScQ7mP;yȻIy_؟@lNW]/E1c]_8۞zǗ$E[wT[u}&}{Ntzܳ8^@Wx03W~y>ryRakZ^}EX?`;5=Nnptޜ&Q)8MHy=o3#zOH;VS,Ku cI&k8Kyq(ٱv@vY {AoLΎ] R%3,l~on[l;t9{݀}WZ ©;LaՊJUP!s\?>l/{ZP  U/BZm0z!"q?홰KK%_1R[&+,m"AR|Qe΅o,?\Ϩ8Չv`;veϐ0p#!j v uujwN-%8$c ₋qYMtU&5mأW+nU Mwehn{Vқ;,#)9P]M0枛.m,@JRvKd<ʐo-*3fb_į_ `~?-v׋(F2v׫QkpOsWOr(~"N'HQ!x{H>[Qӡz*3GħZÇ"Q{e~`Q.WYuEV. 
{ P vnl?QG -#y^J,aF{s 5GB(I[_"|x!'$ނkNXʝ7O/2 /pN :^o"8Nw)ԷyI>\RZמ]&v1_bqP:ko !v}s ~],ap[%ePI >2J̐|@%(H[tclΫ(EFt/mINs-M~{c0?EF :sOpV1\U*5$gIٶ_տ&Ȗ-ZR!`%a*m|~%U[}<+nanjL*vsR/k lǖH#_$$0C׮<Ͻ_`gz >X,CU[ SK>.,bgMlq@}[UnRx9HW8ܩ;2,ctNڕ|Yc|AҴW R%Aw7KeˈZp y9]-`4f.ӁލWpBa{,UkE+/Ep\>{S~"6OI9kDr#>UZ40&&  ƕ.)iyfM NԌ.cv}y $G\<H\U+eLksKŢ8vÄ8EK֙[y*= 6JogL`.ڗ,I^Se>NiK2Go\Arx}")7pnt$`Dg`.r2 oHʊڦAƅ]=H;k|ʾV12C0~+&{v¼S)eUr3vjA%>Beض˧n9OF, []1>L1^,.]|_3>l'yr:{ކk8H͌{$+8ty5XKXE6uofεBcY_ C`Ys{ثq?c{%L*+]N_×{|izؽ}tUy-BS4wT" g߬mz L4RoK%oϵ![Zs@nc+(zeN ؏tIGbl/Y  #&^J68ci%=8wpl_v.PÔX&vHJ%<5$6Y|뭱j?LzFp?@EluRoyEᔭGse`Z|[ H۔ ArT{5Z`$W Sr[𪢛3KRf cttNLl_ vmyF .l)Yկ;/B2yRH0F[Ҝ"==ٵ f @F{9DaelYwLO7\X ?}qT́S\D&}C/.;$74Ě].n^8_AF8oIpS4d"a0/% pW2j2z0 ҀuV5 Fݺ!}XWtv0VV%@4;̮ERNdBF%g c_?)}Yw.a|\8-}/!d?_0Ae3vt=|Sd#qD)'VBSr2UumuBM8~ 88~ _fzx8.ڝ*XM ئqgNYU;:9 c}0myj 9]=Nu $58AUE'FpVծ$6.&7a֚%^=}AU/sւw0Rh$ TƎ_6℔j7TST\4+wUqp#-vgæ1v?Nf>t>wf%Jc6MHp'E{5V1_@х…Rt4y{kIl2NVx챯>T+kzpeH=};嫂pfwk]RQ`[LɻdÉqmŖW"1JbR~~HC+6p}88mn&S)/@KQshĿFFpu~Blc(pD+FϾ֤2`br*.e~̡{f6)!DRm,(Fމw{@M2Q;d&.ѳQCZjKh(Kfw<12͞N+2<,ɱL/NdyIT[ѡǞ̻FoPyp#A۝ } dCzDžOGH?h:uE/x Y əS'k+2Nmn~ǎw󽈳w{Fn\ 0O)amsU]=Oiha߷@wQvX?:=9|eipBf}Zsag_.LJCU-Kwzt_&ϙOSu0pv?Ljͥ=\o4ŎrCo^.K, 'W11Ll'3F 0wEa,F nI20cdd~z+@$JPˈ!I+bdaw\1gPm4/( :_H{/+V ,l0x B;a9z*to QB2-qR[dLsZͥ?sbѪ34Gzpgf](z;.ĘrY;=zs 96"Ar^l(WMqϝ8dY̕1k&qqQǮc5^7Wbka TBc`OO4_^7ux1[*t'<>֊U]IY;ol"ArJ&Q]v[4~jzLsqI[dGQk"U҇6ۤIUdiz2ɝfW+.A ܤ1 Y#9zkf FB 5?a<6}c!uU>>O/_o@J3Ʊ/2&8_'ϲN⺈t[OL~a*%0ǾݰZuѸf'jE($jWvB]jQ>\lEMkXvHC}:"l胎Vf_I 9E'°ZCy[5zOJE[lyO.]%7;pfi8Gg1XzegmqA՗2myk{+v|uq_毜j 0";Ԝw ޣpQm {1L8>[bG78^Er<&FƭHoԱТğߐS _( wP{ϕYޫ2%ln{,v^!,RF/m̈%m(h-"J˹{evؓ7lNRբt"|ɶaVR[_:0Qܕ/ a9(`igiOWӪׂ1ϗSߓawlLQ˲7-yP%mW cGaXR*l0CK 96gC>cwjfm_g;;\%kcW3Ԇ0,yj6Ѱ6Ȋ#>8w@oj0p8}ktȨ/mA keg{.CMϼEHkO%" 3"aWn!80wܱk:c~{5Oկ8mR]s׆lH>: l^}Ň*/U  bTo Gz?QG.ILj,u8 ȎFd[ ukwKpps1K]R|jmqz/?Cn#TWך@2CE`2r씨s]+L`=V4q ԿB[=O1u>yޫ` , 'Ĝ$IhQ  ( {g>⨤hMPZ3-!P'-`%?IQ^ g`N? .,:#8$B~T`O$tRe_q!#;qnئ՞гO:a{ $ F쮅?f+qdάViY!a}XRĦ[9aV!{@aCX:^np|5lcn YQπҹMv&W;iKkIh|^ޱGZK֧0mHFܨƫzIZ쎪׺fُЕ8q {%i.vo{=.uVɓ뼮9Cv*7:H>{'qQN|i=cKHlP)t2\~ܖ^FIo,ֵoױiW RwPVw<}ܴUS$sx|'O(b ݍ u 8ٝ+#CHʼ$,q{OTT/ߢNcLN] GW$W5[5snڭ~<Db NYHrXL|8Ƨl? $}[3GOTP`lɗXO]}r@7fLdFGr<οU]/9KR|&Vz}jXeH8Y<3"/=-jz?s/~e{R!$b:0nZCCБ)GTcJeb{2J]yKI R~$rFBmG&?G* -8fK\d }ҟ&k4`͎76^B l0͓lsu`Wx,k'/bw0>:-X?r<x}VmN;j8c6yEXop) ?_fo!W/=z2{Obx̼mB60ws"_?Np*Ķ' ),h_g3$wxs}h}]Q؀'/SX{6Aۢw0`nRpEVo l.FhJ L]ި R|g+e-10Kgnw0Y,Ap%0f˨:R!@<|Xkbۆ5Z%x;gB?cs#l] r8FrVhcMXQ-\8=GcbJK/+8p5o\7ⳅ{ϩs.uZ؅sC=SlLW[-#ep5+KS|u'7x,=9XE)ҞW[\NZg^ e|vM\JC}qGFؤ#L:Hіa.y1(NjdԶ:`AW'E0UKo%0 ^{9Ald9QݶwtPYJQhL"u$d~Ua#tK",pld6tȄuJGaBn'IJwL'tɧRW+h$Kib88uBv'==;߯'?m!{(ȷM~ !>ygRD:[G]9` ܙ0djF0׿c@m?4ɑc;D(0?x"NU|d2CkR Vh2 H=s`2kbKޯʖ붏ڃ``V_\/ )kz<뎲UomhX0V m|?y/AW~<ߕCHњ4N+n_p-<8ssgn҄Efҡn .OR+u)5˽I͊ۍV[ bɵ#_'Jg~a 3r%-3UY5+~Ħ RYz@/+{C|Y~:} .<t)GQLVi mKhPHlNJ9MZb87L^X0II᥏Tծ[{K}2"\2ZeM*RyKV0Ż}j {T"0c<+fm{_m#x{3 JV,`=:Z)tkk$' & oo-jCcUsޣ505̓ߊ.ҟqUAB8 g&z|Kf?(?z(g{:ڒtgص  ֔ FSհnjS -`gRQ}Աddmɵ]H,Ry4]N0,[KL6<({<i10}D6 7ܥ8-|t#Tg}Y6v1o;.`ZzrǤ R~°V_1l>5zk"jy[ i?uafޝyzJ j|0v>ɮii{( r4't0Q'fk݁:5Xe=0"LixMr*㇩f38+72N۞L}gC+; f5YdT@ܱÖ4휷QL}OX moAt_OPE6ZN @W!'-LZ ӻ`{B ,}`(ؤJpՌͮ~.ѬlRQl-ެuef[G%l{Wd>\R" 6J'=le˟ܰǒhh6pY[,Wff8(I_6qqy@Zh .{xt;u/AnmPtA=ƒ{ߕ'CK?؂Ԁ>;qp gVd *+)$ʆEY6Q2T&7]9o-&HlK_i* J qzi* Ҽ1ԋtH)~E*,ݙ`r5Ͷ Z>]~ o6&CU‚]ݚ22W)FVW6cmXy } [_!wK#ɓ퍐ݗgsUNź $`M$1Y2b5Fax3lw>Ϛ`@mлNrVKp޻ǿ[WOz VG:VZ: F8>oui\M/¡QlX \43_oN챗-{jKw$q !ɀ5$iN+\9w6LRWϴq:^.ϱzqS0(vp!LaoL*p#8&-]x,r艫~u1] dRz(/AP>S%FϦbKEI'84Zo^2g%*`wzM"H? 
.蟩'٢_i‰D`^`/M[ψI͹AM\s0$9mzp+Gt(8\䏄m_g ~ZxUg p}Ck~S@rV-3y{U #J6(<:;w} WKE3uu2ds1[T(ιQK|/ al !Nxl2K_gAVryv ;},Z 0(;YS9 h,El2,ժCVo6aeӦdH_3GWX彪f]6pڼ]<5C'h>@ ۴>?{X*%_GHlO*[iv2oyIRxwLbUNsUtOJ۾?0O[[RBy>K>ZRعp4;bBj8 0z'I;7ƁkC `\1ytW=20y.A7,x"50qVn 3ujSXm:<&?"3qb9?@zobM$svq!=5o9{xV;mg|Vӯ@ltX\,+d5z7)M )hH![]^w?fH-G {;QL}v;Ѕ=NC3KSПSC5T&V8QQkLU?[_ٚ?F0x7]q,I^(*̈́f{eIN C)iLݣ2;Y+oh}s:B0!xG=G;CD Hjam'ϟ8#e=BE6Mͦa)ͯd*XTijzIϱݿ8.s1 h|4l;[&^O?@P?.9tԨN\Bֈen/hS/MJ[L-lcX)kJpoq~'vc(ݙmX Ho [,̩(9\-Hb״i8!߅sb,oL@ A$u7c38B&Q $η-^3W8ܤG<F}|/9,8KёaAb[0c`@9jO߆6_ԾZK\ Ȃ2򿤴a1lMR&uPq&gjX'y3ԃ q`SOۯNCFc5u0_Բk_ fT1˻c{dw7t`.Y[8lN߫Cdu׽y jk$`ƖL?}u'OUE}s3:\5Zt^Su?WK@0tRlV^zٷuV .#)84+yIXOn9M[9^?d]UtƎߗ<~5nt'g}}*\+:@҅tvRM+ðԿ8UTxqi ?3g`^s#vuxfUS&A?3jq"ʥjD&@EhYUlCmPiw6UXC'2 u ԁ5c7L]O+ԯ Ly9ȧ+vߵqsQKq>8{ѫZH~Ԭ!gM{`UT;^OP޾ .Z/OP6=PU)rS: ,MBm/Z 6lrH[|9h2гBB5G+D-{{C;yG7'վ$(݆1h@i!\4<9mݜABܩh]iw9Ș0jBC!o89&kyOqle{b%2 Kz ]OycHhHJRI]E}$I%(2BZBV6{{u^ZNA^dfBb"؜D2O/}}=RjW$9m:$gAyNȐLg+ߌ@o_R \?Jz4@;kw'/'l1?ֹCfPqbx(?]ՙ*h!1; \iT `^ƥ޸>j Ufd=NHXX%X@zŅס{`_hha:U bw&Cp앣$ӝR!0E< =83ӛHe1_a3aϣ'GaRevf+l4fcq0b˛* M-w57CpNi 1z#289%.:djړ;>Jte司:Qq9z&fbDHc܍' I6?+00:c-"s> ?i](+>\5fR#Xq`~SSS-z~~ _y_1W13sH bc=W:|'PY.NN!mrC X{s-Z?X5`M#q97ebԌ4M工"~g1*/_? 1 J#XP6G_2}L2BZ0!UKH%?0{&M>f8rmKcQăz+k#6z# qfǑ =| gz8ߡwޛZĩPr/K~'кI&EfaA\*;y l#y@G Nۭ;Kl.ҩ<%eV0q=JG_y&g#0_1aRe'ZvTaU;?(Jl63s:F@ͺ`K%_ JM^ š8]6 O%ޖY_"skJFo1W.X:/L}tI.[l`z̾'ĸ #HhimXi?sdb4t~E}+*&1Ylza'UэsJ"NbeRr"vG;51^ ɊSmKpH/+ތNnx$J ۡ׽{{&b>d3 q5g+.EPCױʟf.IK@5 NnGa-㩅v7|¯$- u`xWEH nd;mV'dYMP`{o`"Q{{qF\ٗ Ƕgϥ|#v߼k ْb+~!=TKekWK<#{.%Y#^NG(59աMIUs[Vz֛^WHs7@ "u+Fq:S!XǏe}y4jWZtV,0i&֋?2 }&]B* à@NN8_z^7j$)E54xӊF@{ ! DW1kg}@D\1\ص,qLr«xd*'`dۡpߨ*VՏ}NpqM88GlTcouR/z6zJ?/ES/& S*p"|gmkU8UTsf"$MpG`%9Xi.X7d 170kg"vw; ykRDUڋ#Ul\"uj|r ?|\v7Co1iw[:[_O0`=ӜDAvm9HE}pd]o+Ez;{ͻ`/>.%Z a'H-N=H ޶ co+$Fky zr1&iJaM$Zi?sgoʐǂ[T:rT1ɺS>650̿&)jtg%~LV8)o`Sh9du/mk7;^W=߯#rC޻B/ɜX`P s<E8uŦ;dlo͇ c߀ڻ\D ;v= alV_R lF*)'j_TyM蜳֤/JH\!)7мtiRqAnQS)1cawBw)w$#}UH ş"v{Rz"\߼0nU-ZoDA`iSwa$h X(bYwH Gcb_@V>5>aL2claw>Ƅ0GI|oS/MnشI0 N9)ù&mZHX-00*سێl,/Üj)ɛ(z}-H씔)/Ѭ0I Dm+Q@(ՙ^dӊ-}Z\.D5?Ʊ^6)h,Tho+4Sj)J|o0Qp=ms;&μdJaV ׳0CpTj=vSyZ)/*2 <֍z}FpWiͱ3j>E@0VpJC "#a^bvo7t#Z] w C???ŝ޿`Gg+ҫ/phO{FUR Jv(%4f? H7I0{|"iW,.oy tRJڝe2 .?a.S|]#;$|~bD0W|i {$HNLLVw;TޚٶHm7 Y0a9/mX:L"ZKcI܃ TeBޟD9}i[ۋoQ" qH=:CԝJ!MRR-v%?OJ@z;K`'fCYX91ۙ`v tسpn{CT*k7BLLw`DžTk0pBen?X+!q\1\qraddL]8ӈn4&!3olEϊbdO9d%핮oyKgBa&)+W@rv "0soE,|Hgx!Գl9`(c\I?,^T׈]*͟OHũǛ.~Nԫ(G6ʱ@DΡ -8+yZpn.|iS4 $(+wᲊyo: SGlM[w}Ɓ#s0żE.3tn%`d;)vu>~l|Lf൛x92W> ;~/24& ?z*LLs "97goLI v!(<~jڒ? WvfqSmuR8%ٖa*'vr𼵪?\.hlӅ&Rl*Jxz,߉ 6[]C|0v%Y%rjd p҅hv@slʲ-X.!m"tvD:,~#gY%B;;L'C{b\28uin:+uC9 O{1ۅlt[I[[ɦAL0Xm< %`fXؐJ,~a1u{/Op_;Ɗ'?.GwcH3A\>0V )/} W[[;l >D0/s-4 2N_9mKsoĵ&MUS6VW$d:akO>i 3c}G6i@? ,P,'XYv1*} &TĦ1"f$]M|&lɾTXQgJW ~chd>+ı׽K@tqAtKcX̧ő´'BUբ üOm+io(>$`WQ߇IKv"kY%L!O=Ckj2N@i~1%'oזwf`0m59I&p G3X`p+=lull?Ep|ݻGD>޻g,qxWX z ɩ᡽S?(Z. v<03S`u>d [7g,c Yʑk-z-ÑIZ$k.ۋ`b;m>SFp~an 43%_{w ߝچZL´2GnQ_g7Ib,F_ٽX{} &N9'OZ)cjsv]s>c@ě eh_hnazAO$/Ǻ2z00fH:Rê 9ᵌ=s0H<"2 G zY54~w[ดg/VbRǃ pwj8u9L0R[ y"Y`\ΜgNY/ArVܿG f4OCkI>\-l8b=w[Dq|s)seu7.]|uI.NP7[1OL9q3 ]e+ +S1f`ȝXĎCHc ox] Ǭ/+byq|Z=>X{PU<tȵ $36JE֓5kxSI 9*dPSM5~y_D{˟Bܱ[07>Nz>Z~L.%3g#xJzl[ 6Pd6M >pTW@e؟:  61Iң$dշM @~QۆsuL$vTdtsR4Yɝ)\N68C~ʯq\Y?r8@8=ȱo5H;~g{8з)}=ӄԏW"a>CIV?;d: [.xOs&RShm2)vtp, k<[<'G/`N/,u4_8KӞ[o%Kp,:O`rӥFU%~z=t w ='@/Y? 
or`JB5 ܴxT֊*/~{#|jt/m6uiww{̧5J?_€zF]^vAgCoΌv0KX,Önuǟ');bcgr;[Y .{=A}`ZIoVg6$mʲ -;n[pԹM!$[$0!zycH#40z8b(G:vBݓ>Fk/aWOܙ4nl m|ژqhsCL'=>E VxlQW^A߫jAm=LduhX߬6 <[ |xYI~R ^d);۠1ս}&)X1>g fnA\}vnf'tg#ظH,xG;n!됽ݠ0vi=-y{+KĦg_S5\knù[{8x$ѩ4YǦd鑩TY$Ll~Yn9V_\wM<-~ԣ:A2Z8$}:g`2*K2]^e`},&bL| >ߖ3m@ ̖qcc[_c^\Wm+T?ǜ$}  a|-QGq2n'Pn ~JYwЍfh.U !'u|KA^^MU^L9+n@ڱL󑡵Z4N;~-JBX}qЌ+hSVܾ3!U~:g#$){'Ss5Nem.HG˥0Ǹ[p{C~}Y~)'CLBV!4Q-|w8XT,Á+Xow"\fYr:Dk?ajQ&Үn KO3|VϗO)2u8yHoszkX@%EN}2 ?}}N-┙mrđ6/V=`X+^jۡōZ>D3P;j)h2Q%=f%þUϥ8q>J]WKN |8qB>L;PpF e!ϖq_)9H0yBRQy3MJqYf!ZKveE?R?ϓk/4ethyO_ܼ 6)Uz)O_fEGa4͖F5RKQNF݁aJӸI2r*I4;` #RuTU6mG*wCGV[ֳ͆*^H*si͡]zv5R' -Xx{$3vwY˹0gk1͞"m@[1ڡƷ\7  :˭#-lL Vi׮~&caEz㩉jԻr ebqiݲ]h.**&V.~ŁmayrRhP .[/ڣ,*g:ٵI$_3^FAANNr.rtI^ &.+/vxuvKk8º!u^l>usQh#Z?xL|%"<-v,y!g&X,w>^$}TC͡K9WO2+iT❭6S9H)4Ů`sF0jGav[3`=q׵^4ј7F<~h%y-ܪB +G]O+VAeâj1v΄,ϩz# qO-?wDy>U`{j-}?Lˮ}'۞Op}\lj_/@ H?gϚouS뛰&Eh1$y'qVHAXa_pR',͸Wa.3O1c|+s LXaP` *v_p"$+]"W{lNai5L$xU_zƅ\H!A@_f`ZѤOph !m0q(KÛ[0&ϴ9aҌ䳰;}JljAM]$qCߓ77!-;|o@Á.kfS _-*Boy%h<"Mcv | zƉ>I#76jL}p폐3!B>6I!b`s`TJ9Ot)>B,% c[K̃y;6l703yg89s =Lmm†ZA0OUS`?D.Nǜܕi76ܬ~ To=ߠ,g8[ ;}#Xk_ iOG^̯ξS 2/q:.ǭJ^huϜTpn2tҪR$\qBQ070,r0,+}" tv#I:Ca3ror4 A5R'f6ʼtIQ,*G'd(8n`YگP`Q\r~<]>KJ6OFڼ G[Ӧ~W`!^ϰ񫊻 lޚuZCPpyK(`z0{X 7|w+ H& QI~{c_rƦJՋ8z9`F1Lܩq%wX (HiX0[Ě`eB}fPeo9[ ;F|܍p[ú@.7Bn͆̕-En:Cؚ1Vy,/?giU_ %5K$[Cq%jH? ]']V1 &븮=:kBX y 8aD5!|D)R3Cحv}Ǻi#hIԊ[zoӝgc_1^yXۍ L]\gqYs>6 m+rxH8$whP|\ 5㓪]8{,N8R׹j(spzki줯y; ml VK85rjȝqQE"Y9Xmu3:ھ̱%0V*v*,'uE-bݫĘHp`zTo.'/L(h)Kʲ#|8׈~6?6~ANZGvh@^ -=m[qen-ףLU4PYm`gBā;<ðqhH:W5d4x8 lՔo0uWQߝ/̾ '*%iViO~S do'aZoVCꦯkeV?G* ˪+?n]of6XJܣE/MS)HZ~Y:q|fVbq(0ˁh=KODlC5[vAaOSPD6 6詅qbP2%2o[]#yG/%([w}C7|NEf'[6+MGlno3[BHʿbkr~'n+ye}!:}qPu[+O!)W婻` ! 'u:Os-a_ɣ:UQ?+>etVNX}^d .0CIpksTX4 ɡmlKV:f/ өTjh_IҺ'M q`+Ki\-[Y=?tqǐn<@ư|R(72V,ߍ5Si߀}ŔC5Q,Eaf|K3"u"RE|=f0yv,\ә)0H<ɲYUnN&)|Qaz = xElveفSqkq"mcm\hk4 N~P+(B0PSP?wQK&6vBnJB3^qbP&\qA[LlaJwIsl6N?xsVj]kGELlLH̖~B9vN h[ل[9#-W2?j +t}b6 V׶(}^*إo85Hġy,'X-X5TRuf`ul|ےi£W46bN$yY9l2\i;?67ƷƱF#9cW0 ~#MkY?,=FnG*P jNF[c#8vp#r/G0 K#wPveSX(2zʌ4iA ͧ6VmWGLj1G~nה7zտ$S{6oe޲"jĒwB"Rkd'\:y?lsi{#U<`݇!G&̙ku[]ݞ˜ېCY=N,<:)D{ݙύ(rrK¬RHW'ؚBkaYARRM7&0Mf\pՈ;r43J%/ Nr췭0-=b[lf$ cXH,sq9BO kzH\|f8PdO#T #7iF+@O JXJ=Az?ES]#Fì |ø>\ya!JdvYZc#csǗf7L*B_c_u$t\ع~ÔWOi`>SRdr3dt@xNMqhF'-+R)M7 EC;R%J9ְLjP3T=*ecACᠮ] y=Ɔ-ߤ#fIk^%m,^;JrfVg$g۔GN\k!HyMX/ 7\Pͫ(A2v/`QP?aoW6㴔[fnR s d J(VU߯pL6Yf a%EبkҳWxμj@>|$LeG =_ q'BӍ%/kw~qt&iށMFX-<ˆ Rq`t)ɘ>}viO-eP,{V1 "ao:b0\`2 !G8\l,WJ<2o=ῑ;F%4DFC}#~8aq!|5A}b[f#Cr&ؓ=W,q\YWC@+ zL>pBTRzzU _~-2oCz8vRިZ/Q؈`r<A eN5/?ˆxS嵿~/M~v.7d{(%*$d|ms; &%}G`ԕ AwP#ZqoDs4n7<ͣ8z=h=0f3.%p.*Æ]sSax84mן"EXxul){ g%wU_w[ղ#.04lLym{_*m0kqܚoji`2y2|3*VBE-aEt@*O5-On #ѷjp.X |T .gd0 ْϼe.}Hk3 gfzQC\dY LW}_hg: GlGoV`ZnNC-ة+UZ[YocfoY-5V!}Xzώ>' y k_} 3Np/9 -kN`H;-.aҏ70EG#Y. , WT}kUn5N2uT'vmYÛm8U*f}ybI t.zܗ/0zx8F[pD9Qq^s)774,)㠏 =4}i͎G\++V3&dj'abk(&'en7bbᓌXUŸ(ARʝxA2cJISס1iݵd?уR fIQ`./o^Q v#_`l{tSӦ⃌*?BS_z CqIDZ`"ܡPwvN| &i8r{z(q~ORscI> 4pfЛr Rm+eZpz=v4v5`D -MYͬט/bYxKc!Q 7q6J~6ɒ^8.,\p &{@So=4Vcqx$_=KqJ8mT4̳6wYzX~i{ syo:Uzk>?(<)|xT= E)mU`ѷKN<t1B~LrS4UqiDt:bD}* S.SC6)ChOTrٲOW[vQ{^W?e=l_%p,8 ʷkUr{q5oz+탯.3R C_Tb8oEb71~7J& Hj{νEw2b$wRNbλ҉JyKDQMH mܠٻ4M.vǧvVP%~@@D >q{YFNB˙pt6$}~"z :\VHB_olvː1= lBgyi|F7u~'V>At! 
CSڱl@*.Ug*&M8Q{yN߀g/}ى&6WC\:۳2vzRaRk^SͰF -*L~뱟~p}"+ u T#ʤ!ޗiI$!{ŦXyrV/tGҬ͚bFiYLv?EϑE>N'}dL:=b|腨'6Cͬ6Ә'-L 'X(?{s%zFWK_:عvur6-/ gX[ ڟꧠ; ߈$eGz$h(ƾFǟrbN3}rz3Q@#Q0z0$#,Dn(Zߛ>;NӶ3X~u4̲4^@{ǁµPk.*Gg VV;`4G n&a.[8q\(,ɕL}PV3mߞ~) 3%y᯼$7w [874!o167`w)yMйGO ~S _Pe{jl9wz6g$ˎ%)lkEq aw ֨odaG{o22 ¢%& ;zmo<Z;ulT4LunNMQv:$N+_QNpdѹ;5ș(lFT8&EŃ)!nfXKq|M#N+~XCVz_ ݁Kը%vL2˛,G /x/+wOhjvMukļsl=$ vxn* }M{jԬvY{rTgMҲN\1l9zƟX… c$`'L쎬BaYCrM|p侓v-:K7 W< '.X[=A,^4aE,Gu$WJϺq{D*VܷS 1ӡ!NLQ+3\հK"Ke`~$X @K!+0/stRO=dn0ŊWSa&X[ʜ,\^i{ZQsZ|2j_aa3Iip%m&! /0 ΞS.T9 ;qmyG9"Xn)Ⱥ /ovx) )[>dC~5#J0ϱ44Խ3!sR~=>14l>¿A G_˔1 EXHw2Xعଭ [7?\Զ yIUޘS91]6sD._S$/v V$hF͡wF畴>c C/=̋b0woCef,Rw#?I?Y|kSlՓ?s+Q~Q$1]3 /Z[?=L@֓F0oޓѩc"p ?I1T_+ rEE C0zGUk IIt.y3}/)Q'D$dqS m,]e&z{!Q' ҩ`CZ-+Lp6h,.k.یU&7Td]MpVwN՝\ 'tAYIY7Ώ,4F˶mlX?0m}2}%? 'YR70™\o1WTԄCɟӟ}\xGYXhnYsChD%c|Y2Ryݜ0kgq۟`Z;ջ #?Kk۾lq㟾rG``ivN8P ^JSyH)R?܋83ңJ:{Ebe֚W$[0N-x!ua(x:]_xz7G82;5 ;#$vlj?f X%U"}*ʓÂT0[wY `2cWnӦKqdq?J5=7 qæyg98(4q_0~w V,Z)vz zoYz׾ć OcȭaY(>]sv?},P(pٓg><\7]'kn'Ԅ峘(;U W\oX,,u׳@/~g?lv41 j8AT~ުKg\RaWH9$mzr!CꏷG?{7kSEa)P7<[:bmO (z7cHU`0q~GfN`_3XtPMMJ>Q"5(;SN Μ 9}ke@\yf辙'oJ n| 3A0A}~!\qcSxDpm68 'N/}7 El\Gbs-dH%yU{06SqOֽAoV1m#s{|$Nǀpʚ3 fӞ㨭ݖ )0v\͒yvV~8㦐7P8wRUb/I¢v,}5ڈ3>8Su{~t4LӹtWsq$ǭijKI7y}r5aʖP'nr֋v+7=J.,+"w آ<ܙ:XN:[t~Ta{f5v+oao@5a'uk.:+q$֝*Y*/sC۹$IfVTMthr퉼%԰}~k<;YRgbw{ ,)VzHJٯwmq.BT0p3^ Ĺ/v#9ڒyz(Ͻ-_oVZB y/#ˑo[_m ԰[asuv1zR&*OlS(NtjoYpf/L1_qF8nJ~\c%,E=!3&&R 42ﲛdJ{ -%<8>ev}'`O{Q3^J"mk~TmB yaiNXrBezouyjZ&ֹ|9cedU/5BQי?_ X‰. sY!(Wлʫ߲}՚5+I _ЏCI<28´5BVsjy`ďE jo =zf>LyX[!ݛs5m"X.nrLbMLK05 y{&o LK_ 8$MH5T]Q_s )io鄙P'WT/Z羶/#i5Ww*1[$aKA)\?1¡:Ag #UqNnB.ۏڹ`A5#GX/:u?-1&LkwbYQEge.]7Nrm?K0wh1)ߙI?^d}Ν @{~`^۶Iޒ$+U?.uU\ùwhEU FF3`yS۳;j wWvzR qQ>ꛦdIDN5yJS^VB}Tҍ h>CXnvrmb @e). 'RVN^ǦkԚ_Zt.B/2h iz=͋dHFn;]`=B6":YZ'!;n0[Dh= ˱>G^0aT#lm8{h`F3L?ne C*b(ن;0ay?kM˟uO`v޿%ণ)oKX8YRԑ@M#cR3̼M&hw;eBT$R6,s‚@d.^uD {7F(!qRP9⏂ V/zzB 'x;lÇ}Q&j9soAxV\v*lƹ]fZvb/Ci7 ̊YA_j_tI b.c$98 mҺ471kX4N.8pEH):ESkwu$xc݃*6oW6䖀 ս&[IӯtaKEF\֣ 8#"?FcvX%k,?7=^v7JIUB,Bɜ$T]HR\۰S77HU&ɽ" Dw$ `ޟ?NbJlY_lJq=rz?rJv`^ 3cBlqY¼8/-]i-¡GRXn&Z̊9'_rcsmS5=]0* z1 )06*779X3 D.X]zdY^y㠫vUOMރ)GLH&=忧q۶\/)=i8z**As4 ܓI[м$V=-c$5`K-4<܀]s Nvl&U:-B[CǭWV ??5xTƑ8t+,M{ڮ')v?vkHYqڮ:ܣH c*ƾQu"cIi7ٚ  @t<'{Fqq_~w-!y6 Еe=um*eMzB۵0f*AЕe5k¶!a~@XC |*:WGYģzIx2ڹَ GHHz˟Ykwݿ5ā8\B%Isu>B]*;^nkÞk޺TB5(,oeoKPFW%VahYB?bn2LrjOÑ=Pk}D46qb. iFʹw#0Ω=oj/PmA &=D2C;ʳ,̡4,0ɻqZ0E>VU޽~ĺhvwJAgdEЖdX| (f~í!G=DعP^3Ƒ[_ y!eg4-@uf6Hrث|C~b#>zX[l {!-#S]1濬b31- K0pg7٨=ٖ3UuI2̶T\\7}bŤ"J[`~e ,64X ^ƩMۖ`.K[?i0 O=L> _坄jU[d62 (\ڝξtDCQm Gݤ2HoP #HҡʄWWľ6°J3MRtX^mGplTKuks1gI6u9v]aZ} Xwnoy x=R$lǡ0vej.ԙ6\6CJ5E׃`aY0?դ(!Y)55R~ҮH?U<5Jrܼv*q&BIn͆7.Eǻ a/ڔy|D/,s:?P?n+fxq~Q+vBjQ{K VQ}1RoFݏGnˏe;ն0VxDǶ07u ! no0-75v6og/|х ?H0;/K2;l+U^E{a6Յxsګ`nR):S{-IArO2Dn=.{̋P9}qJ$mw( blv#[Ewj [sƳ{qzz0">^w͔ԋOvvT*.@ 6=Wg)4`ǰSwBXk;6㢨s<峣/Tyg'|3@Қ4W.le{]-atjn )h8TAwwE]E^ o+ _V]Bd?o~g  >>3\+mL8 kӰU1-xh¾ 5ӏ TΚ. 3S*V۔'1<ޜ0}5r2G{NF^&9iRtUVcщ\ JTͷN<[dPsn}I:ťHsm \_7=K0m?^HR|FhS{{nWifzr->پ^Td:c14?))#3Veau9X^x &f6"}{srTrmzN*KǑߋn!K_y''\.D2~ޭsNd1*`ISCZ#t~=ΎLgB˅@vSxmV\D*ʿwIU Bd|}Ѱ*NtdnxZW"9:[*-߮)CόOx[={Sy>6ı uE$w暩kI~EBM NFw58蓼Ub|/?/r!܋I . K6M5#$}AړdRp\8gǯo nU N@5NSIt</hIi10gzr;+v> =U۟#H]+Y2@MxWXMNdsq/٬j@M?١>|ƽt׭/ø3Z}*H7džtQ|]fOceeרף8uu0r݌C%-D16`s`yuW\-;ٰbPK5B/T{Ίu=# t3hrY] \[VZǗ[7'!M:˵E#f핪W"Ud_sd*Wݜ Jl"nd *yr6FÐAl@Nn7mq]uZ-NMl&mɟ&1O uC_n;߼tHRvr8 2 F^W\/wrq6#6k&j7s,"K`U>bNRyU3#sKue-s1u8>"Viz_O`gNyce'69˵$ ?]}+XbpX(:\^uN8ci 0xYo0rsQ FRvdmŃXvEtCKarCҚUiHÜ;P{۴ o0rnA N܉-)X;Url+ }qde"qϥ\ . 
j9iW*pbCkv_ys.L4>ׄx4kOW:NGxa~٫ZX{v}QմOC/^-~N#)SVl l$1| .Sntu ~PR= q`0xn h=Z W/лվzb8ĻH7ƚGT=:o?ʳrZ +oBlh*u f6Mo߀4#-$wp^DKY`P'iM`6It{]no}5#yߜHg߳!ϝBahާ-qr q I/zgI (K^<>gO36[ j'{L^Y h\ 2&uϙܴ ul" Su9`U=5k5ݡߩZ_3Ξ{e`wF#ɃZ0ͳ_k0. k)!$H}`]26_K Lc{P퇻<{8XTZ&:ȸQẙӢa[[yH kSm}+H0 ?DL !8LgnޏC:7]1֕ڏlHʍSU}@R[܊ą̢6MZ8$qxڏ?#d9*vO,IX&R04r2Z݅$0ӲN0Li aǖKÄO,fDZJf.. )o^l-3ުÂԽOǸŰ,x*s)6N/cJ:j,~ۣ~"$?9C[%4ae:q{qj@(:g0ϜTWO n:-gD  =fo`!e=?9̔Q6L=*_k1]҅n@S[ _z6-gƧoė)\8~Q 8ã7>^×.d|G)^6r ſxpT[5PgtFc;c̈́lPzԯUkS׾B Z@~ζyzBaAs=0;Dpfw=kN ;}ςGbQ -ށג^zrMc}l>lgCvtu=frP=`yYv_:sDqjL r4J&f%$qUܷ$>(9s{aZ얿$PlG0>Ij%<N׬~[#骉4毨ٟ3JCt)`:ѵOLs 6m3 E@K΅(9E ^LW2i81c\%{g| #{^"XޤPm/AGp&9 Ԅv6;eR;kSSA^ $ez/uKcwUr9{{_%m`7վpb,88WYĹ;IBeX4jf h۟C3kIʼZf>#W>*19.'Y}gӟY$_՞_ޙ;3}r=b0ؽ6qE G$_K#Y7U)9yzY~#}·>Q$荿b{!EiJ?{O*Ưv)ޕcE*Խ;[dA╳H;0/WzMgrMLRhhiJ~:1#籂?2z͔Hr]dm`EVF^ Q,ڧ;f3S~s ,^l1 /.ۜI l;^C`RY|7AsdR=srd[iϷ\h2h+/}V']")9Nw CUq5L0|\9'RgR e&fپ[w)]炤cNgN(ý us,=ټ/8.qȻNIؕH\1=ݧ#oY5K|ݻ<*,n#፞|+޿-'8+MDh&,{vkM',?sZ_c EaMIy,;ƒuFo>E I*@\4k φlIoYgSP0"9o5 7n(f[m.طNh̳xE'BgBcݾ{ϲ ,9lqfq@x68L0| Ewk B OAwFb.~]w+[]왼1;Yj~߯PR9U 2c< 9#kq0 [)'K—Y's2FZ7d.$pgݿ [T[_R]`H+x {rɺ_XRJBPJ.G vz{%!Ӻ̙;Z ث}JwËG5pYk9SSy BY˩$Ј0iǫt 28߂'~c؋mBImW+ l^CO~d7y3V]wo# ?vf{ܜ~3Y[@Z1 v](DjYA u>~¥.5a鯊i$t D*kFu.LüˎF\k;%07˜,30] g6$}eR&88"z<<;Ŏ6nுʭy7Y4?aBtm~ >]fE֡N䕙\'V92gl%_c'pwF xK\ *}].pa 6o9g*^j,V\YދstD*Lx;bM%Q_k *7_ڛP4dm[~ˍT=(8t"/L'N1Mi[_n"UKfga_HCA=h*ﵗ`鵬fs0F0Fx$OTv_0w[lk'NޣA q ギAWWP!iMr%?Tqx4Ꟙ.GΪ nB\賅П%o%?Ҕ+ IM=`ĺV>mSanlߞq L tۆ5wa8oQsñ}zK]pN< +Ԃwx>%/RKٚoJғoBE`[oPVw9#zns_-cӏr?[q#˵W=rR%ݾz.+(iAϹ l|<&\Pm.=@/W7A螟*M|쳻e5Lc:}/HU 9 K'L"G]`R9O-.wqKZx]ˏۅ3 ^ _gMN{lxJ^ rصIZV󿗮:Эa X4E[sΟ别 F{e9vKP3gBI5F`Xx/ҭ[0ڄǫa"盥tmG>_&Y6$n~sҙo~F_aew24I?HVQx!z%~a5v~13/Թ⋜J@Uɑ}WN<`Zu}|[R= # %96PSsۿeSAHg6e=]܇J="`OHe8zYOy7ɲ/1|lPYDm~ #1WqQ| Ph cyl8cfMK \^cy{#A bǎcp%׈ϙPURXw’0盟WB 7(6+ٳk(LGg3UD)V;j VFzqz|~HT]thNߓ6ALw1ftϖ'Sgg5[ىÕd::>ܒ'=TgoWo}2qDZIAwCa.(m)}YqljU#?L.2\.zbyclďo2I&B[̯3zw6u0vv >^+(OUicMAsV 5JtR9 q."/v73D$Tɉq{~G y:asޟW<>ybo5Ӎ6  M6eAeq:MZX@ռv ly1Eg߆i_D*THHE D BE$E*EHRIڐM~2M5~2sx+̰;Љ{L|S#)G]rܳ:$sâ 81w\pd4~js3ꊗIOT.(b H}7 U/ġ4ov6u#_8y[K,I)agiH,샕`4փ)n gw.|ʐvk6^Hp`y^ $<8S{4s־0DLr(SǼ`_L2ͨf o}`ֿ̅56XV;["~N WNd &MN{dGjLPjP fL"0':_hW [Do&-(Jܻ藴#u;Ⲱ+az8AKOot_DE܀,{!Xd2]'=!Xj+RObg {aޙ,Fv1@zVý4=k:sd\~6w\j'GݿGY|> mZK\?GD OW:Agʬ,.stR8(r?8EA@$þ?\ZH[<V39!uw2^eZ)Ws g)N@=Ty(fp"__ x&팑]gbrƀ];)y)Gij\ 0׷䃔$4-w$tNeBI&@ܼa&ymŐ\B2\47؆ۀ ~P2bH=dpQ\> ۴]t?%y?Qk4 w%@KIߎ|{T6A0)ɉ};;$RZ%uBtu&o[~PqEZv`6G8ڹV Z6yu3;8d"5aC)wR7<_ߪR>\4%w+"t^-N3M|ߍ7Ä;8G=4g/ִ[{es]^m,êO4n{$CWKa C=u1{L :oBN}5X"4x,E7F-.H])k0êxfXbPڼ|Mq χ3ZR`+\wds^,du ]8$i`P;`%.J7,g~㒮1ة ˯`bӸ"ךJji ENrv=*u{}Hc*4>_q?RGjBܣs "Yhȉ ;KJC잍pqF60Oñ3/GS}SRX]$4L@^GqE)fp8 aX:9x 1 k m@&eꗍ,kvz%ry+;}$M&kW)P}+sW뙻;`,Wl܎mm N0߄fIƢ}Npn6nka0d3k{H2_|w{%Ma2Әo<tK[CW~vXYbqduq+ i\m$rĥ|g)K+uWa"᱆`n}w",/ QԳu0[7CcF& oGmȝ_mHL^ڗ=Q{7h2)k$V_sY)mGk.p>X !)c} F0V ̾{)Hc^˱rQ`jQO'[+jnbPcOXJҾ÷4KsЏ$Tzb7k;pN_n޿oǮ߯T$_$%~{$x足ϣ9`s=Yu6Sן4?ωSom\NLGb &`λ8\zIpN,m7|}N0j0 WWf+3V;3~u|U}Zi@Ҿ<[ cݬX|Ʋhɍ˺h }=E06j¯6ӏqrf64Qc[lʆ'y+~7NGbw`t^4[:`c6DP DZ|]/tr਼G4kmG[iPϓ$+C7fጤaHo k9w&G(=;Xs$L'PYI.[L"q0W+Ox)rt̪/hmfzou}=  >ykG2P0Je#=lIt7t5Vڎ/LZw`O9|vgL :2CUŁ ٧?k6bQ y:G׼g[ELq[硿TN8 Z,\瓐} f̻+:c\K֏X83N,:azc귤$Óe0x6Uq.Ƕ  CJp0Ϊһ/* -q2[`gwK"ئku?ث^o u[ՋZoJ!5UܪAR> cN[iAEŅas*_.,H[΋0C>3nX?+09[< s1=u7E@qcŰPL%4nzI0~ {C%0v⬱뻕pU]=r8ſt5ۖm\͊7C۳ VkZ`#3Lz1K7t_cz?{(3ɧ|ĹS]gp6C}~8̈ q+Ż`)gOU"dwSKl&tu3oRz:1ygQ|Oʚy%2V@HhvN,؊v5$ -r˯`M .2?{Q74wy5=ETbWZ 9{m^&=F4ΰ>(U{_C0a#6=7{kEtkjY m@(Oh>s1wݚsّLhwR72R7ڹأyęd>\gm4Rew鱏0kZ̈t gѥ#qn09[,.L aߦpEJ#/ݪ)aɷ?.خ 2A3鐭vГ4gvi 4NM;v%G;!xAW1nIm56%,J#mys7Ћ5{ -*EhL#n?W`%2QبtU)GD .?_*>]S4\So[ٽǃws Bov,d^K԰ﮒ'pjWsxhX^,?Iv=L٭L_!'fo$84 5(Ǟ[¥Pz]#M9߿ԪS8Q;A'5[ɬMX4Nhwj+;p;N5&䍜gbٜ!E=m\6ҵW"vX9% 
9L"eYji&Dp6`Vgd,H:PpJ +¨RAE#AMy3zs>d'eSpt9ucKh  +fge}Ϯ 0#Be3|9伣`Z@"d?R 53q:tKc2zd t=ΊM+VqH6 f,6ƱA+S%\lו pF`qV6ky-1wבz6`00c~'۞/|y=#`䢸~\=WSngӣ;,m&6PַwCqYq;A2(-QCPk6s]tIaӼwjEŷW`u'9#h+٪rsx4kKܬr8`bDCOajKAʐt]$D3p/w;c_#x6‹J^uuf_d4a6,_p{S+o="Y VߓzQ5ӭq1WzX\5@)_̻^a'5>s 4VZL;dˬ<,:S +C&rݝ^=p`oF9v+ΡW8CK&θt)*XT\^"u֦gQSYm釾+mpkzoz~oЊ.&pAޫǐd,{sW)||{‚n[N#|gt˟doyۀ`ޫCNka@`=Hfi b/"ݫ>?m3O☥x"̙gQ251?V/.>aOCvQsz J\O)ƓksðnKQ(؜ +Blp]3xsW[ٝVAU!iJ8^مs$Ҋ6Gu1ilT=E H4(3u`y!DC320yY fs`jj;@T N<eo X6 Y8LCMEN0y;(BVq['tWD17z=/\G/Ti}'?0Rc9f7MwХN6 _\ q;]$hGRUBYOuFR\cNso4oџP-3ɲT&o |K& L5nzЦݠ˚++P͍%tb" qel fb$%  3qb}=su7s6fOX0&/!sݐr07<4z`je?x{>@:5.^F_~PISOK6+0og2촗2]}Pn^l%ǬogI/b[jI_w6s2֏|s@\Õ6;]9f7j]Yu6`ųI_J^j`V TLm8#U0'yֻO}gH֒@Xl`Wkz.s\2W}Y+έ%9޳"z65?Xa 3RUr;YGTw*)@s2ɍ8|S{b /hORмum(6ȥsi` &d+DbpFH:z]0d޺gߚǚyJV{W0s?2̉4Zĵ+8Ur;.>t+{Uٚ u,c-kl:kadn>.dŖcBzrw3κ3KTNYOUkw9cTN KBg,R8yg=4Q$SDǾ9l]m-gˮW77.By:jmV(n`4RckMz]vR{뢲y*0dⒿūcǟ wb8f+0VOE2l`׋cA94''蝙}/ % 7ԕj=&HsRӮb!T}k@Pz!/"Ϗ 5,&9 ߷/t;;uMP^m[="fE0)(Ws,[{8Gnړ,|a 踘 5BX{m"sh{&-w;Z q67K;.xҲͿ'`jەϫ!.m7j\FeCJ`R|8z&hq1N[5|A|qO|-q\TvB Ma)z]NZˡ;/gg0{՟p@`-ר)jLń6< -0t? q#2@:\zZ.'{Tj A2VoT1՗R\qy sLsaHG7FC-mo趿J]<9G;F>gX:\ssV]r]|7tl; k4`rhͺgSqہ i qRñ0397מǥLEljT#4lt"8wMêZ[O"=q&-w&YYR苃Бv[#|;VکG,\K"Gĭ~dך燦Rqpu j?o<-q}jCӚ`!5L} %DST `<3k4ZUnUqc$c1?؏y~ aU_a%1˚-ATmAjAxKa &I-P:Y6Tڍqmc:x#C'Cwݟcx|~ Fm@O2K`$Z3BrTXyVǹzI*l?Kj;lILw!֔x_QM]Z!!.j̿i$+nʶjܸU^a:ѡPSWk7q|zW Ͷj~ì鑇 1wm!8}ZO<9sIu#"%bW?뭗VxM^`h{:W}|Sj;gp4z)XXUqsL %ɰɚ_eK?~j# th N̒78o# dDe`!X.~:sznbއ8{I/@ܛp8m*M@Z>[}_E}wJIB OEGpتBxԫ`͖X( LjfW8B/-$a"]7ص aot:@C䞧(O$e>I=]Kl=NŶ.aZ79z`jZT&Bqҥgl2?{tTmY'՜$̓qVdhP1A; Xr*4AuG2 15 0r{\`d6Cr_ ~HpQ3K =+Ssq[ghc=uM mx岝8Jl\<~ {Caq)rT)}+qF{+5ajP<yqWM@k9?(pƣ@0CWo7=;y`8IaNwF׿~M9'K~*+Lw0 d$/ƛHfYtk?k2@wKcauga :Lc[̚$tq~uƍX>"sb/#61F{GN5Eu â^՛O'4M#1ٗ` ;ppCcD_b._gkh:xRC'Ka)2=~u=c"߃g/ _0f^MĦX ~!Vs;Hp7햿!M3{81fhUwܠ&s]>6BQFc:fx*"އceX_\W쐯v (椬sgخ;v;7n\F+wћ%ոէB3 lŊ23J@k梅fQmO:adVV #0otOd+ɸ%, tdCΘwJx$jX|mT{p ;ICJ6i1@ߗk6 ķ{p=#0 wT1QCwk?K[ Cs/h|v'˒z`ӺO&_m, G*C[Ygp;rVŞ;r,Z-O\NDz؍#z!^A\TLZ޺Ŵwm[6w,+jrY0)*x/i8͋|vQcK!Y. n0>qw+}ZKwq&q yࠢrA@7nC~8w2 wݫT\QVxjQ&kNb{ Xi:Q3o:7va-'Y :iJE'y'c]礯B6$^w 2d_ ?(g[7xnGIASVe"Z[svw?YzPרt' eܡɇOּ1Vpi=GpYZn .?A:)y}s V`CWT$z4 С>I帗}]x5}wd [65 K~#_\gů3``a@ΈI\*øMz|kE ~aşq2ϧ$XLI}IckǥP#h =_F|#1\r`\塜ngljz{'؝ Š_)4"]|4wab ׋܁]А*Z7 A^JZ Wu_{aytk8ڹ'An+7y_N հ*²UGܘIpڨ=:G0fgrnDwu=nt/=0i}j{SIu/#z2+ᰦEk5h+ ﵪGќ (&U7!ڥzݘ#XN6 ľnNkIa"-s2k"[[C@sYv1 6\ ݎo(8ea|VL(H,l<=jzI-U-j)n.}?P>XEm՗n`Ӂ}?;'VJTšcyY.PT K SM;,!8"` A?pg nP[{M}p3k˶J=:w& BOiC5 xɛ;zXlW,'TӧslY{>Aow!0!XGk^=ǿDZ\ mvLGir0~(>y͈?5v?͓ v'İlSR_;5^:XgqMq(<_j'vy;}g+; D3sb"nQhO?70q'$Q[?1u!gzp3׽=f]߱澮5)w\'gznsbA͍BF;m NtM,0E? 
+]ܷM/ZqkHL,5VK><`e/,€Y6];u .ł]8zxSWq 7^֮Ti}Ǩ1t9SJ>+ѧ@Ǐ%Y nQ{L;1$׾&Qde46׽ӪwGSSti2Pz,fqI^oO"/l  '9ɲ0㬘eBʦ .r3=Ti ύ "FqKTU#a&ire}%{(}~:Ws[KH7áu_l~c'XL րQ?XqT55` )oֺ00ȟd8C*]‰eb@med86+ '.|,{8ŹOߎg7Ħ"CGLIM5qC#+ҳ'TL-Py`9ANkp9sl9 E)$ZeN >l ZOo_ )ph)Iq!پluS^yC;kN{vS Zo%Ptw U}C' C+ b-WC- oՂg혮C,'ٖBY= Rٔau;ծ!e6Ŋud6*ihLn-!'Xbv(;o Ց?'l5?SN?USeJp>PIݗ$3!+ !pA }?yޭ=q-WcX_?e4robO<|~/NJ Kg~U%9Q ks g5=vPlDˮ`]&'[丵rGNU5~c$TY&b0ќ7'X &ϾP~xn^Jӌ+;LԂ0=9N޵JR"Pyi[$bpћ<8;P{99re&pFe/OqެPx",:a;qԁot03i.`Znʙwx/B(~qY*Ihl8KD{+4hDžsolN=Ui}ؠ{_-aر76M(eVZ;zI|{ω/򿍐~^k(/6&g]SaB0SiYHxYv7,z^' $}V> %\v$Y# euZLP!YS ~V8H+OX=ɰ9ϕx3cvA]bF/+Uv_< L߁^#zsdUZ)2"=cSw$ tv;mn$٤bWWl7o|r*/cXq쯺se̕uzI:Rg2х,/|:H}p)8\I?!ZUMpQ\O[\ힻ (B۹A$vNaį!Ke4RM'ͅ[l6׍qcW@X4}#A:\'[^rd@ rK+7ʞ}rfuYwFw(0(q2s<ҒH7\u\)QIu=Yh͵^.T9{KЗ_|c/ٔ EHfbR,ͨ`ÎQ qk=KX <ׂT eUogl3SHF3)Js͚<*Iχpz֜3Kw^Ǥ* yW>_:y_APtd\:=~yޚZǮ2i kdM4sC4n8Y,yhn= QS[W;õߝ!8ƩI/^!OO#wŵsuC0vh,?+\:NaIYԨ)~s;t`V2Φ(^XZj  y HZR'p]X#ej] Gǩӿ+M"vhq{;:qV?_#UVJƜրFn\t8n ₿uKz8;D?wjq0[5Nclٗp &6ބ7|)7T)pWTnw:HV0r+oMއEYqI|ƣVZ܀{g <ׇ+-8@~R5z$|<`-O` pzMÍ\q^T%(9w43?ʟّ|^"pv HF"۲}hw(D]lM8:Y*Q 쌟ĩ'GTkcpt5]W6` ʰd3~Rz }8^] ( }bP*2g6_M9Td%K6whT"byER-Ʊ+x/!GͪZ[IrNu!#ɕ,ör M돉Fbx% e7Pk}&)c*BtCɟ-rcqF2~ RO} ݳvᲂ3LJƙeL-tWS9I95526Cb\Xq}. ur2-w@_ueOX'.NHNiЗ2#LMMM"se:L ;@WYtv+~нL+yjZEǕvuf8q=?V:_sl"=5Z7zqE:EM/C덱2Ҩ_wI"LdjuSOD A;o^nwfm2.%dzwdwBO*Vx>/vq\OzӐ֗3t‘ɮ$8U{4!noKnS?Ǒ&>Pa-KcGa ΞxR\IN~q}@𜕓`9|>q-F,\L҄M~cݞ8#7_Hja iUڇaWSU0)g''`%+ 8Ŝr7jPAr,7Tf=Er7'tjwR/ü=7g+E5lVJu%A9yr2L}18P؝%2al ??xV ۸ @f}lڍT `oͯ=ճ$Ts530w|,[ aS(o-{`SyGE={z=~a>c$N]i <{\``zg=rb;qL'PwUIcIo~ iL^~K1P|<Ή;JOkH̓gԡgVs[N~^X'ָjw`hMMs~.mL{?X\{Eizv %UfA4Ia:&ş6^%:!~f8U"x6z#rsIg>o2pH0{Pv.RFmX尝EP_0|CY#{AJ%HNUq"}5aKkCxH[ >#"V\7})$Nʦ dю&~*- qAX%b~)SL`qxp;)es"|!dMS%qz kL.aqMO7,@9ԉ &*bN߮B)[N:vno sT(&L}./)'?9~¸:[n|8U2'hpВ ʻ_6Pfy0eRA;9>HxxA Nm t[j}=X__U~΍HI'^7C`8I!)ͮ_RD=#܅+9|$ǟo B$BQϤ+)I1Bl]9Nļ΋3"rCsfY;H^_- σ}Яl@w@^dVԺAXNܟf/[q~M# IY e Op&LwM8Wrj8z[}VX3*`kP'ocD2T1li"Yf +tā$s(4%4*]?U'p}$ṉZV̆yQN~C;ݜܠ3{AdرIN住 =,Ϥn,$9fp6}C8tz/U=rA[9B G%'q 8aʾ0ziCR*zMWevJkwv|% #[9{qJ? c5'yS[^a`rNc=S,.N5k%N,_FzѨvHuѕaZg[ >Y)4 +c{f Q}\L#n1ܵu{~0gfGA=q{oKPtR @ݻǂkyw09evwLe;iv]n"NP;se9~mdgu7NAjOOaGhi,L1.딅ݓ{H+gq*7+*.p} tƂ倥F8}@7JWqx/j~; gwR.wck>qo6Uq8wˢaUV[X_E7.i(bF:J~ 0ҩ' mكSON7rђy1s,0)d3OqVIz7>hec7ϟұ&=dgmcxX'nb }tZN '|w ]P9a6Tc;cb֝^}Bbkv3Nc~t&c|/҃1m@cP D[<9sYa#M8o qu aإ.e-?6`ě6 :Xa0?zQ_S1N? `^&.1(.:hӊ:kO po+ QA7s :@fQBO79ZM&BoSN…Y?i3n,au [Jk9i;B~0U=wGZHr~o˖â䉐L?5Vm{ѶW8~D c}{z7i+P)k#u {HV-0)P\ lJMc N Dz0i 0.Jpu3q?NyXl[, "/8칿oa(Q!C VNNP>51|7Z {%o+X5vNGaaHR(pD0sg8|weRf)8Ą!/|1YppHPpاP7\Oi;g`4=~댴.K999 1K*g2Ci $ Gl¬8q-DHٝkt#k}F {B.nf* K\'6@`FNm+6cbtMD;au~}1ǩΤw忛M-qq*m=t_ :3 64K=Q78 CEp㫸xdבhKaD3`y3^1"ÈjLmOZE 2/,1nVEf4S<2sHfOO'=_ӎN-műq.PM򔒴03S:UEI r4xTUGVS"ҴovWɡMAlܦmuyJ}mg&51+{N'm1a^8z/].)N8pq.t,%SrqXJpbFeXH3kI%s›3/{EHFe}j`R6aBhoEԇ4_Iݰ6̉;3|Fwq\p~!^sR 4J2ؐ0IyrD=zԉ׺ @=@Z\,#%dMW[|Xe~gȎC ~  NJS Mˆ\GP7=$٘5<#z5^Ʌ='ssF>3^5CN>5 +%|]1\O,%H rScl0e$Ά8MŒ+z2,y N_3MTytbЗd² u@I/0X]z)0+ܭ4|dNz Ԟs$kII8ǹOoQiY6гf}Щ6i, `s:NK$~$Y|8u`{F$r)-0Vz4Zg9u?оo܅c'+v-67p`5 z?hYH%,~\*\>/\5^72S23ENm,M-nWbuӝ:q~?aM{CBp2ݨ"\ g?oKzoS! 
ּIn4+h;}p^iseYUӁ2ZS81&qH\^E,71P*m%=8`$h@a%X~Ʀ1h Tܓ 3B˰*>̏#2<1wG<̤ n־'8;c")ì 7Mhck޾_:Zf`*_Տs;XY%oJE#v^ƙ?}HʀK}wN]0U#5/cD> ՍMTU0a}:88NUbE:%y˄`S~R=9F[ Bݾ9.OuwUO0G8`{Ox]&"rx,?<}mUO K'PwgfIM]쭽L0 _i|wr[\'HfŒ4^3c%Krz~{K(> 4Ж E0Y}y܀apUne&\/#Z=5uSDºïC&yWy'P_ga٬{7+]d*"%A\_YxM%yg#/i@?})%Yf<38Ѧx!`LúRO]+eȫG(0:a9k r=8=}ItvGLN:b]՞G/ @p:>>,0YoWk3>eeXT!+Y1y/ Fw=hم/*h00C-nJ"%n(2 +A֑s\NX{GBV$eݹx&E*ƙ3t`"F*`ٖd&w/zTr`[t[ZgX*"5"\$<5AnIOwW?z^m]o/A)H=NnfHyð(?W-ni[#[qC%Y[ 3K,O)A~3n(3\Hx cfпIq&,Vu!}$7f M{cI1 Bu=͇G/Zn|[S%/#u3=KXkLM[Cql׸պ ift܈t=JmY$S:$pW+ϵcǯfCYuy8s7!(Ib]][ͮK@bf20-N7"x)Lncib"R_κVOLjCŝMb0ӗ1ꌳ4 MSW@R1a874y%m28s;XSj={ BjDSp~h_=קw+-W= $>nZpxMq#y6qHwH{x)p9l/ܿrTT?qꢅyu:اs~L`tL!,y9safP>`kP^I)r0ףq0҉ݬdVg+Yތ%o6@dɇoVտvl>O7q<ŪnٗŊAv?-G&e0I|ep|+5ra8E鍋LÑceZ"nAߛO]uWNA5G-!U^Hr֢vsMWroJ6h%|uq׳᧟Pc+Dm'`Q>QȦ0i)AY=Sy*f;{\Єiڸ0o=#g .m|o&SQ7dyOvbt9= oAbE[gLs ?Y?Ev)jJ L\􂀀cXsGtS $_سW hxU2AW*H LFK==a~_ }qj "ZlXvJY >I:*MDñE;#Ht,r˖/N'֋ܐ:PSGڥSϺ-6n% 'o=&]l/-%sʸq8eǍ<KHM~;;[dmNyVgW'+<9=#m;cζHL# ljrAN /u >CgL@F sc..rFtn#usK+PV}Y~ H>=d|1[Q~G~w񑔂dϹGf$CA+Ӎz?mjX9Tswߍ>dk3Q{6"u.kxkR$=*kKL>?3ԑi#C( c$ň|&q^&#u֠-B,W*,_jR ]m.Jj3i1ٙ_ .CIV<)o,)S4ͪ:#tL&v+("EY0tG^a$tuv4_XY o־)9 GOX-Dp@ѕx3{̸lZ_!n-u:״y}b-Ag…'[o,Ιs $3P]<;C7. ϙx9w^,QksRrDž/ܟ.,[gd%/kQ}u4zOނQ|02  QdmU6JU#uB0T:Mи@%X>|/yaZin a\(y*+#dwLhգ!Z|-'z?SG2ZHVk Nkhg3b)ݯ5mm ?'QrsCIvL(5_8Q"a̡뾙8o8$`⎞R8Fw+(h1awD{)DA$~tM*=K\M؛6Yrn" P?}9~7̣ď!~ PRaUNR'<97mŗ~/1Hкg5Z9\`ZȄ mY6ǒ?mLHʕ2Q|һgƨRw\، u=[Kw3yZvzӏNxS$#|8v/$]=3ƼmLN]Å ^z ݶS`ӷd`tX)N+#PgmzwK"?`C*bN:!U {s?VZNp)k{`&۩R{,Js܏ήCS)K.(+}u'18~ Ӄ)Q: -[4CD;\L0y~}}bIN_qUR8A8;cW6)9,[l=GhyS8,ɢ=މ:ð=ӊL4r9-U8:h*,I!D$$RT*SBRoTTBE9Cyc_yޟuuz>BOă~Xb]H[).q|I$6}|# ra ?<_< '{< NAvݛ_ kǣ"lq>blɉ#.% @/yHuVَbɬ?Hgex*}+黟0SSOUb(2rI&誯xV";Pe^8 H85 J7 5.g=LP5fq>\ ]8 n+tO;Nް4=#8~9"~rWRwc/\3*"!Yz%|s濼M0&xg:KXO(tu7]qv$LF5GD歝@[b>FKOƶ$(3%'r{;FOCvoO(?}3 ˏ܃{+ խA1ft< IyK !/ZoS Գo=g}6&'٘\Pٶy[fû0>ҥ'؆ô7q9C8t3*=7fYI2xư'q9qef 疌89_Åܿպ>`R=; )c9Ӊ IDu(k`K`m:cS}v _M0~+R۲bT{+=ZWb{ֿq>{(dʑ3,ں7>D ʧtAwHW۶ϓ ǟT]7%3f?cCu۷g =akxq}qkz>HsM AەJ*? }w!E(f/޿;4ḮPd;BcF 0g=A0./PmUkǴw?Yٯz05?Ŋa$).>m'whj#)n#)!t4=F -ZBw_nAy#C3N[7e2.*+=aH^.8./2 EX6`_kiZ71_OŠw.`=|6lH2l]L׳|V(F>~Gq9?+Q#^R3SZqSia>57?Zq8 +M΅ Q&xm o>o3NJ 3v91\ЩQ;JB&Yx,=. NV "7qdņ@En&y3P_} 0`6AX2T[`a%",f[>k%/OT`z=̿y}jyoR%Yv3o]cL{8pA+)esxn9/ .}ٳ$a-e]>xuł;{bwRRg-!5m?2eڌOE7S*%IfSI.rua(9K!Uxf7 *K2% 5*ؖN1ĸ̓H_԰E߲j[<_yUnʨq z\pC+ fFzPNl .K7#س?L*cxEFJ? n&"z.di/i߷]Vm0B:_8S1WȄLWIb\q0*mP!\&O/7d=~ r_c/iv}zpA;?V[i u8wfUvJh9Ty%KU?=F o/?! 7 m!Y2}\Raa zǣaγ^Fܢ 쀽;@Ds!dicNoaZGm^XV`b4wYH9̒u1vMJO'#im8w<+]O2b!`.Wv":*@Z=|'+#mLgdGڹ½yYX. cw`Hz`t\\^zotO?|C!us@z>x2f UgըZw1> 3-lmy1w](kE ->LK\oU:l 0' g֓~~rNI0o/Anq.K?}Oo;MX=,)زH+F\@!O')y,k/9 K3OÜMC>g8+jݟ_k䆮[6>D#vjE5AnIO2UA g8K7\yXׄڥO mT9B.Ezӷs yjZ{ 侹pel7 aKd=] R93&+Fn_au9SL>2p Oiw>Oz*g`*$nb y6ĸˆkX!Unf xtmCϺ"R`r7~wu&:(m0\jU2 sK M, $9=:}̰'iV?tadi6)TkqTKJAl$6qnU̶"@fvk2W ?.5IW=T &Y؝$"}yudkg&\٥:Qy|N3|;3x\v91/~>)Ü8sxj`}[wi때 q.#)m|"`135 Y2 x"+{&O8ˬ"eNVPsO6Cjg T$|׉iRd19N`!n=V8u)O _+BCgiWk}39'ʇWvqųߎU螮,0.$Y;3g1W3l¾$]Ppgg{d)(O2r gq1#Z0vs@" lW>x6TKB\<]׎."kQX`jZBbotc^?[CrծP.T' `ܯLD|>gٞ چx`H9{'kmtuM ~Q/e ;bF;fo[\(o6& %懭{|Ӷ2 $֦&TKr^v߰Ǘ`97Kr ,QY} #hYz;v]!:VZY%O7#U?C,_= 60_NF1rVݧܰD㐙&_o~Ϛv.z'Yr(Cvږ@=q&f;,܏n3"Hv?-O6v߰aK0y/'g23L6C_ʓSXy-q O4v8TA ?ӻg8\8Ћ;rBj4nʇշ+)e}l1ر^~Lz "oǾ36f8)J&;˭Kw6G6\b[ xj+RYj/.;b:8TmmA+rÀZ|$SV=ۅzI6!]-lڥ2{;{=.Y]l%nN! 
gm/Gh!mC FxOe}C3j;د]xQ(TorOeuƹ=G- ,Ĭ݁w82Iņr]~8֘jM-L pH#?.<&xZEC꠿G'dslH%9=_O°/a{dh8mY+M6 NL䮇z_{P!w!Rm01d.ӂG}U2 yUL 1ێlGRzܵ8j5]{k ~o{4cS!to98!& :aK(8޳*!xf/0i7`8g|Ȼ{MsOxzqq}{\zmQY֎cJ~[#m:Wb/m$ᢆ ڊ)1Vyx~X N]hcOmm϶X#p& 6Nռ #&!P3έ-U JApyjLz`?[>&ɏEևU~Xt G<*"j Y&cTXV{/d?kb3+S?U9/brN3.Y4}t0Ԧg'{B;tKAYY+#{~@Mr~cS8V,ܼ'JG6DTm=qɹV}ۦc.}<>d #aDZ4/:l6BBQS.s\Qv,Oz*F{4YCߵnt--'&YyBBMzXl`D9E$OuKOZ&p! z^@2hS;R'3Kac_ڻ?@78? Ckeӥ7r67 ̛&_M ʧUOI?^.gQ1ʰl?-;p93bL]1<02GQY͗Y Kٷ''~uexA|X0Rw =BzzԳ9\| ٚ.YyKWqgVI) (.i;}}l/;#>9cUJ8/U[7RQ{͸i3֙*j*ygßB[מ٭kC~,VR/*$~Ԍ!n4J`PؖoU쟘xo* E?V$Gr0UAI'MujCYZݕutWğSIWB]3dKΕ\8=؆Kۆ$@(zGRn'.z-vr5X;%]h˙^W9"z׶#j+~`4mgbcY>qB*բ@c;pr^PL% U~j//Ķt7 <|r`ftZ9(|]r|=B\cѡW(oqI}N,Rψ,Ǝ̐ -۠GK08# XfN1 pT ~ M:q/?tC~h{1* k޿aŹYoK௕~ :[h䤣 @Z=k!۳h[8]"tdaBj'yt0L[zۄ3:?eժ#M{'/M.ɣQ,B6qsw'O~9\m [8'-]Fu>O:G勡+Gw;JLZ×fTa%O$aS3.Z&vjۍ(Gq\30սSqO2 -y]OF&߱|tF'fw4=[z/`\(@d+!_ GV.(:`ymu8]}VW 6/Ƃ*i,)0pEC$RKg)@إݳ z"^ jdn+wKnӹym$Ad]z8aW\%qKU>WsLAzj6 :O]`8~ytm4 H[L3_)kI)e?+4<7=Wo27 hH5c#!6Q[~Bȸ9a "Cd{`` 4,^)}^e79bݷo ^4uӸˆFfLR0aI!10\R[$;wŻpTE7o|rveqvZuL_eUTynnT=cJg֯zBt!@/ߛ`,H m剝λo8(lfj>Щ{Fa]k`4YMAcovƁp88A:UeE<`]UC"I aK'I = ~}PjSc:lшttpIra9_LtT^D xk݋L.G ))\xm=ez^D376CVx~߆Y({vRu\Fz%BtB}*m7J0z\^5P滄=}>vI",lwz 7.)+)sNmP6)NWMpJTo\U%}U sZNo8fp/;$o`+ޘ`-OR0`8^h5O'簻ΊXc) 3FzCЃ-ź`?g^(tyFp絮%0kٙ!?6YC?|P]0LOK\qG؏!Jd S~!qҎz=0έ+3+nBǥSYHϮ01bϘ\6^[7oK+oJ;Jpz?J0(ftTvŁ:,ivQdʛXG;n?jm/X9T]"%C !*3d𻐲Vh+:jOP3] O$VC {ګ7kW&pCxhMl.bdrXG$|:*iw {(*V`d_Ӟf/ak4$b.kT L (:La>+gz@PHM_74 ^KKhj=a YdXA͒FIu(4 |4>pۮ=E@;tԺ?0`yBayzNc4vjwx=\fajݷ2q8]4Ț ݠ*yɗdR]O%({l7z7oet\ qH5>Pq)?=©L:Fmnv>5@#jd_5 7\M0{")AO7_,ABB1'?rz[?(\MMӓL>·=q嵭/bEHbN}B/b[Zͪ'o 7󼡌U>:?wU/&5vz~5Cvv$lhjAF9WE"Ѫj'tv,g\#I}[s'.I4Q[Zj+ 昜0|)e U2ex8b1*+*?^߭:La)?iMqRHEŒׂm#o0eE/[Bwy>Cns7r m ʍls}*G[ҹwKH^AO]nIǃx3k眔ɵӭz<`"ЭAR:]0.ycb~[ӱ8sR-!yixv/M07ѧ&aޔx Ih£+mMi#`_]P&~.^q0؆BǺ߻懱,w܃"kDHV-Ӳtod(zm:;qGQR^g Tk#nc?#?DDn+䄡L]Wq~vEhn]Ì'sד͆T/HkUVbzeC T[V꾧;@OJBql_s~Ac$ż)=5 j>:& ;ٳ<1zbLPsN~\ s&̉MmCXqjVN[0`vI 3,\a˴ǮO(aNh4V*1қaX[E}t}M̗=L$GAOxк$뱱oq`K޸~gZqz;8h2 2<0(\'Wv%<˙>)_]Y8 h}'Z7KT(IkKG't/ZB1Gp_`Ț,Lmq+R2kuoĴ0~RyB;];χ|6Sl oaT^J4PBax" dˡq/,=fܣ93H0o֖Flh9~%"omC&Z h~FՈFpZ_]wvqOP#X[/c C_-nB]D ۭG.0\eO^wblHJ`fxP9V=.Lvu8zaM)7j#~ }|krRdz*M2EyņBq‘ZXOq<7Jc aw(7ay08d)+Jt1(0z'OHVg|̧>-/ϐl?4C5۝ag&7M/rNT hS) Le~eB{n]1s9êE쏏OPrj>i'zPi|r\"I; '2M)޺@};&w`S,xޖf ҩ;_[SIl:Z Wr4hm- 罭oGW[m=GMq!}lhN)痟^ı]Ank0Dq^gy?ZL+58<$ue]> X;pqb'KbUan?|u<Č!ly`KĬc;t8f9=˻xacaqס\aT%3+'B0[kw6^)_7Mnv&+L'YI>m@d9ԼnA mg]5%z>l“`[rv(M*^mEq;խGF*uoT N}Z q9ķs/z…^S덷Tc >AcF#"gֽ%/KhM g{6_an0(ru|eU?ʓ$,287ֻ MEaw[1{X0yܱa*[B2 W 7 OEJGHfEÙ!?I'|ga$6Fn >;M 5}bA0v~Y>)seW/_8:mik-6݀,AwFM:lh%|wvoWfgqƏakD&u"fT4 ZHT<X!69~@K{P>GlD9u!/M@unОd>̺[ob*dvAY"AM:+\+)U Èw0~yv9o5amrA~Rsڒsb;vmF@L/+L/G<wnZ-Z 3,Xeg1D ^9Nt$Y+~bdwV5h75P9DRX$o`CkHOq&~7)%!{ΦENܗ,|4FNv"Sgu NiHv,ʦ>ٷz4z .fW)d,J5Ñď2$sSݭla<L|,G|W f8şՖNJ3U}Nh M}pސ0 [ym>/<h5b!gɢ˔#ĺе2cNuL 1:6G-qԹMb;Sw<^B s_t,;"x`ZP{Oq2+wB' 2r 3v{G|vV"[۪fb?fI _ JYşܣ\PCZZ( wlő,:$ G&6)6sC,z# v+al%ۆı:q@%4&+U05l?M{Z? 'ˠ4;|płcQ}zæ#]lh TKjֲybw:WQ7bKa6O 6y 2z1:!N{53gP+%Z%,yj[bhr뚺q_~@ށUZ #vpBn`.f/djݚ<޻Q1IJ[kr@h%Y6}'|.2׵oil@̗/Y5s]w%pшыmo DZ coG k6V•}lPQadqT9*d$j9؉d;qGȟxpjp0ݳFEp(+q:b9IC߻Z Yxc=iC"Y\M}{{z]#lvtnu\E*Fq*X޴00'O9 aϮS 8I 7.˲%r`abccš+/a? 'U?(Jae_`n| >9׭}}y@ZJ藬jĕ`5xySYT.mV΄$X+k[qGn0Y@EFO`ݵ;R0E!N)+ۧv[&a$O)@c߳ض~` =+Қs|WyMkFen8&k[4V;#NUSB|tX:蛣y(A2㋈{Ϫ:L)r×)@t&@;E*f>A_z+= -S4}"j#Ⱦc *P6,𤋮4Pq`<όSQleRq8|cNP{n|VOb*4tU8o?xةU'~C<0ՙ"iUb'{zgIY_a$!rV sE&޿ o}8`]HTI&}|&. 
QDяΡzLW,$9RvF|FƂ]@{uY"L3n{^wOE_ z2>vh4"={Z)2 5&6#<AQAH:赫<:?-񣌉_ }TZTU )D`kfoXm#?loR(Jr.B/~Fͭ"&AzyܾsPK`H;]`>?ۄLzy8 K@5k'D?eEɠnW3$l3U L_zw=5]]w#7=RHRrB^Cu'!_~I]YHCk A4 sf{,^pC||}S| ׏~$ ?[Q0n;FWtnT;`>+50WTKbz9=_tSq`>3ks/l<`i˭7;6H0G `Ɩn &Vd[x<CBvSh&op>]ꝡЦ0,<^7,OR꽺CrLgLC וq#SmlXg 2Z5o IQd- T=}#%ElwJaWj%_>llJs p"zfJ2_q`}N3 z$,gXOU\J~N foxfƽ~~xb|k@ۛdM B2(0n i&f\qf*.{9hq~.8{iId.jIV-3 [ڼ%n!](&H~ Ak;'7qhd ?KL-U' qfW΀'Ҷ-Dڱ )ȗj?8NABjP>^[5#9,e.!L[8}(~`0U7L$w@|?;AG\`_z2wo9w.$FEA˅fiotGu U?G}'l3nIP|í럏) mʋRc079B_V}!M[4K-,ů%6 bEk|{'^\li7UW9.#7;) %P'I ;<( 30wOvu쮰Pә̓!O`r}PwiE|g$˭@bvߘ=}}y0{ks<Ӗi#g}? bɺwű}6E(H^\\CpNnI=׵%p#l/vm W<=g;lkXcB$nL9Kiv &vqL/ZIPVnz].tG.m@{d?4Kyi_T#^ǗTؔXN]}/yYةкၸ,nql i}t{aOr'qQyquWm2m 2 M:@jBJ7T"W83%a=,S<8 e=W1ROω|#Y x7#ܽ=qKe/]Wfc@m`*GqջrTq7j0iUĆl|IWiu z A^hHtNu}AyXzTLֻ#ЭR,JT<E+|v)wm!(^Uip|ۍGpEp ~M\ӡ5Ӑm&+y߯S $w|áQ;ZJ0A~}(;,zT%q:;fl8Qc~ʇ+{;cڶ8@7h)RCiE %{D5 6@KYf"Jb t,')Oi;lb σԗaNbetvUSFXA &&Vw@U+RZ R:䙲D)}y42.(f]Q9_U1{Pt4n=ڇtO~L'j`1##k(*@g Сb߼zɁ><¦~ᴚ-32X>r{/0䀟'TǕ6)C7Z%\s2L+ |@!)}SG]}sձI!j6蹻Nu GY.2^r"N4?rE2pC 3[|}t/}fA+]}]UY~"8+mi}D!|jx^aY!m{B. z$Y1ɜ|(H4G+xH6OvH^-W?K~T.,o~Vw%t4Ɖ;+TRn Ǭ \6^7xBr iǟDd|G.h_ _Yÿ̇<`mS.޵˶S%^t#&29jgL ,i߿ Sˑ#^K])X &s$8vxF'"y^kE;K8E[=tY2*qX3hXO]xC'7<[Y u+WT<_^5\7R۵!MҮ5d&- 7:nP%xLWa`yN輠@?y!+☟t蟄U^:h<g6?Xe:0+fب ]ÓmqZwe f{bl`FL|O:z9YƷe'D+BծE, ʸgu1- o%YNYȇ ٿT;=mr L4> kG'zl&)Ic.gO/z00gߕBG(_y~Z0J^K0[ͅW:# *8kNw`J5z GCX{vIxjF+<x jki13=yΕd?1eF Ob}7<'kĖ'܀aSnn P#U6*cT;`/ֱ7z Wd?ۮ^ MmRI*uHϞ(IJ]VteRa$9U ={k.n:7w8Ǐ=IR|+$Q'i!)BR$E$IH{}=~~.~_/8ެ[||OMKrnڊؙS^naX~4pc8ٌdRS @kpDǁ`+n:b[9#W?iUl24oIf9& }^r#۠^V_uxm43:sֈ{@6\=H޸D=RMqg!h>i2?SSxɆc{wCv!n47LJJ8bil sݶ~y+53Mߌz's% a?а4 *ݞ*8q#3U MBR^YzB8,%G%вervJd7V{>Ihח^cF,==wٳfY7|ȋhj #H"R_ıNMW9_lŞVG`VgegPI%z OяXk YFFz]EeERV{Y!goWُCZTC1#~-n:p>$#L\"†N#8g5"qˁm^A}2_f.W4\Ͻfkb:JKVŘ @;~']OLSK%W: .3; X5ڑuG\ KVXI} hyzzaVqeN-/T,],Үd ړ +rAk>B.Q*.4gA*SS4`ٛ4z1X2Ln$X2b\`i3^l*>xx=,Tf2jeoE1=) lF&ZP%={;ϵ;d؆vv]W 2,/4\^[znj"/xuK\dRڮaQ0{PqR;dLfM|p%;zZ׊U~s%S7Ws(8gxl̹]fX ̫sD}HFAU|^ TAv>_XvIdnKqbZr7vǫvK3y6@cУgA8ay5+:X͎S6ռӰ"c enθ{<*-2Q#X]:x`+ 9NGb5b9gүd[1ϋ+/y2, ?=q UrHL T raڥ!5Γȟnрha.Ma<껝TٲO7Cgs]/Ob0ݧ}eu[wErA83_hBǫ8 3N`geh[ o[|If,b+͔(eJ3t\Ǘ-8 ^؋c<(6Yf .ލ)1,M0tM 6<_(+= _ѫ K%n >G?-UsxR7e:?CUwL.5ќFd^!cǠX-򞍚14{`Ť M2ۍW*,q3lR݋s?^ԇZI9:`Z^a-+'qgUt4x|2-6()F`8UfvT f![?ҺkF," E@ZofCqX { ͚ m[LC?hZy7C\Xǒ)PGY#Xby*R8f${yq۸`''Q;.Y?HE䷧9J`I6m -lLDW̦lu6ܱ0+[XaW7nzp}Yը^(ۓ9+vLϼ1h0=~0|C>hL}0|"s qsQWXg\a%BHb w )ѬRSLs8S*0:[jC▘X$`Zf̳%X "t)l@s[ %4_z=Ǭ_!cD.O}\텽׶'şѹ` fgk5z 5?xUa/:a2q!"ݗu(5I|:OόbK)9XM9)@Ohf`6wz)DڍL9PN 9n'IdKod[tò8y, #㚙G=~#JE]5M5ʏqF-f]>9!酓f[ϕ:51Gaaw@Lض!, k7Z󾥸*,Xl[D5ZL7vTs%{\ӐW<;slB߯4a:U|G [3FN2X7}rj~gw@рSx*6=XzӞK.cqLr;WB_+ BvŧlVpdܐj2Po8hÊ]WEQ,GY\BG2pz×AdWžWB8ȹ-\{5?a|YZ5kl;t1qD7I;FAd"vْ̪gMT xjب/u,^ەFrdL[6v0*p~ ΖD)m\984޸!0Su _Zx$7uZ j^8;K42|9#9 R+2qBA*YrƊ .̍ː aGȅ86[~@MQ"N!cc~mw:oc5Y=~~eҚpXL?2 o7"`fotk\0!AŢl򇬸dSynlf Lݞ8 EzD ~&Lq)(\|pQ=&ADW&!mIjDPLÚ7e_r}t/L^^|ׇμ/ FO,#H/bbҲ8i U'`FVƿQ0aiRw;4¾QCb@mi&${)BS Y濊r_58PXQ%4=B[VN~E"!eM/V+ 㘯U9S^lqKf;q՘%Pd~ezԻ~ :{nsM n5YiAA|Ŷo8P,McX`O{IH1N|v ktRV$Y݅z YdAHD7iӭCFP`FGϵHm-SS[0z L}s5/\]m5,J= V90{k+ՈCڞ,q]W ~DE|tq ̔ !Bs]PoUqW@ GiN{[0yN/3vnk!bg?'p(;{rҢw脩ş5 !:02m}z{Xml]5WZzo_G2ocw϶[M9JR36tUR%=47nP_6$8aޙf=욲^w#M &eȭ^CXz2iyMdҡs*cGC,պ0QC}gWmX G$^ØGW;!I~m̺%S &`e9Цi'k!`M; 1$9|Nuia7 U_$UX ]^9-!BH+JٖT#ӎb5:5G1zi2ֽy@Goͱ8%\]͛`g3Yev}%Zɖ4q 7~ࣸͿa4ޥ"V[2<aQn?W/`IYA ?y SGnHǢgP`rt_I[z (E% TYbr\9qrhLA)bm^nc4V({z_ͬs\7mǞ,9)lV`1u11bqʃk絋Y$#P0`yUw,v 5`\_uLJ3v}ք\(;14<5.Ky>*e 7[ QZ}T`,~6rضZ'Wvlk*nI 1ܼf FȳV6z`5݄8Yp `pT5x'5VO~}%`Lå_VF?yb]jP-螋SzwXa`'?OKÂ:KȱU{ L%qbYcs~~:;#4+3O 5[qA`~Z^5Q7bOC͕Ssm Q #UH9϶gA*S|7Ef*~#KPzrGA sDu( _YQ'E@`z+&>сPcp J+_-"-_Sd} }rd,D.2k^" xLw8%GA{>e,]nndt#0dh+? 
.;u]ƞ5-lO)^y/%E4SX}^U<-Yȸߒ0î+'{7*brH.lNFw%Nnm֌)\qc8@u凧fT5%X]jx X1!H?-cٷ؏ޯNJ+!1{'Ϸ|Iٕ~ϾUQv2(oڼ,,_C(HTTV)if8wOw4Y>t"5I~sj4"8vhC!?>[i_].w*}HCZ7*.]4; GSt1E뛚|vkv(S`՚/0eܥ ,U?WuPCBW v5 dz *}|Pw=Dy3C @)wOf#,xsNj-x$XV~e*V{kwf0j_`隝bo gs4n',~")kгYxHp8gc/sǩji?~w-$A֥=E<!3*cގYI+,}¢ :r)BwޯoC%Bk ɭFAVX;`/ + b vnJw-1d6*KDp1y5L0*L{|Xo>J Rf3Y0Pi_L SOrP ?¨lYf-]^Xb]~*o7t+γ$Aخ #硳7:+Cw1`\ <?ߒ5G%wgUBD0Lfm $~8g4ny{A4#2Ү*'CNA%p.:}f0}(h8L:s19?nx(TL!?2ЇSշ;֋kHп.<?F`W0.Q+d&B5t$_ Zkoj𐿗ahzr:X9X(zvB >|U%I Ö/I0;#fA{r&RV١) V_Ca0(~LS+ 8t-beP*G2s?U- ֹ'g;y;y /?I&ů@m;vl} n0 a[9``g`wXռ@EfG | ]h4Kr@~\3n {:ոkτ6#g V辇ԾuA6seUDuaC t0N 9f̊X7HI'SXuvYb_ N&ۓ›viS,Az̿JBѦXkC8e^}.HJqĿ{ 캭]&iW_ ا(dn0gBfK_p灖c %Bzɨ' O߄5>w|U܋䓓jWQ cJ&MzMf}jSw)_7v9l3W$Xt"sЦԓY~R=k/*ty䞵NǁbZsȈf*~j})ɼ%)0{6;" #Dzhq|]ί;&~YZnK>AĜūi噊aÚ0z+5K+[ɫƧe`9S1ٙ'ߚ#}Y4pn^] ś'c:`;׶6*[:}'ɬs&Wcmu&w:x@sǙ*`(UTt(E02[iKjO%8eokxϺ8bDH_҅q'p!(gHn~];=vmz*RS+ύnƄH[wkɤ3CO Ψ$7H=dG&_=!o鐆\ΗbZX?)'CGKu݊Q_5VeUQlSUoߥzpL=` CTwsyI vl"x.9idQ,<0q4h+t~oх_xi÷!uoՁ7>$^$BЧ.ll-\q- 7pl{m\sI t:ìb/`Ule.\) kSL7Z|Jm$,&{C~$ڛ*^\ ]u¦v $m =ak>MJ`Eh(!#+52bĔq˵_׿DzM0ܷ4RmD-v[60⦵- 5;?AW7N=2*y?c߽ l6 `N rEV:li %n<ᱧ>BkiQO~JEL*:K Z`aRX%1.#y!/GDc~gKX<Ve3~[A߶Ѯ|jGAvY?tXvA$+' eŠ\D!yD =+SQr07H#՚]MeK>0w?dd: \^# fX/9 Pcem4uwϩ,(:|iwץ/#2 ^19Ņ=* ;goi ܯ#C&&@mCfcz<Jdd˷>ާBcdmUvvOZ"\p+|%Ff'-'s=6n1>c_{͌sXߤ,vonuG¶$ w`hh9hҥ*mX ow Che_^PI +PǼ;`d!l_S.bۛNF~>2{\S=G(5T)4wWK&MsR .B6 GHMpyWB'L} 6eU@`~rH=MN1uuVǞN!7U7tXw"m:**HoLpG)K5+\*r!?J3k4WDXc)|&3z{NI]#P O>z--R=#b )542۱צՀ_U˸`F{^Ktm{iM<17kߚQn9eC4c|dSB6Sw!J4Jr^=I*];?]mk_@~K@Y2ҿ3B"eV;GiO$-F#8g{I|HQ';{;?p/so<ډ=8Io)3MNv_ͷ)sMyШ5)y5ٕ="#_rE_BFblW<֋LҾjzVr$(fTk'-&0FÕqxrS|* 39A~ uɟI`Ϛ|[Uvr?J f2tlh/Tl2V_+YD¢ݡmH)|27Y=z:xzkce_@T1&} &e1 "2, [U/Uf$(av03Y(@ MTmxww!edsR\&=0,foyk}`pgl2V>Sad6,Kipf,Ԁ '0{itۂŮ_$WMIr0ƭ'dDqɦImҘf"jc:x(Z3Ө?׌Om`ט: 펂σpv|?vJNADJ^ 2 )\ z q~o ̥CqqOmAd ui8DV jٙ C_ SlaBv~ f;2h`ޕrػ8whhޑv%Dg"_`WpfxΏ ]^{Q庸ţX١\Wn^ ׅkcV*؍{c;awrsg5RZuvbcƶWj\=s`N{mS_comC̻.iJtdS@*k->lKg;{H?IJn{ ݆Tְ[@g^OfiU HTo8VI/Gfɱ!(|eo?,(kBGq9\rq> ;zC&L9%mf3QyA;m BFY1 o5OҦ:eqt@ fY߽8jZoHŠ޿]'pnK͍8kgyMP P7 ga0{~Sg1Mn[0{я352E u /Pq]v1o ljф>ґh K;asy:oڲn.˝+W`.ޔyfi/_C\0`p,I;>=gv_tuUcw;|;2$:! tݰ,Wr8hG|HǪP}Bl;Axo/ +/stUu?=hվ]D&s>Gm fؽuWN 1o[0<{EA@1œ={p1־GI΢$ljy`C1Ɔ ጢ¿_ᯔ6;@$p=w Ad)S?#7 }?VԝÞPv=[wB׾zKy,,cNFڣYJ ?vgTR!5mfM}ISKm?y V)wŗ[G95a'<2gnh}z\CrcL ۷Jǡw5/Fا dY8־{"OCUlԕ~ QRdisG'&`4㥖|f}%$^X{qE1gBMGw\ZM[sak.L_Zy6Y .A΀i8hW,'TBzy6ΎZۢxNja"#BAּs 4CHCZ`ԡ!{es%XئčK/iۏڕno ]wV{ʶ 0p}W3HM {WWzx*%pP^O] VHݘgMNbT(ge;>W-^YDO~CHpJI$f 1Ͽ;{?\ t't"2uLC ƕ0php= \C ˀu c֏dҸsfӅa#߹βǮ9%wO^5@b<^]B^*֝uGzrܶS!;=Cݾ ۤa_J t3e܊]!e"ppK-0e {9)G;hcꆸ%aQs9)잺XB>}} " 'qT x[f&p#L0.,;i?46R09$y0:٪<2} ٱ6+3aBHgqX8l,Z>/V)cǯ!s|{?&s(ɩ6?qp3.OrPEdÓk@׉?c8J?a/0' $snk+6FZem*S*zF\x]%MO|iB+k}lMӷvv: 5Y(PI7A6ە!JFE3K)g.!JFixfn?:/["^[#T,((nn9O7CJdћd5z}ַC굟s[ SBsD:?;#VC)qD9=r&S'CW'ljkaPŎJ>rcK’2}I:r?] 
*Vא>ʫL .7VM!hD9s=T2 /ם<-ͯ^#+83; 0sҗk0C;;W\[64LE}'~X$ TLlÏRVGXz>2>_7ypTc~e2WR3?sz lS\i8{_ؿX`-2lxBI\b@W!#/RyuGZǮz'2ɼ1H ])X-!+~ sYF@$z/4XzzB\?mV]t5ԎA F8yb]v>\84SO5̐bKtp_V$Cc ߝc0CF&w>S46顳2qf6ee3%8dwhl7I.HʸVz;\ ԮF`̃Wt uA y aMYR[{:0"e*Rվt[`/4Bu8ܸ'`Flav1w0 H$w^g}" z!7vqMސ[^w$a DĒ'nw_[g-%#?k8"`VfDkP>`1/#qg_b[Ⱦvt^{^+Zn%; H1%6%U`6MBem%y_\yk*_]1 }l\/A'kN)Ր=6X1bi5J"yt2AGϕ uZ ӌU$ߕ¡42 6ٻ3Tqa&} uM*8~G$cPW|ֽ 瞗獤f[63O\y|6֯/L֏иr}EX%Tu">˜1O?ͷ!Jz&8LG4iM\ؖ}ڠj}UU#8+we)k`f;mqɤ Ky0қfPխcB_=4 ׍gJ@}u̴Mff| 6>,mH:AV缾Eb0S)&[#QD?d{g~4,y]v:mÉ{Ю:/84쐉9|B* ["tV4 j~992 LǒNMc2?+g9-L3S)JvjreO>k06&K<s60-tFo}qgٱ&5=t_Fϼ/a/qglmΰ?Hv!.נظXJ?FTE6d;Jfa'.Hzיo')gYǜ|Բ܇2}ϋG&0#pNIoBR<E2)א7EZ'`p|cG`] MU9j4Tqy;w?v]cΫ#YYjY8kDq sk3c?߲mrHa2s3Gy0+_vo74ע5I7cE!yȼׄ^I$s8m4 f=L KLEc235KJB.šFCr/nʧ1wڝOuk?뷲\l^b_z Zg }RgP:Li'cSe> NpEΝt7߮+Ri,\zC:yqGσwTz"JY,/8~]kT#=DS ݷ_)k3pRs*왂G}u`sfSmcv_.{K՝3<5R%]]g|\.-Sf4i2PV}4kt۟4z{~m{wBu;^@ףsVZ0Hh/3-"moV𫳔?J( %+U2 u1)OB㍟V'쑈KȆNU0-\֚Q/:嶺>LU3'pʋI0/ Of ؃2vZk:r2,49YcuXE-Wct_/l8fnN&>AVsd;.-awOgQ 6w#T޽Qo $5}Mngޱ~e]El oPd+o/C{+ףu/ڳ8*~ewWis't6) >APs%Ql}\;5X}LI=B̲~[Ȅ΅lSVag*!xVz^n}9tFl0'.AKx|TaI{2y ͮOFFTqq1}Q t tCq 8֕f v'Ck&]þ|0'X1Nq|k_F6´g7s݊GHջBs5WT3Xc('y,lS)ϲ$qα}`ĭ9b#&))?6F3A+5j|i1Fq< 0֓M`Lf54v|gC=0pmr@meyZstF&1 &nVV#娎_悱v)Vb%zW5 ;f$L@1]N hX87 Zړ Km96I&?9VpЃ7Y'iJ׊982`^C5572W~ (ɋErgF472m?%A$ /z8[hog#=CqR6smŜCͷ-{\K6ӝ`Ro,ro[b^LBڞO`L˾1j+~ srsO twɆ^(v5.q{mNikځRFkhu׮k_KVXjSel[ƢLxsK_gS ŵQgvS Qw)،3PG*|mpEȶvZJ{mG+ppY]Ç0=^Mág"/XW]FQӯ~{2ef cz}'\:.%i`F|O.@LX~PJ; {Obɚ-3=UGmIбI;S[ypts0{mY,d cie#mqbυ]řW1aG_b\д/'o$ 8)]dwqnꧮmq{`#.?p򺂾jt.X(jXK}W\:;s,$XGmL5Aon܆R髾k{X [w.byAHgvHsFI[UaǸaZaKR{s+Nʘ+z.0:]1TXe *u '/UC߾^{5B8ZQ5C[&zXkt)S+ c昺2N[S=n+8gOGvbMYfeϕ[زtʙ R{΍ag`yXʾ;$o)a+aB8e؋sؒc4kKEͿv X/L9AdM*oWQ:7C{kL~x4XTM[Mp}Ҋ)(L4Fz' 0_ۭ>xa(ss7zǥC,s fg{N[(8Y*"':g^#m#_Ln@؍0j;ol /D}P%7;%8M␕cw,0TBOǍ@mvkIOrlq`I[f+ f;;ԯm[Ocɍ)X1JlZ];i!0-¿*wߕYL+ "|LOW{p4>4ǁ,MnTextٕ+2h2910'*JM x7mŲWNIEYU?ty:jI Xǧ| q%àtSDOWІW&x5qD.Άs6ovNDS*z3ys00ƲV~q0xqYM}=hŵ69H (՝ĽWľ(g~}5Ƿ{*jo?TmT/ E;iaXX߱՝v2wU 6H!VRk*g[ RD'- ƗX@rU[aHl YO;lfaX%&y>j'wRʏ]: cwWbεwf1WnPgy9D6&k^.[jHœ912F?GDx!9<ݗ nn@Ѯx;$NY}(w\w&9$MGvn1k9|dגNűʡ] 8fgKo`v߶ H;v , dbәd8@喳p N5>r_;4o }z"~7s򝓚B Ix*pteֺx3^hz$f9?+Tzo8-mrٿI>m'&1j93<`Cjɤ1؉ih\'2!]aX6P~^195 kdar p ٦ |# O";i(AgMKΗ}i?>hh8w2%bwWr OXc3X{nOil]sQ;[@3}41ɧ ך0o񢕍8\ S?:pz1{_Y|Lp g6mySG$}ƚ-JainHtQY*G$oܗI1scAO9Mq@.(R=3߾ukL#ln0FUz_&D. 
5?U:FN 8N&&ط+Oh{N~y;m~'Ҽ9ỳj}+S`=&e.;>gh6'6O }.R 27S2`~5 Oaùi2랎(pȅø550w9yHlwypHrJEҔY.ÒgoǾbTmF\ Eྻ2|Vf7VPdȬ9Yw #;NQdW˵Pjlb$Iد׳"#J(&g -vW+f$K PbC%\vj^/̞D7o]%`z߃|̱:w1?[q4ۼopZczQ;tm=N y#t^'V.$k%@krab)Yltcw؜ uj?=${C1HG3쿃OH~6͋C3}Pp*5Dr9,%F¸LXf2ɹ5Xnp%'pk6P%B p Z<ˇ j&=E$|?O9^R!Hֳɒ37s Z'1+ )x\hvB{A.%D{e%U}CO앁sA<\Uws>~POhŁO[n|cb0Ǻ 0W&ju|Sf{FyC9lZh|ݿrl5)jvzzշvpΕܑ+,W/aq6{6)6^t.G;mbŏ$!oG8v}ƣȔhf,{sfsVϿdqf4$~v0g=<>KWrH/qԙ`~z]c!jFG2w,(d ,dzBڬTꗑvJmI2(j>:FC\ZF2 2챜Hg0sTiH sx Aަ=ðӣclI{O |]>,#Ew\w%5BsaG|9C)jA<,`*1Kh]2ZrnX@lLU(6}M[׬\ݢ8opaz Iϫ@dضL}]o}As_]x#0IE)gMq0`Uθ}LT~/\"7{븠> O8Yc\/ĩ8px-Vm)(LLjī:@w(MZ\RBB &V.DVdÎjj =tN.1K8xՠZfjX8+hV/lt;Qʡi]w'礱_B߫]Y[{DŽ 指_ obS"vf \J0E ْkr쁞sVٕMeEET' ZBj@", w߄ڃ6:$Qg׭Ci8Q>thi)j`|cF<f%f)΂ܒAQV:,3`>}Pꍆ^\B݅~BW}2?drv-,O"P;-b.Z?[+-$r)W}Y0LQO?ۚmNc'dgq䫫mF4nros*Щﲿ~;iuFY{ȉ8Y+!envU -O PۺrO\F]'4i3ϰ'x Ng߁J7`+C.8/:NxGg]<eHSZ[Ǿ|J^ i/[7L9|,U(~뒡׋75 _jznY%)sIv9KJh'HR ތsό\X\N<@\c8gU7 ~ ]0Il^$ >ˋo;n|X sW;] |B&N}"~6ym~uNȦ:<#Ռ`9RqMCa*Lz7uN:W{dms)8,êP!t"uׇ8Ҥ6/c2\ݓ߃?:?umj z& }N)m$ur,'٩* d2 7Jjg` m#vX`x'6f+tΗkB Y8;͘~%9nܹ̝0΀Vޫ6aGv@_WY`0:(=YF۾]T~=>0y4s]N*$]wv ƕo~*_ZjI`?rjkR7kPnUG-3/K%ҢW& Ъh_᥽dΠ{ ɡ_|W4c3$&м:|^і{ZmT(kk}'_g~ZÞ^B.tuV}%v0׆bz>grNkN c ~&,vPDOao~p\bR&]RsЄƬrb ,Af[REm?faLs>8P'.c;zylƤUOD"Td_ }UDBXyQe缲V@=d>v0y=f[FOS"|>\YW:t(Ci|f·8tu/_Ty76$Gof.K= 8B,A 0R_h<wq׏;B1@5qeY1->/ 7+b|-mN3!FrI:' |) 8\z2@4_Yl٣G>%x,*-|ko^|cND_ Up:hjI$ :im@nQ~.,F`* ]gao_Ϸ0|f f=+ыFo/=B`ai@ɡPGEqYP#3 w w`뿢""ҕi|VpћQ*Za= ^Mcͷ*3}dDžk׺ ቜJP8/S:] `Y*o֕g —8#S@}% \tm&&:ҳwC64v(׃nKwcUZG[̠fS7'JlNg8~){پBM(YZPr O;|% |l텻B7+)09s费BtƝ'[#pHg|3n "32.޺:оt6*" &.CEӽ{¢^|ʮnB@1b>JnG/|5ѽm{Ҁ?_qCNcpQcH+(W 㤳WOy5>|y 5g +B7~5TQOY/DR}8_l.L}<]^MMy`sǨ`(h?[FwRqp}KX[+fw/̢?wyX r=h#0FhFmt+{-ا4?IxSO%>v~{ؗ>>ɯ/D+˅H)o i#7v\qޥm"H}ACpFYBM*-p:î38!Ӊ7^-"Vtzxd5]CG)dhE_;K!gɬׄ>HpHV&'xZ;CTt`N30I/8@euhLtŴOǾU͔:A=^nB#N \e(Kp=XKf;-~77 ma?h}OM:YSŅSoq m2fynjgB_b\tt9=&Ԩl|4, :9٪+a[`EF/1<'rX3qW"PO$PV1cFOVSAHg jQr}"FpF36ߟґEŒt{=CiT C_`ZBMۉ^xRM.}H}hW&yRE9 , v6Bb)QM (@N_eI!:ño5柾&oeغZiTޥߤÉ(scg7礝^ u?k1Grx/ Re|nU?^,U\|8 #dMpNWvߕb%ޕvz uW@Gk!m`$wf石q~Ci(ZUUQ`\ (Gy`=\j:R Nhg{!M.Ck3(VSP36RLd6{x磽j]W]޻sY~悰:c}lǩ1йFABSL Rau'E}|q;le$Hc6.`lߩb|j_D=TL(俳nNq?|joˇ+9!_LYcnJd"oDF˰Lzg&bZ sNīNh~~"R3Э]q~ÙWiͤ}ۡ)m6CO(+KǠ߅y1-:wH,wzܗ=㍑fv2Mzx݊{UtO L讼kGYhZCw5LnbծN lkͮE )6sVR9Jv R.XN=3K l5vK~wvO[?26g8K%o~\H:&0Ӣ# &ܹv~7JpQ#KPɩ,':s:ysDv+2yOh<ʧ*fβJ]NQ)?Dbrv , *;JPwTj4n(!!eѿW%FG3|s]+]K<{~;Q=8Hsy(Ñk|X Mijvȿa L1†М8q#IOU+2BVvt@d,Zdq:b'uы(UIhR']D<N#+{1eA8+rWqoepJ њ}vсR)V-/{rHubh4=sQ,H*.?[*H@vƋ/\3*W:TV6 eK.[cgf7?V=S Y>,x;C)žǦ ?nǙ޺ pV=;D79'3WlhI0Y@R>TbOrtN˞B^ϋ^lX:[kgWNۡk NQ+&0k-!؛1r\;'K7 -7LTHV4Ec-:h# /O,sK`8E=ݨ,މ>ıvVAu[a {]KX{`rӞ8'&dyu;J!ˤj^U1$=j>!Rn;Kb3UPl[kV~"H#ArS;vRt'm/j-Zx?V"X_6s.W ڑA;2y2$;w^/(X˻M5>x)8{aMO?;쁓m$ҰMnwN\MwHHkUovn63 Ã=^PPi!6YZxΌ Mfwą-бj&jGe6 Pb8-^ em5ׄd4 ɜO?Cx{p$Po3<# A CviӫMdy<~6 &3sl ߑcgS}uL ?#1>DWo$QYt]m';Ĕ?OӖV2B Y]?+r˼JSNIrء)]%K.͏a~nzm+uy?ZAJ$n>`DƑҦߎaXԁd/`)Jp޶#BQHp ʊiC NNES,x >*k6 ~XxUmm{ C8S-6Fݜq9QtT>n>RTTHTB%mHV*HH$Tl{:˱y{g|sYdg9دԣthzu`b˷W>3h'.YtmQy>k,nc9k#rYF{"NA-/H\60Cdo1Mlx Cǥ~ۃ@]kbvŞҖ0onM0]㫶&c#҂W<*q^CNX&_o9IBH:0/Г[3`/h2^xa+AƧ[@ЋVof-Cuk iti@|->_)D3~O('o'y͊Ϻ6龳k`1Fݳ#ĺ/i/+/XJx'Ӧ7kq%g0}FǯLjү)ǷU lZК˷ō9[&!B>*trcZ`KИ)`>@`0zkAjŷL($9j>ƆsN3yl Kmpdj>Q{:ddH1g媪6s1}cR8?MI`[n҆4l{a|*B<&a ZH?rЃ- {D¡X$2/Uz+ E(0P H#>Jfa˦pzfD=ٌA7q+{=ʱ5^q̞Ux묓 LlۧV'Dww m!K}0GM@6:Z J⼛Uaz;eNdVQV!LI7yQ ̪]sV8|/ 6w-gq~M}5z)~egsPy U&.8?Ӥ#+Wj[ m73g"mVaPu rt֋ZÈi—FlعaD.P±q{bmG<",g8| PN> ouwZˣuTkrI\)ݹ-#س}EiW?~1:rZk]ܵ^u{6,|t1u5kSkV+B<{ &Z?UF^c4XLq& Kvݗ4wH7N .; n 8K;頳~/|\[-fM-Pۦҙbngr{ G8sYN[ ڭ3Gz$.4`a4壻#_d0iKK`cΫ8S~1}W! ݿ< fç.. 
YG?$1f [ n\2>Z,[ǐ _UcreY,Sp^ 7z^|Ih u&cS'!d9a\ *>l]b30o&P*)s"Ί 7ЏGrCkrl_oj؝(|[UdUOd3>pY"CCmʎnaxDd0`R% c"'ZAlpuH|[<:e 4ۡO25OzAFH}$f?8(k/'5ov<Z -\$t7ɞ%ݴd;oW៨@eauDCym;b )#E:Z2t5e*~:?g=gwwVS`^DRiw֊t輩sqZ*Iu_7 쾃 XÖdBSf{1!pkINdI.ap=ۯDԱ@K90TJ4j? ĥ詜O5('qmڸr-,p-k_"~pEH`}4wfyU91brp(R֗l]Sg. Af{u"hyBƮfgU/d%0wҀ`Wn~m,-9/Kea͚˖p}Qm`*kl (N>'}ׂ2yC\ȼdSh'46EvOR?M,ap*ƲXd,Hj*͡.4jWD%.ťSUV]dO 7ܸnv8= Wa9Z3Xr}.̈gëYd*$}q%)T9> nyhD )fW_[ -nbӨ&6>N=v-(^c/ sX[8fG"͑ISTv篭kq~ >[όZvp.69<4'#2Ww&$-Nf=K ;rT];|q>tE` &<=p$yégm"6N=~_ǣU/O^^gY&aPk̰dNgVn$Ag%`wj)pVR  T 7GccB $ZE/ּN۩c-_ݰWQcs3 ^յ},"t2˔GǰkQMo0LޅU)J$5[ε,cA90M83`8~%NofSZ&|rjż#d+B:@cwqLFQ? /Wʔ"}Pi*'&O bq{ 8$x$m*g KۅqltȈ~~d nuץN͛n>[̙.,\"Gy]>亮sxCY֝.A9[2~:Lӂ~gEWj>q 鮪ցDZ.|M}xt\Js:kP }Ga^@VBply5 u^4EL e5֎?FCYzkW(,_1=Sd8&LJsa1HcHtH@^kQndaOoDy|7 sz~Mg!~G|z?zpjxdM9:ܖF9e^V$/]؍X%w\ G=!}K\Jk9OfVyׁcͮ7^@}ixV,^r7S*4'Xm>Jx' ӧv6pe^h )m/NV$jF;.6Cado\d׆Sߜ&ׇLOɮ`2qGTIQk%I7la6y4XJ<j|ЋCo=7z]%O9&EU}9`pj>o͇=Peֳj2J`/xs 6mhHߦuSu!n;ߦda!ꡙ5,ܣ/P Xkrb% U5Me]oY'X2N~3-oIs%JqM1Hq &^Ǧ4I´Íd֫n_xYAn0vl_S/Άc3OlR}$޷1$w$NՌ+u=J;\t^YӥPXڢ {RI00f2z9~N 9G0P^ Ҥo jSYp1:y`tOݿVM21v,ف3k4Yq-K˨d4^{?~q꾾W4z+suBFtGpќFuF݉7^gWh7oFS9|K`54`~P]Ըҫt=#ah}SYzuZEk>YJ;”#aH`i~Zכּ% / :;{2Z q| ƾZtF{/e`k Y/<̓};A|g\;rm%|z3DiZ>=`Q߷ K; *gD8eAfެY$X=MiO:_Y@% Fvޝhq?qSw בԠ_P5SFⰬoRwp@^vyriw&PUضp|A@iBEe6Ktc ON!EeM5V_:5C/]>0:*{q#DH `T]Cav/C Vc9ּE;g|ybif= Uk]ɏ{ (Q;?^Ã^+^%7LkF~-GCTp57B! y>8\k  <)CpW%CLW6, {GX+-NPvӁl0GUtI :՘[p2ƲasJȦ65# ]0fәXJMĸ owLzBIBgŝa-4aYd+x37,pጣ%ȤBnjZ˿Hi~Tuaفg0 yM]F܊كFIeܾzE?bȜG% |O1;;S=z`>|OdՀ`u 9pV~' 6e#m[S/[3/ޱ/~3|n )ބ%rDj; H-ߋ 0h:}LWՐ!rSt7' !3v7lcf_ ة1G\lq:~#iR.K\PEƩ!uO1O褊O>3=bܑ!tu!ⶉ7xaFI7R}i<ث Sȳ{a7f+ϸ!9i7oJ1#6M-'>.Uٰ|Q$`^\'fn[#FŐHf]P:*]$ݺ}3>)pmy=F aɴc L7˥}=pF-BS!/܌ۋ1##]njgTȬ Ţ|pD뿶~1C&yd2\u۫c݂Q87Km:*=>rEp75q[=Qa7LO+ka^mc=$SG"nq@![%C5Bf؆0:t'rM"nV?2[oZыoAאǂ|VXý3i=O߃m[.!EN"Xie+C1Q@?徠M _X6>^X%oOm\%¯JW"ywJtU0qF{NHGn}=?CҶ#Xl;&aՉNo8V4܍þknJ;~v6g/'D5~;#<86%B9?nRȤaLI}/% ]Bɤic1ZG>Tudza6qYlwy 89O#cnx^Nlƴ^r.B 2AXCǎjXۡߎLbC2%wXfD{ ~+ oL`0W;vSOJLk7i, 2L:GL($PR ]RTy]~3$ uoř^}ś w\ׁ_ _ϭ7 @ ndi>1l 6*\(* dTOOaԷ0Q~g_(+jAh S 2p|M=~lـZ'KptE{FѬ޺zv5(M123 ůͬޟ7?U!r<p"llRcbXN 7 D50mm_|~@PX~%{덴X.'z$ުCހ.>x]\De#ezT;8c3B{*wM^2'`k:{ $VQsdvo3a#=b0gG.F FZk]"`Göt}4y0>6?,dS-~oQƱѐX&); (To6w%G씸D鄌o<+0=.FV3Ձ%6]j[yJ6+M \υ];k¥N=aK^1H 4h)3[ - NAoLj(׏Kj\\Sc+B[ZԱ=~,4>OiN)Y2C23Q+&52(A0V\}xAFIu K-&Hh**Ӳd)f̕1 su8gaB_eOi,K!s} f%X%؆WœK .z4y|0)vz=E`}8v&rǍW:E1 ͮyUL qX^B_uB& 2XlmlɊ n0U3Eu.|Wiʪk*aC/m(8̓e/>gM1HEq]^(ξ~nj97~R'Y4?T`*Ų%tqPšӂ'fEj`e r񽥹MSGy13z02xsD \XaEo^ f2BXz閚q`ȆMAfJ˿H8xr,E`ử.u˼A*S $![X紽xzVN'`4ϲjw 5=lA$H#rƞ;Q`Y qlj!3 K6!Q{V c o%v5z3Sl̍xFOԲ\b{־i[1F8]eלpbAΧ0@8 %gO^WJw~ ad/l f~o|\|qz@8$dvtʋAMc8.=^~"2CgZ?`P`S|h=[rŶ_dx 6d9S m4 X]3 mZKJ@RQz삃wx2s f)H;M.'-"ɳ֤bBM` -Kޢ{)`[dnHieV]}L5@i}yC}XZb/ޖ/BzwCz#{a2|F}h 4ל(0=I=P &'eC ;yt8̳Uo+:kX\PRGf~'LAh+'kn9`MR`5 威5ߡbcMt5aC\6](#o3!\? ?7#ջw B]ֆOq8j->ZQ)#: .t7δgĺj^E{~26)}$ ~iƆ nCs!|.lzi+;T{{uߘ0q}}EɗOpfN7bgs0`Nw5p@z+3oŇ'}8kڽ_- I8`xйX4iN<ۣPS4/B,!&vO^&y_¦pǚa[j,$"'Y81ѻiuwըw`X O5~qJ˪Gۭ;^u r2}|%.`w0K Xo^ .Q jޏf9aIԕkukޅ2읅0}.`=뻋Cd:OaN?$H\zrrlчk[rLKyjhyO_{-l6 Oc=Ds|{%g!:P@,flFj-GiqGBFk\n'irI=0{9[Ggfźn=Oe6GXƑݿnu-ưc? 
q%>|/}:$[@ξ# d+ +F-9{Yz ~qR;6<`mCw^굵Qyc_.FHQzcm:סQ-(#sln{kb5ev_yKV 2C;R~7ܾ_V闙ь 8l{y하 .tž7K$O*H[e E_ \xETEpgB>|ȍ?w.t Oƛ1D8.j&"5Fb -^P./+>C gaqDȄ$VMXjizmtUMS6 ݠ|\5Aݘa =;v]uА "v~j>F_nþA(8gD_U:<ìn&nO`ŖFi)* OiHswj -Oa hUc0 iGx`3@ Z]菞$3Hi`zɗ{]pKQh~Ty#uճ)jӹ6C+0 {]mqtHg#]z!q7p[KfM8uB:k>a=gnU˂dm?`ÍdVwCQ 5 @%.e4 MHX?-\Cu.+3"J&5ܱ 'p玳nH?sK?;žNxp80-yi^9alb7_p'8IVxDf%wh$ 0yaU]ry6O{0| Bp'$ݺĀi|O߂J~e^`Dha>| fy#/~6E <0u^?"]33en<_{U ]E+OkOI2}694^\y';=׊kuZl,}};=cth=6~U" NJvrp0LQ=*}O %mPT 0BZ8U[U sҞٳ3/7qnp=;P'qK06>H}z`2U?Tݱj.|dž%qB$]N4e>PsىQ@Z-lו8W`g_H(ڨ)e-IP}~z%!x|!'Sd{O’CdPwOM2vf1 WIC ؒv&N2bI[uDcگ[[oo[*ȷ9qbn,[/C}1,nh$F0/0Ʊi?65ΆMgTF=6btu3ts~+I?^=BEdx D~&?ܽ;Al"Ž42p4%HǵեMJ*}غ̲k82Zl5g"G2/EKލ}âُ/OV̕-gs1zm;pyu%ǡ*N9byTߙ& cJ<8)?}w2SV/}MKznҕ,^7)0B F"7;2`mu8f9"_.'V Y H}7|  71B@ {\xH}68X*d>?| ƴNo,_ ނ٢aEWq%fr6ZXIelTMk:-KI_{wdžwQMmonӘ4xg7dp51oae,Hzv7SѺ#P[\s¬}(WӜZ[kSAJWjXӰZ.h!v` ߕY}R=Gq(QN"۾ܛ9eR.rK4Ghs{&"K5isJUFΊ ,^:|,6=uNs"PZQ)nfM{Q {"AJdҝZFvWR-^lIXiƁ ;XӘlSC1\m(i&b_TLO4. =XA6 k82+D ؾ? dsW׋!u~1ek&lv;?ߗȕA;U^.]wuB$ד8밪.UGʹ6I^%L?[s]?mX9`` wHFJ^h3# /pb7? 8<܇ `v2_f;{Y]C_r6nn1&;%Go2%HwB,T^׺Q~G8kqxh+x ߇هӤ`T( 慦x~=¼ґ<84ײ䧏e[Ca.S*Sj5 gc .~^=c a|X{/{o$ hNbd2m7%@鍦C\0|q`(;rîal[5fzC ve?kڡ+6dYƯDjc Yx?QS>Fށa6pqg0t H{U-@#a|>]_& ˃l失vy)-nTI_++՘ZСp<[`˄IFDCpڵo2 o7J''I@y`R Ny4NL֜IFE.[L6/Z\4 d牵.3mǞʛ\Q\y6r9]2-d€ىdί*]#tK2KfWdf-&6*dy.Fչm-l@=u,M'C]^5W/~M\ =vZBGa1I=*V6LhJ.ґ3.5̙[S&ِJ;8Vazm+l6(Whؑ3^ߟڠ,oU՛SH;QyfkHJuz2*ˏx/EoCA(K-k+BIV76x\GX|zjq˿b@omG0+*N><ҭgkY',YP{IQlo)/,|t]?,/9R3#ҭ./I+{JM.ƫ?C WH5:%Zu8VxC[x_Uyy_5s Fvv(w|pO-pI*)Gϒl; sLjx8Euȡ37pώv\<"O`Sdqd ϯv|qMد~~';T޻N^$A1nUZsZhxn3 q3zC^2x&A ԰FɚE8۷ L󥹷=Hݰ(+!b-%js~(ҔS"UFH"4,{}="~k}o\.ak+3G2ZL͎ \rz_?پs#fnW!ZiL}:\|a@ɹPW [er֥gCɣt}¦r`q~ME-Ejcl)Ix7_cM:=lO y EO.GBOZBJPmyhB(f_Ă4 8pMe,y4zod<%[%V-jӧٯz>-`DYg/RrK۠kN~ yz;?LN-kztQ8KYkkÜS=),Qn1'Ԡn# ]_ΐt\om6T' ̻6 GbLʵ%K3+\nzVI7lꑛ !דOЮ/Nʚgꡓg$":c1c)\q>.+ $|éoC,|BsXngR7Q28$%yS y )Z;zrryf4`n\=D^?˒#"@-JOՎ:#P2PoskA7Q}9%Ύ]#V=kW@ nuk3y6MiŘV撘*倩;!v}%^~JSlK:szb- +}5]s&{8(踻PT &Z)P FusQmW8ZKvɬJh0\ {ҏtk76J9̈́0Uu4eYJ0R\p MY8pW1Q[x3d!$Hwc2< ] /g!3E;~ I:reJV;⌤=X|j~'R.bpŃ}ɍtt߭ORNf8~ɸW pG.nu(пsY/Hݢ %6Abb]'e{R'ߚoI#}U8vɧOPYS(}nmN3͸AlwtyhS"XP38pΝ,o[w\f=WHO-^4dcW\^i2 QXtڐ=x!ʇ'9K>uܑ7'} z&}ɦ.dvGxvBśO,['i%1owaא4|T&)B4U3@]ba6BM[dw]Co!jqΘK跏f]'#!]A?^+bǗ=G1?砺?+M3֘ t`= `Q*gzhߚBuϝԧ0h0Ȯ?;m# ]|c \#2u3f~0fACʣ9'N|@;9:g++m v힭27ON y $pP&vg9v'SI#]4he'n @CYؽ,v=0;[GVs?Dq?ݘ[G9 KNP\;tU_RNJ>4Ș#m|FRޮ'D0Chڪ&?~~ر(OJf۸uJz3XlΏu1yL36@cߎ "3a̔-/#sz}'ṳ8Le@,mK#yM!eOZQ<GL 36H\ 2}iX{o8BmCp's~< ڄ}Mf2n6aޛ-ޭ];idV.-z [& SNQ7z}rU?/.2/ smS2kCg[ Rt`k=[`6eLÂN y*p {SF6&߿c:*ӳ&q@'aTJ.,&ETXDZT˗Ӓ⸾G, ^pzG[2oaKù_͐v\}vۊ.&bw 03-UEktJ$a#@Nh=Dƿb1Ŵvdv9q/҄0˿.T 5έuX5e@'kv;X8p]| m00( =ª,_B)w,9PT$8Vi&eX[W-bqJTŀL=1]bUaZ~ދ\öS3$f7gሚy8=D__^|"[ jbcx)4E2+W!UTa4{N_qɁ5aC Oj\?,C_ j9ӵk9S$GKfqn!ʎ 7^wpf㺧eK:K.Vp_`y#r?܊TUksuLna 9yB၅;{X>e=e51Ko=`? T6?z(&/gf)I3?h|h sZ-8M }Hf= ɤWYp&/_W3;VV 8O#WK4̿ + uU:д)5[iސs5vMoo};i?o*;Tᱼ$L]2zboY?eϑRuq;\'~Տ0GII ")g9HM*O9P5-%p$b?]J{vsj}P3o]{C:4\| =B iކL Xa(\|z\kxڮ}6״'7Q0Ƈi"iI/M/͹%K"-eX` )U T.k>)ktA AN(`{мDĞ>* b׀``5  Bt|&-wb 3cKW؞l d6eVV}dW*qP}r^&-rYv^&־P|OF;InCwY:J\lSeKۜzyQ *yadֳD EEpPIUsZL6sb*oDq/o"+??LOpQAwf RTN\eUS} Cb*v^=3g4)47@z {Qd5ؐN6!{QK"6Qܸ4p}lX78uce"K$l$ XDRIԱz`U@B*Wy,Odo 9CQ,Rj/0X=g/Z|Pq GȠ/~ ZFYmN\{ɃPGF[dx=.I2B@Z?ۙLwlG²OM N w=8D-!wuqcW[fmVpmڞR6!4,l{5OuxE\C{e70@z ,My`ތ325Opl68L}+L& [r%pFq4B ΨI`?'6>_ǵy{1ll_9:ʑ. ?6=d 6Y\Y zּԸ_Tɡޠw;<1XگKK$/@Onz˭Dz, j ~XU4< B 6/h}zbvǦuC^8#!Ɋ M7G@aw1k 0Z}W޿idG_@g9oV,yjB %n˞s'`ZkǓs;Hbe6  gVUm;0i>% 4^:$?hldEޙ{p=fAWr`n̾73*WC匯0'\~Ww?: gXݩO_tpW!m$<.: M&XYYBnf\]>[ 4*w˓K6<넺ZcF&U܊mzQE#VJ>XP !8vfU=Mxgl3X(g.) /'5cWj"0ʜ]9,\z}7SÅԹ~E/ (ݭFE &lqO ; :NN97G)6۬iKxCg=N8쑖 4IQXg.ZaF*CRu:=^n ~71נ/WX?*ҹL\?B? 
\{qL6{fG<0 =vG iNJ?7abK85j݌7i~Uv3k a}bmz+(? <ߺawJa~(h_*Gʿæ mx_%)ݙӼ#G d+t"[,'|u3pyZ.y=#nYv r" %+H Bș`{~m*wǵvsZ(Ngc'Zӝ b[wVx]o_&L=@黾;v׸]l2_ΪCDm(!X9v<'!>Da8Q=~fQF7u(-iYxK;[<ǘxZ8ʘ`;6v[`>Nwx6{&s biJrEP~neWs$~q[4\@˝Jq;;oaRBdxc/kmq6n}c0l%޹1YogiI?A )oyi X0Oߣ CR]OR۾6`?I~:l+}ڞ;H*G$P]廇_+@{4ppR䅔oC6[FDFyMaְtm5{ѽfz_AC-u) Ł=`Cpm ,$tqE6( wʿ";!#Uzë>sHfnd .X|JvZ8y!2nB| bwsi::7yb WO%Q8XZ ͋)AM2/-÷s+yoN>(BϽym!c{uHez̦ KLjPVR]U!͹9YQp_?_s θhYiIe2bwu[tk, XҶ>wϨmh.=Pccu "z-2kM# y;o%ޤ}Y3>|Xt->-ӅwS:(4q"}?3ɻm6pK@sY}ʅ:&n>]  vͧgj҃=ٲc:0 \+ĚGl V8}Y i*tkO#-l2n W Ka$@l^!%c'mCFaw2Y MV]B?%?N&5=}Np i^%{`aQf"6(dgz҆?3@^{A4dg֫e[jĞ!.O2q4kXGӵy@~Q 8gN+(/`/v*d/K„#^\%w>xbGNλ(-&>PvI=J}twY)hSht':x2PТ~ǒ`/x8 _`b!j49FKH Wc8Jp٣@W~òA:ң/.;r|M=L2KI@h, _Q  ?~]mlWw!UK~ .~3dZرd+avKjb$ *~uAq&{>g\oZ*pFZȷ5csc_HCg[ޢD9HʉWAz_1*_ Oa7ʱู۹՜P '.\}/%&WhD쇵'jaHoR̳+SlN`$慼.VHS"{$)?y mj~_#*6o e ֙+&Š5ojq.׊/˯Zs(3V^C-ݧ ?\ aETR >TI -Ŷ=4Qs(] L#[a8?c2PӅNh((q ا0?~9A*4*,ݽlQÂؕnz59mO]A0NNѤc*wD0mlM+5f&& .aJwI?IrJ"=v:ېdu!gp?o5q?+Lz3)0j$y3zngsd-3&Ws[ bHHnH:gB9b/>l z.puMªWn] ǿOaTҚ0ѥvpuX]t-vh};΋d1ǧPΦ5/k JSrf-./Kϧ)[Z2ϵ6"nk &ˊg.C5me;5|o.*_ L7HMI#8)g-FE2a@8}s־Sʗl&bf;0ۚ:&eebqbTd#/vkUVEH@P+nʒy~ !00n2<ޡF^N;oi%k:8e\Fҍ9`iJ Ϫ۟N=s꫁ǔ "=%7<.̑/ p`-tFlMG6`;u1;0M;q@QA 8)ozm= ¯YFC72|p,NA 0SA$f;ݏT¢<@oX%6eh6KW8|v^;{Ç=#~8pT&w ̖ >sơnHs\o!Ea28c1e=vOQ Ȩ^«)/OqÚg|;`n}&)_VXwⶵg^㤶b%WfwRIe^/DTw=yZln7@S喫Fk1Ӗw @K8,؝[ѩoe\!曉W=ՍM($Lգ_k7äKIþP$*,HcKė/"+O#QeedZ Wƅ֒|t2Dג5i0`-0]I4rln.@)0:ʊsv%XKojƑ'^}SeǚOw/ąGM`a r뇱'_|odWpFjS_akn_̮3D@{5\U~K?nCκO#`a-ٿ~5m'$-IUٵ5wVƏ˽k(ߠ[̘sN䆡%K70[ C]j/qv]W;o>vL0 Qz"{졿!M@ `C+ NX'uC0,zHscأ} p+?~`;q0J-ݩtt&8-G\Oڋ 9m%#k?Y@oDr%&tSqzEQŵ=UFK),0 $H4*5fK^03x>^ciIM+Axw0jVypٺPgN%,{Qta^@NQ۸噾@x{HEuh`9|0}u︨w.^} yp~{o9&~XC9:^j?\&*hVf l@'a/C!tٗÐ:'N/8o7{bԸŏRsIثV Cvytɑǐ Eyvo P:wj`yDa\up\H&Fw|+ɨ [ eXhƃv xsc5(%>$Hy%+@3|6hn(n0癑 )33>QaSUǘ L_l&8x{^ahۂFR _{!!"_&oo:pϝ^|.C?3 7b%S;=07J^/2LY[j!C7X"e'aTgcvGPV7[iгEo5dvSUVb.w-w}i!'Fɬ|¶T,{uM9 x\rf2Z76Yn8qd.[z~!6V8}}CN4>ł\٠ؔp& FCN"OXz Z\:){6C@=kYQeדU"XҙSludVg7aGXwqU,7x.\֛{fla^vMf9o6Tl .xn'g.kU=]sn;)] m?\*&8R &K8} ;{o..'>j@ʷ]8lgoU'4cR@mqW'$=]Y^_!SomTDWM;YTB8v|j*J3\|ǤCZ%ϠD|~'OaT[TM8C{yv_+\:] G閹ߚ]$M=t- EY|`ڙn8\;F3TsVKt[w<Ŧ s=|K-aEfio;W0mM> ;OVƖ$>]ޠ8GǚFZ] >O!i Wdǥopl WL,/3i;P@FJ O 1XO!sҶf]j̾rY+X\r Vm^W/e};/ncndRĵSt2}u$.&b|3oG&[ž+ĸ`x޹)yz ؘ,k Ya۰6(S00%*~&B+v͒*ԡG&]M*?O>q;̱g09rwoU/^=/z_ ?#{^:c~Oײ2L6}i]sGF (_D&yV"il"qНJU DLeQ8nxiAhD(;r6hZF;yTnd¤ER?=˞~8 p mtnSL]f᥂C<.-g#hxD5k;O}cX7"k "6Cvd]I{o뱾Ǜ5~{lC?Gq.@:҈•pv@InSV.~q ٿ̓g{cw=V9xg6oZVn^%cW Ҭ,mpA+p*sj׏(>wN4 el;>|2Āk9H`ךkzԞհ#X~VV6g7Ͽ،ŀ]/L{Ő=ۭ7-WI{#s47wd t^5Uȼ#\O8Y]kq-98FJ"bvݡ !afYԃW\{8s,Xni zƿR#G.Azug{S| g'MÕ{Ͱ"56y1b lsjvWˇƈb\9qi4)?`1$6 )mSW2)q&).2܃~96Djɝ!sksfC\(qhP4G9},زn}YM(;{t3Nn5|UlJcݝ10wㆠ.Xmu|lNH/vy3zEg`.n0l%C0Sn 0:5/n)TWAAun>dS)Iy !ȿ5G?wfs<'_}xBI! ,O 7T΁8+X<~?b3>ٲ5B}'%E7ؓ6)V['6ÛI/x̱ ^!j;68nNa`Oq k@F-bli;7(aoWanK;`lMkߩB&`N*3\299R e  KI\IƇ3&Ѿ{M{wYCN{<җ_D=ysM8rV&wz PG4yg~>VLo ,Ѹv0RtX][R)+] нq맽ys> I#+ΟYj| # HGs'ӯp DXz_ h @Qz`J[bdN77aS?}9n ?a=|O ^'e$9CB!DGu)N6n=~욄f7oX emĠČ!k5 ̭JU_05`4- ؏y)j8/%/ě@=xవFoy_Ŕ?U[[S=?F 1-5kӹ{OEoroر HkY?(nWcA;8Sw0JE''FF2=\Gu-8}zzt7)[@_ڇم#>.É.`\yۿ5@fG껳dA¬,%ZnI@s{|;Ni`/Nm5kZS`d#|`81q@[{ l,|WiNH[@aE0Cj'7bN{S$?N^۝֖BڑuLá0>})DPR -cp5F c!&{lNU"Isl~_}qVZX=—[̧ oRw> D{\RvV =a̘߬*2چB؝CwoƅM4vt`?ųACCs(.\d}⎫ :6+̽' Xމ$f1parUfƧvOV/֍cF&[=*PrGm(ha#% P h{1RoV''?` DrN)._/>? 
O]'3_3:`љ`E!rɮzRI0z P&f3-l=<{"BM'`&\D8mxM[MºaOC`kE +Ec ^A?b.'VOx?c'{)-iC{Q5>sL ym7KI8ٱ\ _Sq܆)/8-K[O(<%yn΄bߦ)F6x3`zn VBqpשq470|_H$>_wmƶ$̪ܒ% |'KIB8ɾNjZ>IKxB8t>V&_mV]pIΝ^R5; }Ex-d:87 B7rPE@#w&)+qݧ:=ЮHK`u8ˆ?9']8}O% p[x6|%ۆ nI3',qQ|eh%}X }ۖ>/AR = )R [_o.m_KHeS<+;.BH8k6Ҁ'o0bf5dȪV5ƏY*;nC$X:=n0^ކ|A|~cTĄd^aHQQ`{]kSu'efmFolqT w}7\cYj[!I|=!*ʐԕ1z;>Å;5LKarCWdziZyHR?m]^2zgi'|c%SM+g 䑺2vGƼ%lo-r㽦:8zi 8qW GD:tZ$jGUMu!L3( iڏ|Uq%bүmKj[_Βm$>(T2ܙ|yK> ,L2Њ) v[dt 3;a&b.;:E2orxaS̗b;\Iٱ]0 ZnM~SqC[vbEP5^X ݾ}[w]nJ\q9F1ъӁ葌&蚾drEap 9K wBu[0nYh3q>| 0pb~/V>ZfR~O>ML\LNG3!FuӀ [=e[?\1Y~] GD< VӖNgjH\ ~LpI~?hl< 'pQJhc[ VAj koD2~K0sT%n4~[;B }˶XV|8P~n|XqV=ݐ%s1$v%ZoFOW3w J8vݮAda_x؋~IXUO\|ZJ18:]cJЉ{%KcrEt_]'aÕޟ>%Oq"E^X;=R,7U寛ev@"E_svS/ ޖâXYÆ̾HzI 5Il9'ê TwC_賶#)V<ѓXs7|>%ojni;hc?3߮t/S{8aD%]ֳ FrݠsN>Jih_ ͥt{81o::=P]Wh8m(TIAd[X7Zz) 00߄|KYvt=y,w~C_CHtpےlm.4נ?m jέ,3Smh:n#kO #='ݟ,D^vm&PٖН+$%׌[دe ե ;Q<Ɖ3ߧ-zac8fV:ޙdH}4woӐ.Eω3"Tr?0\Lݮ^El+x>=9ĵ̼ +*7c;Ǡ\aBfe- >U SVڪ0 UF=5Xaiӈ5_^C ~a|Ofgx=&.y=8Rjg\%އ(ܔL]>o^#]FHKFI=("]zTEDVz0 6M”b@4vs B^}ʪИ9`'mg/Q$@qK&Н4͒ی3n=7i,Bo_ 9[wLhmOz1V  L eDf}}{M%xY`ڇiI)g=~G$N]Vq,>:7$`(%3i*1!kԒz^_ nߎ+w%(㖽WӍ@ VǤ31J f7p (r:1{qSu&OdZ@߱ı{O=NBAI_{ݧd-}0з;Lz__ 5H3+¬F6md:eBXߵFdD.jQ+:Vs6cgzPn)_OM|'/#sJA}:Auf g=5zBupBWz"/`)&.Zf#G!脼V+ȹœo۸Xb9$ կnӁٓ@fTX@9%2쮵ޭNLHp8i? V/w1 :4>-`+<j5U|nd܂ʱ@ڟ!KX~Q8SR.on~n#UsW5ΜRtlNz?%;x luC'0{$ҸoKCqb<]ɓΈfH{˳+ԱKl>Xʋt`pS2673fJ~F:gB* ajkgqԸu,[moOb|/O}IA'xp'z PoEɾueq/vv,;ɸV 7kՋw0wW@m\:W^Ñؒ2в_(םO>g?09KJ[]|a+K9.w:׀\ &U xÑݛ6XiRz: Z?YâY.c#4JOγN,E{H ?1kN >IcCaNd-?"'%;8e:Jb?_R8/{[K _u(P ]_6LX,;p!8 kŔ_7a[gqn:j)$7b\ XlVu_Nݜ cqnϘ=Cծ9x;H7> 1y^?Ŀr {n쿀Yo- \tˈ=/@ۗ=.` H1{[l^$Aɘā?HKP8.gfZ31Iná"8z f(f aj &%'lH]+d= 4_M9RNWԖ:5ͤ01&z0&9{99)qc3\:LW0aBǢcd ߚb?-i5,v=EH;Cw %"{-L\x SvYޢ3>ah̙%<p?#^*kV /dؠI}./zT ,(ɄBkHyx+m\^zr$/}ՇzsU`#Bj kIXe{r);ڵna@^D3qݲTHSLbwM1GKh%QR6rm8&lc Ab U : l]q7[&LѰI;ns{8h/Bg{4cV#XܱiWQDD~ ,@{t(O!R/<2W"m O<n479H -& Wm'8@,&({hf8moVv $8 7h+\qHR軖e`=%Ds;d՛߷.5m+'~t6"ݯ ;w󟆚 Zq<5? ;+>%(L@]K (Ce ]-#λV8C߭dyi,7[y%csEpmP̓z(zf[y .>?\<5)Gd+wlqfy)8|,HgpCǙ ĚIilӖ0=.)ValHEVtѮBXtXK>v_iv '/($;+!InObR=+ m yK8?-wb[GU4ϲƙ&-pn˘"ڇvaOe׍`2qϋn7Y+-H踲㠢@I0A2:`֧;_xs_˜8DstG+Lv 2WΈjN5^=l=lCտ_"އ#C+f6lgʴHan&]3/hr Xll_E\|9FüIbp_JQ8x^-! 22.W|%9y2e, &Mɰ.38k߷o`é ({->;^;% )_h dVQ8 3z%ɼ zn~vO`̲J=Op]i3P3f4>ycC- @G-kX6aGrpWhH>WvAdI7zgߑ7 n_!3y } #+cҧ\G/aU0V;O:fY;P)S 3r)]@-M}Sp$j̕,YRI>IKyoUt-`,VX?dCP"ҪMF_{MuU ζ QH;$RSڈm/o)#P ?iIk; G9q䍘;;`P]XCiS_N{t ojakРTYf^=۱M~߯nf9יƇy B#Ȏ-zW$ټ+WSneZ|x=al $Z bo8a8C5%]qٴyR?4J}"g+d8gVX[VpGHK2RozWhemZ3~HgTMOM ]NШsMnm?N:?]0ms70>ك \Z`QA> plR/'q~(ǦMv1 u[7[33A3CC2 nC>:Xn*#GV=l }%r9"a*nQ6cɐTVh޿j`O7OZox)G%o~g3X3?,v)b.X'l(gfƣ0cU#c9JJ!φkdrTdg\Wհ7X ͖v< $담׃PqsSk2'?:₆oΔ͓Y`kTy? 
dDRq.wܡ0-TrǽK9_Oom#7.XOx.7bݛ R8&0u ?V)s2c6kyJ+\9{?}yXذ#b(8WWEG}2٘ Jz|Px/X8إxY9oEl=lxJDf0r͑hǎT0xl }_MfͫDb3'R%0SǒvےrO?% EqnأMldd(,N} ΅&@q^p?xd^ll7:6Bmv{]cw2.v{A0hg'K1vq ef\vZ̞aʫ?|*@=,W a8,d A0f c |5ob 6}c~i+3}{ 2$(u,T\5G\q}Ʊw΅s|L<)Ŏ`z#vHy}ސagfy2Ǭi(#Kq\_zuhYK8rvz)hw>ߒ /Pr6Ӊ+*²PQiO,8 #=Ü[ A?FomG)vk3[XmPuJ,$[Ltп 0`хO )03)?2q~ ՅòDJnkrtN?*6!:c/rF~_-[:)|ܝ}yL, óPstX%ɬK麐ʘ3'n#].ՈYoNM^g=|kshk+P2c^F΂^\tmrn*\r 9qGG#6fUV~s$` ]Bk"(93U_ XBкAD#R'elzŽ{j N6ޡ6e>6RyNkfnjiϚq憠KYh.K|X{S^;xJƄpy%\6{!oD"Ÿǖw۽ 8qaUtg@=;<>Ӛ9DĖy7Aw1|kqĜ`xwxVrOIAϪ"d3s߷ucO럻۲]Xiir*kY=傈ﶞ$'jҫ&k%oT,p7cFb lG17Mtrch>nz65k\FfZ6\n,$nl4{*xņ/JחVL/Cob:ϕ6j遵cVEvKy>7OHA|dwƐ8)jI\o2lųl[S[5[Պqͺ<影\QHzI {[[O7vu.yg|g[>FŴ3,R%lNvoK0m$=$ nZ6MB vw ߩ*;9X_RDc'msÐ {W _T[zZ>/4)4T07H3lzMfr:QCn5եEe%^{0'[ZKlpKk_;k/DyG xh^ f3eFMhW| OIab p tt0]ܾq%~Te{L6%\>IACp$ufOȆeL=÷ /"q!w%H,ڀƴ"3KE ]U}at^gFkBG1M;ʓX .Ԝw7ЧP\8$ k뜘qe!=B=<8L=/*(hsuZm]di6PS sG##l@X+b{]zds7KAu˼\ :+aX>" G+gX4 Ֆ+_cD*ʧk0cY8ɸ ;2qߛ X|9l:h( ‘wzhRb Ӆ~p뗙_␁ZiqeSrygӄ&pք(5BWhS}6$'YI**qQuqƂtޙ~0T:U<u*F2$@̷M:|VY,\?yK:vLAu*&([G 7떉ЭS~*Dղ]UC"!H:6P>0.~^1ýj+œ]@_M»'k]iu12NӉ'@+L;|V̨iOMR6ϿF3 v(iKpc'Hzur&{1]{[Xv8Ɣ7!k88.K!6Gۼ* $]kf^`$,{l7-d벨ߢ*s^?vdu"ϯ oƿW.\oO& Y1Ή>GNqM\#n-'cn7R`.hHui1ڒf]Q`H3'~V4S߲^=U;hA(; JufiL)+ vD&h|6ɇ kbF;Kzc+6/c[;c٣|uH.*,?Of_rݫ _`E,DʤlOzmԓ6*W8#ISqۋ N 9 ӛupsV%OaΓsdWQzR.vnnk:u=jaRɦĶM<<hBd0tbw9GDnxn'H.J=IF0,mSsjt3X@d6~}c>&Nnpgͫ6l쏈{k#M`ág#wLnBN?,a&!|v $,Ώ}rW~0,q%˒bSGjOTngWO0V5{ĵHTzxĈN1th>էHO%6Zzݩf^}AK6A,qwvG<|jzZCrbUgrYIx֩ʼ<[L`YZf=~%o]1yUz^t\=_Bw^dǜ'+_`hhÍwU'=e 2cqϗ|m쪙LJQg>vZ'ÙW@mQדpWXjdC?uv\{{S4W<ݷPy!LЕoUmLscTM^ѧG/m2ms ח />m)y/͗]TE{w&q"'OY.H$4?a׎m EK|v=tKz*:AH>}3%]:?, r{]SIᛷOLͽJ{qHQK|8DX} i{\U6 H~ 7|=yt˗S-kiZrr5Â71u! ( DwF9ns9ߛ^t"/mg =!h=VbxOnOsQHX-ð>5_.pRkNooT, $UW *UEndž|moߦ 0Uedm};$pޥc|pP@ᓆm\%Iv8.Nj\/8?J3{y=5SK0td<R2]f ֧zװ+t3GF8j~cN޲zt4bXfr~'E[Iر52dzIkX4U(sRHSz'hH4@Qhu>zHD}l1ꇅ:يn:.:E:N] q!醇@!?cyC?L6,W' z.֘?zg8]J3xZJȦicҵ[,djm52ER]EΟ7z|bW&mȠl acsiPo JJCl2DpXggm\zĂ̈́VjCRֻk 8qoi}0v+Y6f >^LIdcKq.xq;[\DJnv!*%XY]ƕ9>_HY89YLJZؤkVk #5aRRpWNS7ѫzBм^x;.xvG,6Ìbn]Qu\>Ú^A{%| _ g* 2׳r1<Ҍ)vsR;B:i8A㋫jF&Z,ޗ$HV5,R?P$\9Kf ?wa^m/(˜^00){>{͏tSKCw87mȤo ѳ~TS>zIOx.#>5 qT:(z:.GmJ韝d27ԫ3jlNPSS#hGwYrX]ϸ|GE܍oygN18B35LZZf!cwEMapJO@աuS"SCQ@6i%p i# {ߋ2߅{/aa44Ꭰkyb81[p]W=X1J\yϪ{vG!A0=Fۍ aJ"@O{&}MYU#ۨ=@ f5= Ot'r@7,f kEbޔJCA0‹9&feG)γ8XPhxR G-*qǞUkSnaBXRW }JZO_mT39e'\k6~T)tg[|ʤ#npŘ$'7®/qQQPDh.5\Ѕ:C0PZ^_|vT'b>3*Ws3qtPY0kק.T~ ye#_ RSw_{z#uݫ} Q"ngxҴev6ii"N [όؕwCF)V zDz} hB =W{BR3E[]qGp=+I0F,gtsxGH`Z+D~S`*Vzw)Иy>26>&",ϖqbmW Jo?r#Nm)!<9HLC&B\&'2a#пdar._O2) ZJ'O8ʣ9wS.ep"ÍU0z) 9FNepyTXSa%/v$%uA]~ mzS Vҷ?l5>!g:͞d@0%)YŸftLlQ3# x}&VtO29|%0y(Oh%~(KcGÐ)O MB4[!7aɷ7 7x"8LGq,ũԔG=/}A -2c t,@aڏЫL76<A;`iip]"7 3S~&1'O` NV*;gzFON<sۙ;tk-2Kgd>tuX$2gt%ռa >鲀0!%Q$"?$$E݊CP-B&Ny@bo}I {J+©k0)pVy/lz,VmۅigMl֓W08d)TkjDT,7 JVPڱFQq%Q)Yȫ[p4r*~>s(Wo"߰.GSP-U}^T' AQzB{ոrc}"⤸~ |,+>DzH:qNmq1ָ*Ԅ`'P@d[җSJЕ.{v&kaɾ%h9k6?~ounY )J صǸ ̧>H07﨤zɿ:کUt%ͷ!JW+||{^P| # t[Nm̭Ғ.~B84#jW;in~+8c,=s^e}™h1KR;X#/SkB^.NրKCA$ѣ~-]t_!-tH*D"bH;o4XbE]D_9H1G9q%ii1Y<43qeX0=z2qIɯ4oS8]4g#jkkob\?urvV۹o#Tڍ G`:oHpBqO7N|܍gy~=_˱˘~^Z&ԌL MUe`Xe8(._,#qd J-|,$Hݽvժ.aW +kIOC aHSV.`J<4+M'X>̕oÀz26_'=s ҅ SmPc+Vp/X> 7F0L! 
92hOZ/k b|d۫} i넍LQ4Ǒțf=5YA"Q2 yHzQ;"U㴐 Hkj"3Z%vԾik#/f˅UP w?̻#-I}_qAַAd^L:e-J'EUkSLKVqͻwӤr ޲[l.3aIG<(1#8v)ƚ:,읗jbG:%=&N!OWZy]gJ*AEdoVUI%O V^;YѻtEyxs`}cWGٿ]GՀr¬[~7}*6_56гSa;c4% |Xxb-GbLz&kH9tU,yi0sE/:a7d[rҗB߶b \9" :nir3a&?Hs 'ޭW@ǃߗqP3OlaR}Yg& \ ${^^s󞪽Y8/uUWY_*Cn\.=-Xz.Nw:?迫EBKk冚7<k-b8hp<>s+LF8\#)ɑ8Wh3<(bn$C.8̣]h%$tz¬J >jD"xS> 8|+RvGߨ1㮃dJy̓d[F1zW#UoiGIpփ]93 3utx vJqFU,crtiCnv4ȶo+q7y}e[~ %ŚUH9 |o 7Srدiy- _n,<'XvXaK[83z5*&sAaN=4*;rB5F;vop@ږ̬a2擌p3E]t4BqeVH~ʺ[GG;d?zs}kɯ:'㐶m8r4)zWh0]IBQBowhDc U(2GV4$ݼ,38CogGu:JwZ7U/c)Cázo<\{PAs+k9v˨XNfoӍzTc! ˤK'Dr?attLigrJ!8ߞX,&U&3_To Qc2c;f,sku%qBد?_4]0$C򣖁)O0"q uքFS}Jks6RtOak#X;US"%l1U q\? 7]<9;/MjB+AooC׈0鼣;h%ut)e{2DaZBNJW/\ث1CI` -޸j)I0x9R'm\5CbRƞ`+}T"`lD.4HkjE>E~4uaW*_iP0;96񄇗 ~bɵ8˽o/WdCЗ#i r4jkƟ'ރ $XݟQji pG89ttg '3[@ǪB8/~urULd?;\Z&t'Zy蛏ӈv*c:&F&I_xԩ+nTA5y1uvoaIkPk.X h!{'dsg9jealHw6Ρ+».s+ďg6γ݌}$z W}^fWSGM}ĦBxlLg9|L2mw!uM,!ed枽~@/{ FjU>;)\M=Y@܋'Z2Ssl滝ߖ.L'w!v/8gy8}TP.ο4?{st58]A Fh"AJaTSEyo;Jd+"pw0^[^Kzgp`x著;p&wg )δK/b;pf>Y)9ޞt3mUO.W2C9#a^&1>f)nMY/l%{"pK{r"vFjH{~` \1KGV*tUAϟ8J6ArKs2wah܅x@ޓ&K&Y*.BϳrRar 2=p ܼo3A7si?c%N/SSXʸw1Lj8s ƭ"]9u#icy1/\ӵ vTnj܁nN8s}j'e?͋gG+r(]<¸ B?El!U7v>\_^ `5nsP{;I!B gb)r2}>i/׭1﹛Zִw @%fFth\\a?),6ps7WܹF0-?MP\tlǦOm} U}qf+ D>% _-LI+Ԑ"1Hbn|Dqػ]aiD=I D5{73׊!8U J1<>80)Cו5\\AӦ:avG^Tĕ_da]{ v@M+0^`1oJ:=^,ݳĠv0nv&;W>T6dsbsݓ[?6A",a\wKg촒qPOHG D8SßcQNŇ{$X-nTUD߯<8}EML .sjoqNǤP(ӲXG~^GRTᠣT6f Iތ ^kX7ʑ"* cMa^71gl=IL3 jևbHي3AWq1b0L|,<>yAFKcTX$ㅂzۨ`tx$T Li7K8@UA:3u*,j27Kc,ڟ`!Y ~wGt._"Ӛx>Q0m\S7ibZLSnOWCs+t8VE%qۧN}ڦsT&UPLo7k-M0Ⲵ,~ungd4-O쿅g qpykڝ,0\iKBq:LiO|}X+&* E#q,h*tѭB\νLf !5 Ty宇 @[^n E_lQu2SQ^ M6^$薝|PÑnq$D2nO-)Xq7en4_14O&ж4DE5n!Ν*|.,6qcr0]҈L/<_RZE PwSҶV9˿O0Hb)մOP cw/,uq0{ VvY #cb>u= :8XX_t8_ѕ6Ozԋ5ycӺXG+ `uҫ]ƝyW>֕ -W`ϟu.v[+~72ARD@l 6~?|$GG-G|6(pw/uoŔ& lSVu(\}p^6[;7ϣ!PXZX~3_˻C?[EZg3!( +gujӬ=طՐʖEszMY\ A-i26^ J8#6rpz֫ep̗@]}K2s8NNiO:2b|*s{8$vpaKHn#= 49]w\@@o(:!4A[7_0͛?@ t:6FKcy) W#8\|^=3{AG;{s^'ӵJ)-`[&ۢpk+zq7~ Y?~^hljG!d;_}t~ Eˋ,8< UI2grx͞]0;ZYw^g 9nXbؕn o.9K _h'&ÁøPF8N1)hyJ17]o']aJ'PMH sU% Kj6Z6,zŗ_:hCga>+L?%j6֭ӴեvVi16z\x^)@8)RkyإIP9 Bo1aبo*xDFp;v3QWxo90q:oכ}B>?:a{#i(;hRX0HJcmE>p'W'nj>W43rCk r`[1.~L m^Q${_q*,jWBG~J \x"Mm`1(>/xf`+*MfV_M ;b#i֍Z*U#}&530wxJkz}s# 8^xAҚȤ0Im=Xz^4L/ޭ7`yF;"=c_ zRM< qcY1["Hc?߆#_WJcwhvh+ ,߃ϋsK$3vvl9,[K3+ Иyl2>$g8m!?nZ gϨ􁚚V YΞ{z:dtrqS;tH^|CO@W7ofN&$qȶ(hıEqI5<|fF'R"}bx{S1ͺ]59s"pLGJ+cHjlZ90ߗ 8\/N:~K!,3O8q3cFvNZ#3 j*WW~9Zϑ8|`nɚ ^:_Jok)–`xEyV\~ndug,дy #|Ҷý-x؆OزUM9TZh٫=l0v/8)v=a*1*ϮI%`,*u_x&O#Ceev2L;8ଵ)L-|AЍtoUݕ4lhvq{{e%2]USrTh۪G#=k]oq_L'|zo$Yᶤ/@k=OޜCXӇsO;2`C}U5\4yRqFi,X jtFG+> RcƋ=JG(R,te%lYͩ8f=rkk\ԲRyo$mu?ܛ;{e Ғ_pi *n"3ʰ [x=dmrFft͚4ߡ܃eMuxǞPP-}ݎ !<Ќ=OquctW9L%gӛCIH' 3gpі^qnxkLg*.s,.Fb̾Pޮ4S%x z%\܇},6[걞L; Lh>)4"EJ۝-jiI3rgM8vnQ\?4SH8Xe{ԧ8on357DYꏽx[eD49'Fo|F%<7_SsKv̟ >n{7Rힳ?1Gya3+7qwTXᅴE>qK);c.ͯòh4/nަ)G}(TErM8Lm 2usލ8Yt)Ϋ=aۃGY3Bcv}6g"KJo/mUܣ[W.TMq3{p3ҏ`ϟBwppVK{dyцzoܒFp=fâ;G-IWƹSJ9aݽw@%YK.T^^K3)Gpk Xe/WkGmeFFO7s9x/H\ʼngaBry-qNY\Td-# K|ū-A_⧃NmwFUlz(w#ND$`:ϔF%?(7 ]Elas)\FE dK(2 hSfX" '4A7QUſ(Gfrg +ë`v7w''+ n6 YcI-/ӹf 3 OdViw>> |av>4ʊd@k饫![OdEn)c|Cpip,ʃc+jaiWXo@FcvZns8aYy|0@ 9}2PR)> _9sOXՍ-tƞ:!9Σr&j#g =;%ZEgsVF?qf3y2^EüB;ظ3X&|3lc ub%ad!ZZp/c.r_e->]Q@;_f!Azlϛ̄"o}8fO_N|",ϏnƑ.]XW6lZqf6 \F*|+; ̧]!CB7r0tPur'p&*M_&YÅZtǫ[j) 8'G#+ ti'=ROUaFL[=Zpǩn T);ݣF…)-0tڬ9!bt!#a;O0Xۓ 4aDj?1?Z͋߭,%ͭf^ ΧšmC|jwCE֛b$-`q r G|{!jg1ׂ~;/:{nQaCz]z8,o vƧ# H0yݭ't3 ;>%ⰸ]iYL=zqhCxL ,58;U DTt9۱=x(@r~0R"NJ#I'`1n]EQ{fdK]&3`.?$( Gc빯CY&0$Y^qO?%dًrm|q08#pjKj8ǣ).dkaA1̴BKzU抭|a{lA:"ljXQ/nIĒ{C?qP8> knpL^n dújd9%'Xqf[g-,$p(LY+iWSߑ0#O %%3\u2 =/$Iyn@w+WlvR.g4 v+Ž%o2߹=jx^b]fҖ _vF5fFY)rViÚй馚 _q.$oU|"}x#p))l铢Ǒ&Q[R^\(b,U~e6"}ȩyb C {bB8a3.Z)2 _+58xr-xCZ \}9wƴܗL`hj +o 
xmJ鍛d̷/BпNR/h|t6ի"da2@mR+73hcžD-@Oxp(?,r=\aw$nkV:B Aڭ'i#׫$!-;w`=VW1O$'^;c'UBL7uLsB2ˋ̃Xv[=t(p)gςD;}Λq]n uw=c63RB?9 VnwH+/!ߐƅ;`=Ij^*1XӪ!]kqY# &tOB_6,<_ogOŨex0FM+qMV*y&%Nr͛o~06N[:/[/j^jfNګN`[2qELD;dM ΃}0KԕVbS(˹dYpgrq#HΦ5ez!q2(x}wB.`j_,lg_=v_t!҆$g6@# 7. $!˘U;Ѝ,U5{*:_ljz @`>ƘE5`5ٝY_^k-6{{v/N}Yˬ|7 .Yܰ^ {T#a/ ,\ԁ_!^߽~tky+ q;|N'V8,=igajpi]V#G:ֹW}Jܾ7 ( Ka5HzOUEk6rX?ӧ]?p2oc)OXw.޻s`%E.݇ӻB] 3Ku N#n2 y].Э8,փ&7}A@_ںʊ\=Wzk[^8wڢ=i#gj|jmlWNa2ApA|V6ckbY_](m&|>S8q_Λhm9v}sSp}j U1bVO纵{z3_|MVB~^8f^'gTo yuwea]=C5xB*Ӻwn`V /ȋ fl "#8Bz=՘{M&'EkwiysϹ!5󬚎9֕Ei7soq~pt|ƹ9>Kkx%6`| Ӛbm9ؑge$NP`a<Hl5s'aTNu;m6C9Xؗ- C^Bމ2vB֣cXװ\ހW%CamՁḴRrӰ JQ9#},oAg8TNF1KvSfhruSdÎT `쏰Ng:M,_usV,iUP(-&?;BbpC:̩0Cln8q}` MڼT&FG\7.@dAwΦ˝]t=n.U|űG­UXzsdn9LM5kOqǹ,lxZ:qnmǺ^cƮlhDF0gN\=t&#pTR ib2KHӰj$H7LJZCډJ{ m`) tl(tMR66ј wׇa\X@z`ph"ùBM`B~yʅ +`~ΰC[[*twz՚qe0Y)0-" >Ls_E}̡f{0;Z1LR 7Zb3=#J I.^c&8\>r4+^s{߳dL}#]P#v }V8Tz3>5Vb} I*yj;M׎6O2s*K 0x4o ̥3Scj]avg{%Nɇ֭L~KX! tfpi\hd<65 ?G#3uM;8K Ky_쬻qZI)$8Ml^|E1\cbcJj%-TƨEzG M`cXzc> J̇HΦIǶMϑzIS)oZq0>AV݌{;a~hQg\̈"n$&w^u'i2fP'H&Wp}Ő'6wGRRed'PIR$iIZB**BPI}z_~tgx!uN OҸKlw,K*  ?P^R۰ATEum׵ mVu SJ2$rqb_Iw6]\.>RX渶拵:Ώy=?QOzO%rd(IS6g{׿0ߑtg=TSɤ{ `Q)OF=ܘ/壟>Zlܨ5,t30_69 .cQ1~+42";2;۽۫oAO Ln%K"<"ytJz%D1>"Wj"] Y-D\oٸɳ¬YQ3~T v x[=6K%JGŽLjVS)m-0s-fY1h mV.3PH@e,`j|<#z޽ ׬!\wq z\H06ÑJ2mwgY RK*U 6'#0fKqX+9=#3uD˚XQ22LRWh{ Mf|rN9<w^˨_yY‚fu-<߲r;gH@N1fgڢU N:f6C `Ö΢ܰ0D\{#s-`l%5nl)V ޾S)V]JЦphimL#HɛS`yM0ٴ՘`?gێ|R~nW~i| <ZSK_a\OAK<,/xC{ſNW tvqAʥ@-*;N&U=mj ܪ&|s2Q;D-MVSaMZN(m953YP V{AO(}ݽ)ˢI_`c)N٥RA~Mh^kl{yGMLes~7"Q='dX@o~WDAhs^&koY"9/I% \׃OMN_GҚU8?wʺ)+:]9} _MZ﫱I{sX4:nW9[NI02'ꚪXo))7t^(k *V@a`?0!WMOWKIz5kJPN ~*MFI<{Lm2'>) {%РJywt(38Jt{ڇ⿷XW {$^NЛ%/T)٠o%D/4=S'3ig_I}u}^񴽙 \vY4Of#RO }[~؃_1ؕ<K~tKlh2,1qm0~gZ X>=dAƩ8t"\z>ߓXS*uĕ(iTE1/vwX*ןWZ V˙V0=YJ ax&t%4f#xį}EhC +}{J8[W>cZÎF5n`_MWU|?a'odoh X ERvg?5n.{ܯ"r=+L%C߿ +S7/iX3wD؉dWeSE1G|! o0?{oI8㷳DG=*bJĐ}0*p9 2ܲ9ŖOn}* $)A]EGL?!>G趪oy"\`[uM$_*Yn,c33{cN!t{d7c|n8pkFJ$;\]"sGI>`Ǯ']*`.wlQXe'3b>Iqٽ~ؙ2Pqq>P/2*+LtPgLFOixJŨkWZ W2=iW<έ_Z668{l ւ6 GnJB|]jU3%X21-s}<ĎzE@츾#W~kLE)0O<,ZÐbVMSdaŅ\tuxTvc(~uXMy. VtpU6X>{%AwQVf/t[1X]ye;L:yyA|/1#:7jnBe^5 ':n%f?{PV1W zXcC8VC&$o2ڢP3L?t`b3\ӂ%mSzZ=/j-gMOqF\ K}oDԣNt#ͽ8vK,4 M5Ēnmq^aJl2N}]vQO fAXss\{zOh2`XVf;dg]d#!IO`ErYbh6*DUjpZ$|.i6ݺ )!ىMFĖӇl 4-y\9++Tԝ1ݢ;u.Tg=D8(ʹ}W+q:eszVFso>RwR@,! +c9M$ǡowŒmB8CO"3hk-!OowJGLy#t"̗x6Z.G'Hg^tB<\zG?3u_qjK^5 Ig$_ii?2鋑9Їojb-o(ݻSyԟ{ %y#+H z,3m2GTlvUضu?|RX)Kd|41ާXLͿ,D¾f{_B %nPn5Sw|`|܏`x>kFSD%eP~1\SRiX%d 'j0Sԉ a2d^b[ QYk8rښ5v |8)E"{E^B?gh>*4ڐK<_Rxɗɹ_0=/X2-O8|y29 JPx]}М=թ$NC0.A"I~H+2Dr^ekVx, CCt9j{le؎?2O^JNjÀ}{&@t`#.c N\ rFUIC̺s>nZԳRUu+A[j4s4#4IU o+cc×p*+KO„i%}*< ;k?snl|R͛mشfPv*<` nfϥa98王^HbV;YWua^r#?MWž;nOwzs2xcfn;R\ٱ!=T[3uy}R|.!WD-Ľ-/>Y w2ף9`+~(AZw0UE_f_|[~#ŸsXXʭytʒRe?δ7Iqتc v zGG14J8?@r7֦HsZsE|%}iMS_?~#oZ: = dJmn|.J- J\?L6W0=jGfqǑJߪ5?6d.q6#zb˜㭫"`D~nSVѝX7~~R@TJg4#>0 co`!9Ѧp4+8@2%pK}$fOt-X@j"3?*%%p =IןsJ;lIXһi 3dÀYYkVYlú\T{\ q=wecPd< MˋÜpgܩ W.uàiA NRoAqh>3#=nW3J} qaϡHǽ r>+>8ӝKOe`F0)=-+xpm)J9cWO,t=:ެX3'RBLG:W߭o-w0`EK B>%ksGq%;J;gz$3pF8_*vڿR y6.Ȗzdf ;=.{|`tSWs^Ղx]иg!ȱSWăj&L?!!m0KvFZ 9Kḱg%.Xn2dzcۃ~n*De]ݰWSO]1OCՎc×ae`Lʥ$r"Tد 60N?f|yG,[p]ChÕE7zP6$ "c~>|Kƞ귴6kx@T=ɰhwq:Fe? 
&]籹`(w`!7;j zҹS sʗ<<왟6'paaBBwr}Ґ;Uei+/`:X=z"v-򞚫fvQ.P?4꘮ 6YO>BE؉' [Y:(O岩Z1#Vad,{V:i!jSHzy.δ a}{u+C b gԢgx":l%Þ~,ΛǡzIi _}aKa?pXN΢Ʊ (_!X~n0|֞P#`;l)zLrN#٣JZO`y[ƀ LhA `iw1g0yKv8N >~_å&2MNfмE6y2:[qM6o+P/4 cQ?0Djj ׃dE`ґ2TYwXRU8QsfN]b˳LT?GR /i V 3~jYz(T [;XD7 /_\o$+vh}.z54Akc.+U,IօqmVΗ\t8a툼w(L9 `6VP{d".#vWɭ$u1 -/^ӎ;:N+LJű j$}p.if8G!{T#[bpݎM@);ɖ#K§Hh;TfL ׵@ª~a ĥ?-^~ AJ|SNBXwz1=yg;kY&[_H$dZ|K)&cB'[N՝Sp2nf쏛y&t=`Lx( Aof$|V_3dw4(|[ގ7fZ"G`]tT?Wu-.:>/ږ:yw6Wqn`=1PE^[JX3:6-ƊL54JzOD=}~q\ٚI8xo;H $W/L ƤX;H}52tc'̡;Q_ YӽO{SgKn(Y~ J?GRj]x?͈&¾#e춷pRZʷat;9/A %/a)vƅ;0՚x Ym#Ф8MmT\Iy T3]%nM{{s;T%${v:0ܧ$>Iozp%N3kX­ww2;߹qf.ޏB-;!#۷3r?ŜNAqH Xw9o~A  66.+u,ξr*jAJd9ߙNЄ ?AOoVWI%-ZpXas'̐>xn?~89=V 8Eнߧ`VY:9neʑr\UPw8ʸS ̹SttScUBG+ȋrtPmU4ujl#_e WBs|Rn2Z ~jD,L<~}T{]hT|sLjlWUAXr6~pm^xR*y986ft45{B : zwF'ױyofDy@DD'g9 3=HQZNm+\"f=-U=[>|ű˔=3g,oZՇ ~pY' ~̟GDaN"[aؓ ֱU׷q`P-sw}J˜HF1ACb-it}`lL?0#`)WƮVD"ݰ/11jSL3WO MfZ ; PV|48k^歡ea*"Z04SuAx|W;[v g@tGh;v˾wWY=b^8a+t)wfgZmO}=WK}3ڽ{}P6Nul'4A״k i_¥sZӖ_2؝]]GXJNLX ̠1Ff8@$?yFYirm.>p=!W8FQICpZv}<$dwn9-0234,d!9~9 xwTt桉h5oW nB4œhDS89xp8I"EH3 3<GZasƷ ^yK_5˞Y怀*8r!|JP~Cg7Bx_!hZߏ>|*s^:|=6%Ǥ:\Bp#q|iVafdrٲL`rtԮ o?_ys_$%#7ΟYZz7eB8hHy{FRE-wܺ0˸3>SE"uّn%6[ۉ3b|=4iHFԤ0V~2/+I9AVQ#*7N!%&^ :V0p dI JaAh+pFJ!ߩ4R VuLrC6Wf-^%Rm&U"O.|Qdx1Vtv+'."6nx'.QJ!< Ռޔ8qt[q?KF(}7. *BhdT(_&ؤhhq} e}lSS-EŽ-d`nP$ (G B)7+i 79ӗꦠ!\6|z }?mh2Dzvul<;-t)eVm}UZf{`i ͬe_^~X2ׁDaI:/e銶]"30Q/`seED+Ҏ5.`5`2etx.V&*dv^=zu4ԗՁ6?u_t RHwNtY#U?,Yoq~R)ztjGqnɃ[pǗ |4_}; r&G|l ?g;s>O:O* أ-n  lm?H̬ptg:DR SY7a_e孼bdg'OXycO4onY.=Jmt eǙFi%y/mqRpY% v_6}YdjFz l}{ ]V8C~~kt-:M  \ XG7"[8ʿ/1w9@|(jޅv oۮ0$dv )t{qS/7J̏{:{݊)LO17\.'TXa5c[0f} Lrދ;hs vN 5v7[kIT1] ,zkx_[N< ˱Z]/w cp+Έlu^0Ữo&U.̭ 5M18&ܣjJC+/Uŵqɜ%'#L0/Lן]p~ O^>=!_ү+8沇xHBCjn--X޲B,OjJ1+$ D*l~D ɎfL 'R*lӯݽFPȑLVضy37 ލm0nv{AעAʏdҭE["XYb^<; cl W)=۠$*7,ZBU<%?V[ʢ^c k:a ݕQJ >' 5q)e.PՓ;/c`ӣ.CbL0@M#ߓ)yWIE0_i. nAf:`7uCؘx{A{4t'E9pޫX-ly{mc!3{:.м0Oϭ l#.戔N2%qK/L4w26Nsk9+ $Lw8UQq"6+saOa[aM X+ZKP/ 0?t C~,/v~= ^]&8e'0Ki{wxfs+Gs.ȎS,io0*茋Z~V5ٟOnN&{Κin6Pr^ł YУ&;#MEb?#U| /뻓ItͻdE'+,*Cf?6;o"XzK{ !CWSK1څ`NʗA >}aг[s_vau '\w$<'Z,a}%a[l/ rk9}DG E}*KkC뮝B +8~zQC &')cx7's54ŕ Jёs,2OO? k<@ ºFf֬VT.Nsv?kzcVgǨ/tD\YXIY#bs؎N|"bd+0byC`QA7c AuI!پo\16rvlgi6U"2Âӳ==˩|{o޸đ~Q ?Y '\A ᫵rPyiQt~H<~ )\O"nAyB(.-X퀎,#N0W$w%ݴ O8sB6~yq𺟽{g:qJM8{dIgLZDZb/3Kә̾cQn}Wܼ/|vRzXQg?$?|a0hTecl;5}%kW$;Щ`z/7 {1߆seTa()S> וwTLyx>r5͆L;%P.w3Sa"U'M/r"-F6L-F|wo}͗EIuuvgXGm)Vs=fڱ(N =&HKKACMWaNSmLq#i|f%<ǰ&z@YHgt|M[V׆Wς}`Ni.cNZJv|쳝]Y~ .e0w1BsJɇHC7.$lg̿ 8S}߿Я6%YeռСd5`q"_/w&{?3F, kaҕO)Or(pqtRqt/P(y 3H_ kKRE&Xu;-I~Q@GkZGl Y2S[SH זùiY~a3 hm49b-<+ +l`?[x81o/pډsa!™Ll㝛D4 Ʒ6-ٍ} 1~{zBc7. oc;R>?J!/޸-wτ*nO> \lEqksOAh0J̒`&^bah:l-~"ݻOFZDeOka`۟0QP4jBkԵ+[Ruoqs7BXrpIq9k'k~Vᛷma9ITr;eÐ9!~4 vSl|+gp DO5/*EOŒ3n@utE'ߊsbVeȽfqx^i ?X|nE~ \6=I%'XzvHGv~#t͆[`_.q q?o*ISR/q.$K{TO+'#..LiŁWfmaN.C+:iK v]%q3+%bN:'`6pΙfᄏ/0@9嗋*Y8*M`f"3T"z WҿDa]Yᦦ"Г{ps4L~t! 磘eJciqU B1w0ra^"Hèq۝Roʵ<]Ɩd3ΗCMJ-4 *ݒ6d{$L E%j}5[.ȣzs#wI{t }vᆗ9|EV 8 $zU  C{E~6 aH9+oChrkvM**U zw\;3bUTtcB6,%xgfFf[fnr%v]}’lզ LG?+yt 6_ױuοgxhzM͂>0_T sEtF EԻL1cl09F*Znpf䯧?@%r5CtHd_yL{s ۪;{Q(fn;Y 4 9d`]\R+ZոG݆d~*|%d9.e,@kk =q@,[Ä>*dG cUbk(k UWp&yX%~-OU$ty[]* aqp .B|В]O[ƅu0iMi .*;{¤Ko^q?\3q{o_o]VdX׺gw5T~7/3q"6HKzQ`Z~o幠Ie>(MxB& %luO cr048ZL|RVIivk!ϫCIfzo9*aW =pNQ-z Vr+HO>t*rnWF]d,=ͥjEu|8|l*p$۫,^̜۾F2aù#0w{A%=e, 1(*{06w"GNO EsUo Ww>\l[EjW6o!vVQ&tBBDPMrr]~C0JAJ4>lR(~\!};Ʊ7Li!(LJrqb1LOsÛG<4kˑ5ߥLCw;p A,V;* T~o wv\V29#& od8|gk2J@Ϝ\LkՒL9sc6$zkcN]o"̻y`q^@7SrhUۘv̴t·Q έը}($Mz$ 3fW}s&FeKC‹kέ W?<Ǫ[JkmA̟PkQb͊ئtaϢCbk\TlW-)?F‡aaDbO谀dOTo85ö'?z=GreYpSU qð唚6\n #6|wUux"#ED;W5*8G i9f8Ţye&jL`j-L Яaޘ{P}]8W1L%0/lXӨ;E9ۜK7q/5n.LwՊ3:- srd˥K6Y(R5U? 
yzdkەa+0b#ckeA_*㴺՟~?wΛ(rDŽtf Uv_R׆Md u5ϼXW=\;!ZL%H'kr6x:JMY˪kQ7i`F^C&arŽ59 Y_=Ϋ_j>@xO߳;? v\Wr,L&Ou.U> u͙ujLca<3>ʓ;6ÌF-\;3G`.ZjX>_N"JIF8gkޓ0 6or? {g9h[w acڙp`,toϝBы0cpNA+A2ifFL4o2yFMG뚩':hN!\{h]8F047vMO;RI* z7xS:`탹;׵0lV0Kf;E2yftfMM*; Zũr]- 2=ͅ3TҨgv}JC!\N&΅ży^9 W .mClxm4_[& V Z1'2|1[ }R?@Ihvxklڷc6b͍߃Ly\i+C"< t O?%=Psajro83&mxb'ۓU{yqtV=jQ% '{u[ #Ҵe0kSяCVw2Uuƀ +}2ݜLط'W zU'f= dbj&KS$S=-d2^]ơnlQ3mo}"u }A;ĵvi-:ߐitFHnw\ѢN `f,5} m+ 8qjrRnc,c9Z\G|bu= ~j' E4-.w|V4wR O9N&s ոq!USxkaÏ=qnxH>8 KX5ʺ|Y1iL?Gs0v[c~Ey8eG=? k<回!o Wg#1%Ԟq{xsc5 ~vsVat]+> C~tƱdmhb֞C( #IkT㬐#uԻԤx^蟨qg?eaq|60²:$* &\h ư oa6^&$=)z4 ~=+_ ¸U10Ym#w|Q~r>-*rpP[g8mR)s7gc5 db.ed*.l}dVT9,NW0mriEfJk65P| F z6${mYydG{罰G8g̲V(EsHX :21qtǒ0Wo8s~5RϾ,\2u#ƋoPu2o5Pw8줵`d O#ȫ02܁Pv R•k@o1Q fW8\5¾q}?niCr3ײ! [woZ##gwHP5) Y<`LM,|/fgWULt>K @ۑjVGi@Ƿ`z> g^)3î>*{G 7Yƫ1!1ߜ6qv7/.aV^oY(}%NXmZ;Üj?M(Zb-+=k/AM`ά`n}>1gUCwftK/tnZfaƁBv lr&= 6 {>>QOfIu̯'9/2SsSꃛ9%`xZuWUZ}VH;O'7<Ń-i]6j.O`5;fMzc"0RO{{qي+>8sE1`zT?Ի·ܑH9>7~֬V^̀7\E:0_0⡘߱ǁh,]r>k<^iaw8HqnM ўշa3)k jlΥc߹(Xq{r9[f=&CȤ9.gHf=qO%Lږ0YeR+B8xQngQyD07oyBmaaaΩg)c 獿]IDG{v5mwT=.^: p*`&ȿj1͏rY df[QDºk?FaY$3}^l;{X~I#݆8(h/#0d3)ԫF~7o0'Eo& v$PI˺| {e:N5"nj\Coo髎h%>G9 >^ ;LG ԍ~±h]3M!`77F\;9 yc{#ZGӚ&p`LP08 =G2]-R y㿏"H!qzN꟟%aO,N~Zsw4Ǩd&֔ T/tmQtFJ9>KǛa|S}#pʇL,i`ػUzSʼn8 '՝JbDʣ-)8cO qU\מǂO80[H&3̜׽-PeՇFryLpVkW˜_ƇHeLbck9u˅wZ@ʑaEf?F:~~"zG;E-ғ;F0~ÊR/ۘq#KwϗI:is%2]qBC/u=znHl11獲&([q2[N`W_ʮA݇PTmU>RLæ$grOq4eNB+];)!|<)LdbZFDQ*G8Q9iG-96D+FzJ9,tiM޶e.ٞ <1=/CWlje! rE_G Fq8tlTԾfb4vͻL_5:^# 006 ze c{j"*#I{o, V\}T#ߦ[h|l.g=L, nmq' =[=lS> CfB-3~:xOzT*`h#/ekx.C,>]ram0k^?Onv,*aS׏8vY0[)K@S[aYV,'6)ǃa:4 |٩g~7RE\Y;,ɬcpWz[^88G;40N ?Q4 FeǠ֢T6XDYhPFxmXT`Pp&t|7)O øAiR>9&@9jy%8v&s|a(Kؽ&9L\Mhec:!t^ Au!38@Zơ1; .V f܄oQ0^XBZ3āJW,~:@O珞8ѭ ,M%=PV*x_ӆp&@"&6=8fo{ Ӝ4!jᔸx%Au9~1t\W?TfD;d}_ -W\~MLL) KIt}HX{o9*Ҽ&Uc|-Fܓ$lG2oq Og]S_nK%?  KJڠ1y% .q[X!4T㛳80sDpqUMԕ &c8c.4>oLB& vl˒a8tcfE8i #o[){OҎBq'.+UGqPSg 7}{1]@SyѤ .=G(q탥*+}sa~7En/Ob _q,ukG~ uǎXSC3P3VK$\x5]lֽc;HLut>{gҶ> ߇RTqFШHJsZv5:],2#8mWu|+A=B,,/u͖Y{q8._$du,_2f矉8j.{4K8 srsi:%( ÍM#fSLbdE>z}26ssC-K~CzDƜ.Tt8)؊snҘ 7aF׀H,~%cY'az簿:&"p;$i n!5o`8h2OۏDM)\i阧)$r}./g|YL>ܭ3Ir/֐z~̅lf*J]%턉ئX =aa±܀Sӱ@ZeO0!Tuvht9R7 ɌɸxPma5'`㉋NU'u\gvHũ[Tmd&oAr~ߚӞ4BѰu_mU# u@ȍKоC XBl7m/Vtw$aӕ)=63'??ӅҎ76_z̾T^;R{b&]ه PF4{L\hp_1}=~Hz%cIMpQ\A* ϟ܌ #^LU?{a4Nr3Vvc Q#\c&e: K\`_nA= h/q}pf\n|[%U*ΗgmqnObs/)9k9ХOr0m~r־:HUHIZ}Y +|ˇ3]WGuLzw(p|¢fA! 2icމrkd"٬#{,XyQ2TZUR|Gz[ kSj]' ;Sub]/Yve^I[jn0!Mk%\Ո~\?D qu )4Zo Ȇ՝ kt[afT+{>'vq56{*zSwRǍ3C{rx\f*\6=ROŜv'&֗~ʷM#n+3q2^ߙT2;{ XhjDZ4/ pG:ҐzX0bub뼛$=,uO7pjǯ0x-OֳBԅ#p;s?%x_ e0d32:'~ zݣU8ŨՋ Cob?>@տ7*ÇS_ (GD %,&/ ֨KqIa6M#컋m'{g|&3!ʖHsl1M :Yc#J3e``ϸZp^~s&I!}e>"o`C{y^8/&I&^b]i9hhI?S,bRې6Kù2=AS=)aSoyH}Xԗ?6$ԩ ʛK/i҈P WH(uWKRׯ׉Zj }2]Z̵S HSgC/҆?qu?!xOϑݻ`r*cI>ߟSݛ,nL]Hͯ+N8E8bT6'L ?;B)3GF~72]]$`&K5'!.&3 ߙ=gZM5a kJgI45$ƍ?KGKk-v8W~Y!R?Wns+uu%eo@An?XHzc"ZY=l~YX:#-̞6CK+ XquvX[y':]AXo`_8#׮}yfl_`}7e4aSȴU!+D_S /QYI۠zMihj6hkOO?`~xl*_yGzށeu!< pŦ#k#&hscQTEtWFe^]`1|?p m]tZX5x=Go%%aGr`Lu$FWc\V̢u0lpCgO8ո鴙"4qeRN%Ї8Fn\ܾS̙d_K?//0%`ky v} Y[ɌiE[jpV;| AQ۷? 
@-io㫩P%'v&K [/̈́q@Az\r34 Et"m챒;%8՟xgoFO;{)-n'T ƒt)~t( %T(VH>~j^L~U%ZSw P8|MUyD5Roi45z 8Z|S2d1,޳.S?HحVyvy!d>W] nx@+=o7D&AP{?6jt 'ÄV:0ܲ/ޝK;ƺ5(({?\_)rYu[pNnc?i˨'7O>nH0ʺ2$rM$`ffq0s,V\3+1@gp t *XryR #l`_MVhL"8/9[o~#W$2YoLfLk1YT25f V%n cd|@ٯdŒ&ʫ_Pq{N1Τ jacv< UmjS ?T}(UV4KcNj+ en۠w@;p>lC_52c%9VhÃn/ CіW Syz,Vff)ijφ'jr3yL};#G& !,sIy>3u3zƒU68-؄c}&r,_?7" Jco@֭nq"uy%Xk AtG5 )n8S_훔8&@Eb2V̝ 3wUxDf4)s"UdN-*^"aJ9;4{9$]x>/m=ˏ˾ @aCM1.=]a;+Dp߆ݏ\ax 6 14PX#X\ F#wm^Ko +^|a?N݇0)*ͷK&K`auμu&y6 n\9 Aj%*P(,aa{b) TCbޤ62+|(6vِo>g P D˵~ MO;`7D@~Yj%Rpy:_eUX8]%}H<# ?K~\n?8pv@̝\Ѻw졖uFmp&p @MJ8g(.`Bb黏 `OHQ5 !v11ThPxudfx_`V{Wa"6>r$ۍ[<EcޮVC\%,a-{.K5ʯі`,a2\>8z93)zZ8L74 \⪕@OD .J_?yamѽlfL=v;}_k;ML戇y-8=s ;Ap]}IU8fP`Xx';hr["Jk%޵edB§Vcgq=Iy:jD`Hr吟vG33aY <~ ^s |#H#G(#oμ]ZrXl8,adh{uY{9mƑݘLчc+dLbes{^J BB2No2z yWpj8&dN9 W>-?Ͻc5e*߰Arb/3tj s1~Ǿ-kXy~k<="XvyQ~ 8_pbC'p)Nr$IQi޸6.̨i6\ߜS41 O|p &nOƺusF)-\;5_7}?%?<Fc*tb*rC:Kl ;nHFv }K? O(o0ijha2C5;CkAhb^Rm/d޺؊@ȗDMʞ]HIap҇ ˇ~ލ3珜$GN0DQpf;+/q1K#'H[LO 5_C#W͇0*kǿVhR8& )7bӼK=5,H/ݷ\75?gE v%`R F8qHu㱲g$=G`s/sL6~Laqm q5lܐ[ 'e=x D+t{c-@*G,7kJ-+cB۩ú<Găդ:Abζ| =m;k]}x{Hn]7B4nI\*c La6qnm+0MHƲ.d rCc32Z RBjc(F(e"O-a%l>=9 Xn*90.pn ̨)S*gfԊ܎Tu;.pa&xÔ鏋G+NycsvwpQӗ0t4st 7c ӂlCOqNRޱS*8s$˹ISܯKÕ9W6{q5,{>2^LwR] :;a#%.\Tݚ+6;+WOoZ^]]}Wod CߛdSf2H8>p?Q#dԶoV!1XnyfwVԋ-r/˲fu~6}73 C10#\>:x鳅$ެmΊC պva0:Mb.dV߉{t~N8AG G1$jΉKCb.mQw[Bq厠E8k<1w&oHH a!vaC8{2G[GâvORSß0QZ*K}c?J F"6 tLfНN9?0ltl3t(ӎ&xן9CN@NrZ[[ {xk:6M<<`1eE]?۵]r4⡻k~7ET-I{4ձZqhPqf' y`pXBcsRA]_~5 E@7cHS?x`:/d|9aеk*a$ô~g뇰ӜBf6){EAw7fd`_}RgU,9;Q⽡ LN20@&=#y ,e_ǩ W`bz4m[d 럄|m҉4縋2V. Cg%/n3{Z"܀oԏ=Rq4L2E5esjz`j5[K;۪`cyn_) Ƥy\.˅͵@Ks:Pϻ''6 >F|d*SO±g7NCK~JuJoE fL_] oebik; 8:CM=>n/APDu>Lwt[ϟbKz,$>F@mvEsVVx7z.m9GV ZYPKe^fC'9] O򯝑o˽,15d٧g1^YڡEnѺ>! n[,TBm*m>T**) c݂.Ȱ mU7$soPq.m#Ӊ ĵ($)PxzԾ3 ;D#cQF r3J‰q;YKE]Q:b{=Xd(Q3YDЋ!qLvCALYˣ;wqI1(6h(/I$ I"*R} VQ"R,%IB(v.n 9s<3mC!/4ƁZB)X G*NZv( n邾05g5gqzWn&kb)Eb=3ڞ˞8qiL@Bז(d:DEv\ o@7ʼn*;\w;J 3uicGL?h|N_RKvՌ+maΘ"`>mkl Nے7u fzL.a-GaE1qt>qiq 84 ?NeMM8&8t}6KSU9%_:0^r8eK|~vu芽k6+BtUU{_3\5[y+&_rX ?y?|GXzR/`$8ݏ5eTX.h"KSȏ--7|p𜫦lJ`к zT쾐xP_dXNa9ۮ WrI߇ 8r\D@\Rܯ#F(`Ml+Ƚ=q ]G^IܛEk/2|bIzf F[*a^l5B \Xhei1R;4qgY sQWעdo 'HS \~:sշ0Y`3Q0/?(Y ͸}b~8)mC[a~8=X`+e' *(I׻ޭbKEȯBSO/I^ eӮޞXt?sga)t[}I,֛+'EߡH[Z` T!ǷEwLdns)v {Cܙ)ޝg$;F//!-W qQCs}:}nS G_ W6F!_Nfb^`X_EjZ jc6<öut6 G㾯лN9uO0q,Zs^Y4= 3hW :<[%E̤OswԹ~?,j];RS>v*[/{[p^nQMua6+Ym߉Um;5ڱ8BN1`ɆVtz}߱뻉܇eqm:?kJN >0S=IGߘ["Fع@FjgbxvP廫TflTpf?MX/`n:LbsLt!6l 3r#o%J}fD^Q.͍|PbO#^Yܟ`0mށ3rXqWS[o}&5Pøp!$5* 6_? i'F~\P5-:T ^LnQM^ϡJ$)=Mar F0O&Gv®)^{~tqB C°hZh ˎU0u)X hŖA\?cq.IvpƱ<3O6Z2 ys2zqQ "bd9Xp6rj`YpB* ?VӚ26BJsϟULySгYMfdVgИ-Z/ Hs l`XeE>Rs Y|-7LֹiMc=g3XT9뤘 -QƆ/`K:~ٻ ]R):֦CQz1%y/Gh-vU:Wc<;MalP\84. }@.S{e`g"m V[ۺ 5i`MA6^4:ꢎ"__zIa$<%5pUp`9鏾q8rDAUFr3r+i9fx< tYsr⦃"tܭM7!{xՠXʼnq@7HHϩ?*ЌсU)?_dy;D mz]1eP"éu\swW,xYs_`ڝ@!,{i=jE/\"乻&!9l+= u1L 6?~/Vj}ݶ2T PW`_0Șy7/7)L{FE5W V]HUj}& Z?#H {IA-CAѿ~9\cVۿ9't+] o냩c7taὮɑC8Ty5hP[&?/xb5ORaaW`Yq)%L ̀3n_us$d$`߹b~:Fs/v`uv3Fza_i_pkC0?Zlk%wYRu8Ol,UzK-ư?~M&Xâ:Y )+\@>5 A¬d lZ|[D Fq eGzu.9?һb8Q& "p"D?zX-ZSol3s&sŸqQk<9 7zv WGT|Tl }Ƈ>`A廰 v7:aG uΆB+S1֐|d ZuzϞц80|nX(`x+'WpiB[XV +9f5ֽر?H}\~ 81?k}\@!-OCwb`9)Wa 5*cyňOaL'z4g{Y`,<>˃ !&:۟OLuY, ҫaKg 2'FϠ96 j*gl/')Ena ӱj Zۚp`,/qSy:*MAb~j0Oܿ$Tء;z<7Խ!aF x 9h`;8=+V:i\b4hIk_8~CqoMǾ7LSTlH&Lw_m$ ѻQm9Ó{{o>5k俨:hpLt.ĩuy@Y{d 7 Cr/n3֚8ׂfzᒙ3;74sہe5.g[WԶ.QZf%Ve;L]p%bZSz2jk>DAhME!1YݨUoe~ ɪSnx ^g*MMkOcia%0c?;7°Ctfl*-Ł`C+8vm;ɷ= N\op쟡aq ms\K:F¦?0A#i#]$dzY&KIs{bo /؝Up3vnO<^- Bw߉%n?x;-K5tn0x'=q34q_{>mef:x<#g>iJ9h|Q]JKx6؟Tavb+Gu}N"]$ LVE piނo)SV,[Ă}| n7z.C5%]vέ |$-D@,9)Db%7u6Z-GwKq)o6׷8fCZu8)>XX{+y0)lz6a6~ 5zߓösN+Sï?-:o\+z0Ae *$. 
semTools/data/ [binary .rda data files: compressed contents omitted, not representable as text] semTools/man/efa.ekc.Rd [tar header and opening sections lost to extraction damage; the file resumes mid-\value] ... 1) is also printed as a header to the \code{data.frame} } \description{ Identify the number of factors to extract based on the Empirical Kaiser Criterion (EKC). The analysis can be run on a \code{data.frame} or data \code{matrix} (\code{data}), or on a correlation or covariance matrix (\code{sample.cov}) and the sample size (\code{sample.nobs}). A \code{data.frame} is returned with two columns: the eigenvalues from your data or covariance matrix and the reference eigenvalues. The number of factors suggested by the Empirical Kaiser Criterion (i.e. the sample eigenvalues greater than the reference eigenvalues) and the number of factors suggested by the original Kaiser Criterion (i.e. sample eigenvalues > 1) are printed above the output. } \examples{ ## Simulate data with 3 factors model <- ' f1 =~ .3*x1 + .5*x2 + .4*x3 f2 =~ .3*x4 + .5*x5 + .4*x6 f3 =~ .3*x7 + .5*x8 + .4*x9 ' dat <- simulateData(model, seed = 123) ## save summary statistics myCovMat <- cov(dat) myCorMat <- cor(dat) N <- nrow(dat) ## Run the EKC function (out <- efa.ekc(dat)) ## To extract the recommended number of factors using the EKC: attr(out, "nfactors") ## If you do not have raw data, you can use summary statistics (x1 <- efa.ekc(sample.cov = myCovMat, sample.nobs = N, plot = FALSE)) (x2 <- efa.ekc(sample.cov = myCorMat, sample.nobs = N, plot = FALSE)) } \references{ Braeken, J., & van Assen, M. A. L. M. (2017). An empirical Kaiser criterion. \emph{Psychological Methods, 22}(3), 450--466. doi:10.1037/met0000074 } \author{ Ylenio Longo (University of Nottingham; \email{yleniolongo@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/dat3way.Rd0000644000176200001440000000244713432435606014510 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dat3way} \alias{dat3way} \title{Simulated Dataset to Demonstrate Three-way Latent Interaction} \format{A \code{data.frame} with 500 observations of 12 variables.
\describe{ \item{x1}{The first indicator of the first independent factor} \item{x2}{The second indicator of the first independent factor} \item{x3}{The third indicator of the first independent factor} \item{x4}{The first indicator of the second independent factor} \item{x5}{The second indicator of the second independent factor} \item{x6}{The third indicator of the second independent factor} \item{x7}{The first indicator of the third independent factor} \item{x8}{The second indicator of the third independent factor} \item{x9}{The third indicator of the third independent factor} \item{x10}{The first indicator of the dependent factor} \item{x11}{The second indicator of the dependent factor} \item{x12}{The third indicator of the dependent factor} }} \source{ Data were generated by the \code{\link[MASS]{mvrnorm}} function in the \code{MASS} package. } \usage{ dat3way } \description{ A simulated data set with 3 independent factors and 1 dependent factor where each factor has three indicators } \examples{ head(dat3way) } \keyword{datasets} semTools/man/htmt.Rd0000644000176200001440000000556713476433177014113 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/htmt.R \name{htmt} \alias{htmt} \title{Assessing Discriminant Validity using Heterotrait-Monotrait Ratio} \usage{ htmt(model, data = NULL, sample.cov = NULL, missing = "listwise", ordered = NULL, absolute = TRUE) } \arguments{ \item{model}{lavaan \link[lavaan]{model.syntax} of a confirmatory factor analysis model where at least two factors are required for indicators measuring the same construct.} \item{data}{A \code{data.frame} or data \code{matrix}} \item{sample.cov}{A covariance or correlation matrix can be used, instead of \code{data}, to estimate the HTMT values.} \item{missing}{If "listwise", cases with missing values are removed listwise from the data frame. If "direct" or "ml" or "fiml" and the estimator is maximum likelihood, an EM algorithm is used to estimate the unrestricted covariance matrix (and mean vector). If "pairwise", pairwise deletion is used. If "default", the value is set depending on the estimator and the mimic option (see details in \link[lavaan]{lavCor}).} \item{ordered}{Character vector. Only used if object is a \code{data.frame}. Treat these variables as ordered (ordinal) variables. Importantly, all other variables will be treated as numeric (unless \code{is.ordered} in \code{data}). (see also \link[lavaan]{lavCor})} \item{absolute}{logical. Whether HTMT values should be estimated based on absolute correlations (recommended and default is \code{TRUE})} } \value{ A matrix showing HTMT values (i.e., discriminant validity) between each pair of factors } \description{ This function assesses discriminant validity through the heterotrait-monotrait ratio (HTMT) of the correlations (Henseler, Ringle, & Sarstedt, 2015). Specifically, it assesses the geometric-mean correlation among indicators across constructs (i.e. heterotrait-heteromethod correlations) relative to the geometric-mean correlation among indicators within the same construct (i.e. monotrait-heteromethod correlations). The resulting HTMT values are interpreted as estimates of inter-construct correlations. Absolute values of the correlations are recommended to calculate the HTMT matrix. Correlations are estimated using the \code{\link[lavaan]{lavCor}} function in the \pkg{lavaan} package.
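As an illustration of this ratio, a minimal sketch (not the packaged implementation) computes the HTMT value for one pair of constructs directly from an indicator correlation matrix, using the geometric means described above; the function name \code{htmt_pair} and the index vectors \code{i1} and \code{i2} are hypothetical:
## A minimal sketch, not the packaged implementation: HTMT for one pair of
## constructs, given an indicator correlation matrix R and (hypothetical)
## index vectors i1, i2 marking the indicators of each construct.
htmt_pair <- function(R, i1, i2) {
  geomean <- function(x) exp(mean(log(x)))        # geometric mean
  hetero <- abs(R[i1, i2])                        # heterotrait-heteromethod block
  mono1  <- abs(R[i1, i1][lower.tri(R[i1, i1])])  # monotrait block, construct 1
  mono2  <- abs(R[i2, i2][lower.tri(R[i2, i2])])  # monotrait block, construct 2
  geomean(hetero) / sqrt(geomean(mono1) * geomean(mono2))
}
## e.g., with the objects from the example below:
## htmt_pair(lavaan::lavCor(dat), 1:3, 4:6)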
} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' dat <- HolzingerSwineford1939[, paste0("x", 1:9)] htmt(HS.model, dat) } \references{ Henseler, J., Ringle, C. M., & Sarstedt, M. (2015). A new criterion for assessing discriminant validity in variance-based structural equation modeling. \emph{Journal of the Academy of Marketing Science, 43}(1), 115--135. doi:10.1007/s11747-014-0403-8 } \author{ Ylenio Longo (University of Nottingham; \email{yleniolongo@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/exLong.Rd0000644000176200001440000000152213432435606014363 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{exLong} \alias{exLong} \title{Simulated Dataset to Demonstrate Longitudinal Measurement Invariance} \format{A \code{data.frame} with 200 observations of 10 variables. \describe{ \item{sex}{Sex of respondents} \item{y1t1}{Indicator 1 in Time 1} \item{y2t1}{Indicator 2 in Time 1} \item{y3t1}{Indicator 3 in Time 1} \item{y1t2}{Indicator 1 in Time 2} \item{y2t2}{Indicator 2 in Time 2} \item{y3t2}{Indicator 3 in Time 2} \item{y1t3}{Indicator 1 in Time 3} \item{y2t3}{Indicator 2 in Time 3} \item{y3t3}{Indicator 3 in Time 3} }} \source{ Data were generated using the \code{simsem} package. } \usage{ exLong } \description{ A simulated data set with 1 factor measured by 3 indicators at three time points } \examples{ head(exLong) } \keyword{datasets} semTools/man/Net-class.Rd0000644000176200001440000000224113432435606014757 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NET.R \docType{class} \name{Net-class} \alias{Net-class} \alias{show,Net-method} \alias{summary,Net-method} \title{Class For the Result of Nesting and Equivalence Testing} \usage{ \S4method{show}{Net}(object) \S4method{summary}{Net}(object) } \arguments{ \item{object}{An object of class \code{Net}.} } \value{ \item{show}{\code{signature(object = "Net")}: prints the logical matrix of test results.} \item{summary}{\code{signature(object = "Net")}: prints a narrative description of results. The original \code{object} is invisibly returned.} } \description{ This class contains the results of nesting and equivalence testing among multiple models } \section{Slots}{ \describe{ \item{\code{test}}{Logical \code{matrix} indicating nesting/equivalence among models} \item{\code{df}}{The degrees of freedom of tested models} }} \section{Objects from the Class}{ Objects can be created via the \code{\link{net}} function. } \examples{ # See the example in the net function. } \seealso{ \code{\link{net}} } \author{ Terrence D.
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/mardiaKurtosis.Rd0000644000176200001440000000350713432435606016135 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataDiagnosis.R \name{mardiaKurtosis} \alias{mardiaKurtosis} \title{Finding Mardia's multivariate kurtosis} \usage{ mardiaKurtosis(dat, use = "everything") } \arguments{ \item{dat}{The target matrix or data frame with multiple variables} \item{use}{Missing data handling method from the \code{\link[stats]{cov}} function.} } \value{ The value of Mardia's multivariate kurtosis, with a test statistic } \description{ Finding Mardia's multivariate kurtosis of multiple variables } \details{ Mardia's multivariate kurtosis formula (Mardia, 1970) is \deqn{ b_{2, d} = \frac{1}{n}\sum^n_{i=1}\left[ \left(\bold{X}_i - \bold{\bar{X}} \right)^{'} \bold{S}^{-1} \left(\bold{X}_i - \bold{\bar{X}} \right) \right]^2, } where \eqn{d} is the number of variables, \eqn{X} is the target dataset with multiple variables, \eqn{n} is the sample size, \eqn{\bold{S}} is the sample covariance matrix of the target dataset, and \eqn{\bold{\bar{X}}} is the mean vector of the target dataset, stacked in \eqn{n} rows. When the population multivariate kurtosis is normal, \eqn{b_{2,d}} is asymptotically normally distributed with mean \eqn{d(d + 2)} and variance \eqn{8d(d + 2)/n}. } \examples{ library(lavaan) mardiaKurtosis(HolzingerSwineford1939[ , paste0("x", 1:9)]) } \references{ Mardia, K. V. (1970). Measures of multivariate skewness and kurtosis with applications. \emph{Biometrika, 57}(3), 519--530. doi:10.2307/2334770 } \seealso{ \itemize{ \item \code{\link{skew}} Find the univariate skewness of a variable \item \code{\link{kurtosis}} Find the univariate excessive kurtosis of a variable \item \code{\link{mardiaSkew}} Find the Mardia's multivariate skewness of a set of variables } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/mardiaSkew.Rd0000644000176200001440000000352113432435606015217 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataDiagnosis.R \name{mardiaSkew} \alias{mardiaSkew} \title{Finding Mardia's multivariate skewness} \usage{ mardiaSkew(dat, use = "everything") } \arguments{ \item{dat}{The target matrix or data frame with multiple variables} \item{use}{Missing data handling method from the \code{\link[stats]{cov}} function.} } \value{ The value of Mardia's multivariate skewness, with a test statistic } \description{ Finding Mardia's multivariate skewness of multiple variables } \details{ Mardia's multivariate skewness formula (Mardia, 1970) is \deqn{ b_{1, d} = \frac{1}{n^2}\sum^n_{i=1}\sum^n_{j=1}\left[ \left(\bold{X}_i - \bold{\bar{X}} \right)^{'} \bold{S}^{-1} \left(\bold{X}_j - \bold{\bar{X}} \right) \right]^3, } where \eqn{d} is the number of variables, \eqn{X} is the target dataset with multiple variables, \eqn{n} is the sample size, \eqn{\bold{S}} is the sample covariance matrix of the target dataset, and \eqn{\bold{\bar{X}}} is the mean vector of the target dataset, stacked in \eqn{n} rows. When the population multivariate skewness is normal, \eqn{\frac{n}{6}b_{1,d}} asymptotically follows a \eqn{\chi^2} distribution with \eqn{d(d + 1)(d + 2)/6} degrees of freedom. } \examples{ library(lavaan) mardiaSkew(HolzingerSwineford1939[ , paste0("x", 1:9)]) } \references{ Mardia, K. V. (1970).
Measures of multivariate skewness and kurtosis with applications. \emph{Biometrika, 57}(3), 519--530. doi:10.2307/2334770 } \seealso{ \itemize{ \item \code{\link{skew}} Find the univariate skewness of a variable \item \code{\link{kurtosis}} Find the univariate excessive kurtosis of a variable \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate kurtosis of a set of variables } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/semTools-deprecated.Rd0000644000176200001440000000341013526762575017044 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/aa_semTools-deprecated.R, R/longInvariance.R, % R/measurementInvariance.R, R/measurementInvarianceCat.R \name{semTools-deprecated} \alias{semTools-deprecated} \alias{longInvariance} \alias{measurementInvariance} \alias{measurementInvarianceCat} \title{Deprecated functions in package \pkg{semTools}.} \usage{ longInvariance(model, varList, auto = "all", constrainAuto = FALSE, fixed.x = TRUE, std.lv = FALSE, group = NULL, group.equal = "", group.partial = "", strict = FALSE, warn = TRUE, debug = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "satorra.bentler.2001", ...) measurementInvariance(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "satorra.bentler.2001") measurementInvarianceCat(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "default") } \description{ The functions listed below are deprecated and will be defunct in the near future. When possible, alternative functions with similar functionality are also mentioned. Help pages for deprecated functions are available at \code{help("semTools-deprecated")}. } \section{Previous measurement-invariance functions}{ The \code{measurementInvariance}, \code{measurementInvarianceCat}, and \code{longInvariance} functions will no longer be supported. Instead, use the \code{\link{measEq.syntax}} function, which is much more flexible and supports a wider range of data (e.g., any mixture of \code{numeric} and \code{ordered} indicators, any combination of multiple groups and repeated measures, models fit to multiple imputations with \code{\link{runMI}}). } \keyword{internal} semTools/man/singleParamTest.Rd0000644000176200001440000000756013432435606016241 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/singleParamTest.R \name{singleParamTest} \alias{singleParamTest} \title{Single Parameter Test Divided from Nested Model Comparison} \usage{ singleParamTest(model1, model2, return.fit = FALSE, method = "satorra.bentler.2001") } \arguments{ \item{model1}{Model 1.} \item{model2}{Model 2. Note that the two models must be nested, and the order of parameters in their parameter tables must be the same. That is, nested models with different scale identifications may not be testable with this function.} \item{return.fit}{Return the submodels fitted by this function} \item{method}{The method used to calculate the likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} } \value{ If \code{return.fit = FALSE}, the result tables are provided. \eqn{\chi^2} and \emph{p} values are provided for all methods. Note that each \eqn{\chi^2} is based on 1 \emph{df}. Expected parameter changes and their standardized forms are also provided. If \code{return.fit = TRUE}, a list with two elements is provided.
The first element is the tabular result. The second element contains the submodels used in the \code{free} and \code{fix} methods. } \description{ In comparing two nested models, the \eqn{\Delta\chi^2} test may indicate that the two models are different. However, like other omnibus tests, it does not tell researchers which fixed parameters or constraints make the two models different. This function helps researchers identify the significant parameter. } \details{ This function first identifies the differences between the two models. The model with more free parameters is referred to as the parent model, and the model with fewer free parameters is referred to as the nested model. Three tests are implemented here: \enumerate{ \item \code{free}: The nested model is used as a template. Then, one parameter indicating the differences between the two models is freed. The new model is compared with the nested model. This process is repeated for all differences between the two models. \item \code{fix}: The parent model is used as a template. Then, one parameter indicating the differences between the two models is fixed or constrained to be equal to other parameters. The new model is then compared with the parent model. This process is repeated for all differences between the two models. \item \code{mi}: No longer available because the test of modification indices is not consistent. For example, if two parameters are equality constrained, the modification index from the first parameter is not equal to that of the second parameter. } Note that this function does not adjust for the inflated Type I error rate from multiple tests. } \examples{ library(lavaan) # Nested model comparison by hand HS.model1 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6' HS.model2 <- ' visual =~ a*x1 + a*x2 + a*x3 textual =~ b*x4 + b*x5 + b*x6' m1 <- cfa(HS.model1, data = HolzingerSwineford1939, std.lv = TRUE, estimator = "MLR") m2 <- cfa(HS.model2, data = HolzingerSwineford1939, std.lv = TRUE, estimator = "MLR") anova(m1, m2) singleParamTest(m1, m2) ## Nested model comparison from the measurementInvariance function HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' models <- measurementInvariance(model = HW.model, data = HolzingerSwineford1939, group = "school") singleParamTest(models[[1]], models[[2]]) ## Note that the comparison between weak (Model 2) and scalar invariance ## (Model 3) cannot be done by this function because the weak invariance ## model fixes factor means as 0 in Group 2 but the strong invariance model ## frees the factor means in Group 2. Users may try to compare ## strong (Model 3) and means invariance models by this function.
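## A hand-rolled version of the "free" strategy described in Details, as a
## sketch (the names HS.model2b and m2b are hypothetical): free one of the
## equality-constrained loadings in the nested model (m2 above) and isolate
## that single constraint with a 1-df likelihood ratio test.
HS.model2b <- ' visual =~ a*x1 + x2 + a*x3
                textual =~ b*x4 + b*x5 + b*x6'
m2b <- cfa(HS.model2b, data = HolzingerSwineford1939, std.lv = TRUE,
           estimator = "MLR")
anova(m2, m2b) # compare with the corresponding row of singleParamTest(m1, m2)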
} \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/indProd.Rd0000644000176200001440000001044013432435606014525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/indProd.R \name{indProd} \alias{indProd} \alias{orthogonalize} \title{Make products of indicators using no centering, mean centering, double-mean centering, or residual centering} \usage{ indProd(data, var1, var2, var3 = NULL, match = TRUE, meanC = TRUE, residualC = FALSE, doubleMC = TRUE, namesProd = NULL) orthogonalize(data, var1, var2, var3 = NULL, match = TRUE, namesProd = NULL) } \arguments{ \item{data}{The desired data to be transformed.} \item{var1}{Names or indices of the variables loaded on the first factor} \item{var2}{Names or indices of the variables loaded on the second factor} \item{var3}{Names or indices of the variables loaded on the third factor (for three-way interaction)} \item{match}{Specify \code{TRUE} to use the match-paired approach (Marsh, Wen, & Hau, 2004). If \code{FALSE}, the resulting products are all possible products.} \item{meanC}{Specify \code{TRUE} for mean centering the main-effect indicators before making the products} \item{residualC}{Specify \code{TRUE} for residual centering the products by the main-effect indicators (Little, Bovaird, & Widaman, 2006).} \item{doubleMC}{Specify \code{TRUE} for centering the resulting products (Lin et al., 2010)} \item{namesProd}{The names of the resulting products} } \value{ The original data attached with the products. } \description{ The \code{indProd} function will make products of indicators using no centering, mean centering, double-mean centering, or residual centering. The \code{orthogonalize} function is a shortcut of the \code{indProd} function to make the residual-centered indicator products. } \examples{ ## Mean centering / two-way interaction / match-paired dat <- indProd(attitude[ , -1], var1 = 1:3, var2 = 4:6) ## Residual centering / two-way interaction / all possible products dat2 <- indProd(attitude[ , -1], var1 = 1:3, var2 = 4:6, match = FALSE, meanC = FALSE, residualC = TRUE, doubleMC = FALSE) ## Double-mean centering / two-way interaction / all possible products dat3 <- indProd(attitude[ , -1], var1 = 1:3, var2 = 4:6, match = FALSE, meanC = TRUE, residualC = FALSE, doubleMC = TRUE) ## Mean centering / three-way interaction / match-paired dat4 <- indProd(attitude[ , -1], var1 = 1:2, var2 = 3:4, var3 = 5:6) ## Residual centering / three-way interaction / all possible products dat5 <- orthogonalize(attitude[ , -1], var1 = 1:2, var2 = 3:4, var3 = 5:6, match = FALSE) ## Double-mean centering / three-way interaction / all possible products dat6 <- indProd(attitude[ , -1], var1 = 1:2, var2 = 3:4, var3 = 5:6, match = FALSE, meanC = TRUE, residualC = TRUE, doubleMC = TRUE) } \references{ Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}(3), 275--300. doi:10.1037/1082-989X.9.3.275 Lin, G. C., Wen, Z., Marsh, H. W., & Lin, H. S. (2010). Structural equation models of latent interactions: Clarification of orthogonalizing and double-mean-centering strategies. \emph{Structural Equation Modeling, 17}(3), 374--391. doi:10.1080/10705511.2010.488999 Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of orthogonalizing powered and product terms: Implications for modeling interactions among latent variables. \emph{Structural Equation Modeling, 13}(4), 497--519.
doi:10.1207/s15328007sem1304_1 } \seealso{ \itemize{ \item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering, or double-mean centering. \item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from the residual-centering approach. \item \code{\link{probe3WayRC}} For probing the three-way latent interaction when the results are obtained from the residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Alexander Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) } semTools/man/probe3WayRC.Rd0000644000176200001440000001550713475443506015234 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probeInteraction.R \name{probe3WayRC} \alias{probe3WayRC} \title{Probing three-way interaction on the residual-centered latent interaction} \usage{ probe3WayRC(fit, nameX, nameY, modVar, valProbe1, valProbe2, group) } \arguments{ \item{fit}{The lavaan model object used to evaluate model fit} \item{nameX}{The vector of the factor names used as the predictors. The three first-order factors will be listed first. Then the second-order factors will be listed. The last element of the vector will represent the three-way interaction. Note that the fourth element must be the interaction between the first and the second variables. The fifth element must be the interaction between the first and the third variables. The sixth element must be the interaction between the second and the third variables.} \item{nameY}{The name of the factor that is used as the dependent variable.} \item{modVar}{The names of the two factors that are used as the moderators. The effect of the independent factor on each combination of the moderator variable values will be probed.} \item{valProbe1}{The values of the first moderator that will be used to probe the effect of the independent factor.} \item{valProbe2}{The values of the second moderator that will be used to probe the effect of the independent factor.} \item{group}{In multigroup models, the label of the group for which the results will be returned. Must correspond to one of \code{\link[lavaan]{lavInspect}(fit, "group.label")}.} } \value{ A list with two elements: \enumerate{ \item \code{SimpleIntercept}: The intercepts given each value of the moderator. This element will be shown only if the factor intercept is estimated (e.g., not fixed at 0). \item \code{SimpleSlope}: The slopes given each value of the moderator. } In each element, the first column represents values of the first moderator specified in the \code{valProbe1} argument. The second column represents values of the second moderator specified in the \code{valProbe2} argument. The third column is the simple intercept or simple slope. The fourth column is the \emph{SE} of the simple intercept or simple slope. The fifth column is the Wald (\emph{z}) statistic. The sixth column is the \emph{p} value testing whether the simple intercepts or slopes are different from 0.
} \description{ Probing interaction for simple intercept and simple slope for the residual-centered latent three-way interaction (Pornprasertmanit, Schoemann, Geldhof, & Little, submitted) } \details{ Before using this function, researchers need to make the products of the indicators between the first-order factors and residualize the products by the original indicators (Lance, 1988; Little, Bovaird, & Widaman, 2006). The process can be automated by the \code{\link{indProd}} function. Note that the indicator products can be made for all possible combinations or by the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the regression with latent interaction will be used to fit all original indicators and the product terms (Geldhof, Pornprasertmanit, Schoemann, & Little, 2013). To use this function the model must be fit with a mean structure. See the example for how to fit the product term below. Once the lavaan result is obtained, this function will be used to probe the interaction. The probing process on residual-centered latent interaction is based on transforming the residual-centered result into the no-centered result. See Pornprasertmanit, Schoemann, Geldhof, and Little (submitted) for further details. Note that this approach is based on a strong assumption that the first-order latent variables are normally distributed. The probing process is applied after the no-centered result (parameter estimates and their covariance matrix among parameter estimates) has been computed. See \code{\link{probe3WayMC}} for further details. } \examples{ library(lavaan) dat3wayRC <- orthogonalize(dat3way, 1:3, 4:6, 7:9) model3 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f3 =~ x7 + x8 + x9 f12 =~ x1.x4 + x2.x5 + x3.x6 f13 =~ x1.x7 + x2.x8 + x3.x9 f23 =~ x4.x7 + x5.x8 + x6.x9 f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9 f4 =~ x10 + x11 + x12 f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123 f1 ~~ 0*f12 f1 ~~ 0*f13 f1 ~~ 0*f123 f2 ~~ 0*f12 f2 ~~ 0*f23 f2 ~~ 0*f123 f3 ~~ 0*f13 f3 ~~ 0*f23 f3 ~~ 0*f123 f12 ~~ 0*f123 f13 ~~ 0*f123 f23 ~~ 0*f123 x1 ~ 0*1 x4 ~ 0*1 x7 ~ 0*1 x10 ~ 0*1 x1.x4 ~ 0*1 x1.x7 ~ 0*1 x4.x7 ~ 0*1 x1.x4.x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f3 ~ NA*1 f12 ~ NA*1 f13 ~ NA*1 f23 ~ NA*1 f123 ~ NA*1 f4 ~ NA*1 " fitRC3way <- sem(model3, data = dat3wayRC, std.lv = FALSE, meanstructure = TRUE) summary(fitRC3way) result3wayRC <- probe3WayRC(fitRC3way, c("f1", "f2", "f3", "f12", "f13", "f23", "f123"), "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1)) result3wayRC } \references{ Geldhof, G. J., Pornprasertmanit, S., Schoemann, A., & Little, T. D. (2013). Orthogonalizing through residual centering: Extended applications and caveats. \emph{Educational and Psychological Measurement, 73}(1), 27--46. doi:10.1177/0013164412445473 Lance, C. E. (1988). Residual centering, exploratory and confirmatory moderator analysis, and decomposition of effects in path models containing interactions. \emph{Applied Psychological Measurement, 12}(2), 163--175. doi:10.1177/014662168801200205 Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of orthogonalizing powered and product terms: Implications for modeling interactions. \emph{Structural Equation Modeling, 13}(4), 497--519. doi:10.1207/s15328007sem1304_1 Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}(3), 275--300. doi:10.1037/1082-989X.9.3.275 Pornprasertmanit, S., Schoemann, A. M., Geldhof, G.
J., & Little, T. D. (submitted). \emph{Probing latent interaction estimated with a residual centering approach.} } \seealso{ \itemize{ \item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. \item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering, or double-mean centering \item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering, or double-mean centering \item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from the residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/kurtosis.Rd0000644000176200001440000000320313432435606015010 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataDiagnosis.R \name{kurtosis} \alias{kurtosis} \title{Finding excessive kurtosis} \usage{ kurtosis(object, population = FALSE) } \arguments{ \item{object}{A vector used to find an excessive kurtosis} \item{population}{\code{TRUE} to compute the parameter formula. \code{FALSE} to compute the sample statistic formula.} } \value{ The value of the excessive kurtosis, with a test statistic if \code{population = FALSE} } \description{ Finding excessive kurtosis (\eqn{g_{2}}) of an object } \details{ The excessive kurtosis computed is \eqn{g_{2}}. The formula for the parameter excessive kurtosis \eqn{\gamma_{2}} is \deqn{\gamma_{2} = \frac{\mu_{4}}{\mu^{2}_{2}} - 3,} where \eqn{\mu_{i}} denotes the \eqn{i}th order central moment. The formula for the sample statistic \eqn{g_{2}} is \deqn{g_{2} = \frac{k_{4}}{k^{2}_{2}},} where \eqn{k_{i}} are the \eqn{i}th order \emph{k}-statistics. The sampling variance of the excessive kurtosis is \deqn{Var(\hat{g}_{2}) = \frac{24}{N}} where \eqn{N} is the sample size. } \examples{ kurtosis(1:5) } \references{ Weisstein, Eric W. (n.d.). \emph{Kurtosis.} Retrieved from \emph{MathWorld}--A Wolfram Web Resource: \url{http://mathworld.wolfram.com/Kurtosis.html} } \seealso{ \itemize{ \item \code{\link{skew}} Find the univariate skewness of a variable \item \code{\link{mardiaSkew}} Find the Mardia's multivariate skewness of a set of variables \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate kurtosis of a set of variables } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/dat2way.Rd0000644000176200001440000000214413432435606014503 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dat2way} \alias{dat2way} \title{Simulated Dataset to Demonstrate Two-way Latent Interaction} \format{A \code{data.frame} with 500 observations of 9 variables.
\describe{ \item{x1}{The first indicator of the first independent factor} \item{x2}{The second indicator of the first independent factor} \item{x3}{The third indicator of the first independent factor} \item{x4}{The first indicator of the second independent factor} \item{x5}{The second indicator of the second independent factor} \item{x6}{The third indicator of the second independent factor} \item{x7}{The first indicator of the dependent factor} \item{x8}{The second indicator of the dependent factor} \item{x9}{The third indicator of the dependent factor} }} \source{ Data were generated by the \code{\link[MASS]{mvrnorm}} function in the \code{MASS} package. } \usage{ dat2way } \description{ A simulated data set with 2 independent factors and 1 dependent factor where each factor has three indicators } \examples{ head(dat2way) } \keyword{datasets} semTools/man/reliabilityL2.Rd0000644000176200001440000001151313507677540015647 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reliability.R \name{reliabilityL2} \alias{reliabilityL2} \title{Calculate the reliability values of a second-order factor} \usage{ reliabilityL2(object, secondFactor, omit.imps = c("no.conv", "no.se")) } \arguments{ \item{object}{A \code{\linkS4class{lavaan}} or \code{\linkS4class{lavaan.mi}} object, expected to contain at least one exogenous higher-order common factor.} \item{secondFactor}{The name of a single second-order factor in the model fitted in \code{object}. The function must be called multiple times to estimate reliability for each higher-order factor.} \item{omit.imps}{\code{character} vector specifying criteria for omitting imputations from pooled results. Can include any of \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the default setting, which excludes any imputations that did not converge or for which standard errors could not be computed. The last option (\code{"no.npd"}) would exclude any imputations which yielded a nonpositive definite covariance matrix for observed or latent variables, which would include any "improper solutions" such as Heywood cases. NPD solutions are not excluded by default because they are likely to occur due to sampling error, especially in small samples. However, gross model misspecification could also cause NPD solutions, so users can compare pooled results with and without this setting as a sensitivity analysis to see whether some imputations warrant further investigation.} } \value{ Reliability values at Levels 1 and 2 of the second-order factor, as well as the partial reliability value at Level 1 } \description{ Calculate the reliability values (coefficient omega) of a second-order factor } \details{ The first formula of the coefficient omega (in \code{\link{reliability}}) is mainly used in the calculation.
The model-implied covariance matrix of a second-order factor model can be separated into three sources: the second-order factor, the uniqueness of the first-order factors, and the measurement error of the indicators: \deqn{ \hat{\Sigma} = \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} + \Lambda \Psi_{u} \Lambda^{\prime} + \Theta, } where \eqn{\hat{\Sigma}} is the model-implied covariance matrix, \eqn{\Lambda} contains the first-order factor loadings, \eqn{\bold{B}} contains the second-order factor loadings, \eqn{\Phi_2} is the covariance matrix of the second-order factors, \eqn{\Psi_{u}} is the covariance matrix of the unique scores from first-order factors, and \eqn{\Theta} is the covariance matrix of the measurement errors from indicators. Thus, the proportion of the total-score variance explained by the second-order factor, or the coefficient omega at Level 1, can be calculated: \deqn{ \omega_{L1} = \frac{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1}}{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1} + \bold{1}^{\prime} \Lambda \Psi_{u} \Lambda^{\prime} \bold{1} + \bold{1}^{\prime} \Theta \bold{1}}, } where \eqn{\bold{1}} is the \emph{k}-dimensional vector of 1 and \emph{k} is the number of observed variables. The model-implied covariance matrix among the first-order factors (\eqn{\Phi_1}) can be calculated as: \deqn{ \Phi_1 = \bold{B} \Phi_2 \bold{B}^{\prime} + \Psi_{u}, } Thus, the proportion of variance at the first-order factor level explained by the second-order factor, or the coefficient omega at Level 2, can be calculated: \deqn{ \omega_{L2} = \frac{\bold{1_F}^{\prime} \bold{B} \Phi_2 \bold{B}^{\prime} \bold{1_F}}{\bold{1_F}^{\prime} \bold{B} \Phi_2 \bold{B}^{\prime} \bold{1_F} + \bold{1_F}^{\prime} \Psi_{u} \bold{1_F}}, } where \eqn{\bold{1_F}} is the \emph{F}-dimensional vector of 1 and \emph{F} is the number of first-order factors. The partial coefficient omega at Level 1, or the proportion of observed variance explained by the second-order factor after partialling out the uniqueness from the first-order factors, can be calculated: \deqn{ \omega_{PL1} = \frac{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1}}{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1} + \bold{1}^{\prime} \Theta \bold{1}}, } Note that if the second-order factor has a direct factor loading on some observed variables, the observed variables will be counted as first-order factors. } \examples{ library(lavaan) HS.model3 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 higher =~ visual + textual + speed' fit6 <- cfa(HS.model3, data = HolzingerSwineford1939) reliability(fit6) # Should provide a warning for the endogenous variables reliabilityL2(fit6, "higher") } \seealso{ \code{\link{reliability}} for the reliability of the first-order factors.
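As a worked sketch of the formulas in the Details section, the Level-1 and Level-2 omegas for the example model \code{fit6} can be recomputed by hand; the matrix names assume \pkg{lavaan}'s \code{lavInspect} representation (in particular, that the second-order loadings are stored in \code{$beta}), and the factor labels are taken from the example:
## Sketch: hand computation of omega_L1 and omega_L2 from the formulas
## above, using fit6 from the example. Matrix names assume lavaan's
## lavInspect() output (second-order loadings stored in $beta).
est   <- lavInspect(fit6, "est")
fo    <- c("visual", "textual", "speed")            # first-order factors
L     <- est$lambda[, fo]                           # Lambda (9 x 3)
B     <- est$beta[fo, "higher", drop = FALSE]       # second-order loadings (3 x 1)
Phi2  <- est$psi["higher", "higher", drop = FALSE]  # Var(second-order factor)
Psi_u <- est$psi[fo, fo]                            # first-order uniquenesses
Theta <- est$theta                                  # indicator residual covariances
u  <- rep(1, nrow(L))                               # vector of 1s (indicators)
uF <- rep(1, length(fo))                            # vector of 1s (factors)
vc <- drop(t(u) %*% L %*% B %*% Phi2 %*% t(B) %*% t(L) %*% u)  # 2nd-order part
vu <- drop(t(u) %*% L %*% Psi_u %*% t(L) %*% u)                # uniqueness part
ve <- drop(t(u) %*% Theta %*% u)                               # error part
c(omegaL1 = vc / (vc + vu + ve),
  omegaL2 = drop(t(uF) %*% B %*% Phi2 %*% t(B) %*% uF) /
            drop(t(uF) %*% (B %*% Phi2 %*% t(B) + Psi_u) %*% uF))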
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
semTools/man/BootMiss-class.Rd0000644000176200001440000000432213432435606015772 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/missingBootstrap.R
\docType{class}
\name{BootMiss-class}
\alias{BootMiss-class}
\alias{show,BootMiss-method}
\alias{summary,BootMiss-method}
\alias{hist,BootMiss-method}
\title{Class For the Results of Bollen-Stine Bootstrap with Incomplete Data}
\usage{
\S4method{show}{BootMiss}(object)

\S4method{summary}{BootMiss}(object)

\S4method{hist}{BootMiss}(x, ..., alpha = 0.05, nd = 2, printLegend = TRUE,
  legendArgs = list(x = "topleft"))
}
\arguments{
\item{object, x}{object of class \code{BootMiss}}

\item{...}{Additional arguments to pass to \code{\link[graphics]{hist}}}

\item{alpha}{alpha level used to draw confidence limits}

\item{nd}{number of digits to display}

\item{printLegend}{\code{logical}. If \code{TRUE} (default), a legend will be printed with the histogram}

\item{legendArgs}{\code{list} of arguments passed to the \code{\link[graphics]{legend}} function. The default argument is a list placing the legend at the top-left of the figure.}
}
\value{
The \code{hist} method returns a list of \code{length == 2}, containing the arguments for the call to \code{hist} and the arguments to the call for \code{legend}, respectively.
}
\description{
This class contains the results of the Bollen-Stine bootstrap with missing data.
}
\section{Slots}{

\describe{
\item{\code{time}}{A list containing 2 \code{difftime} objects (\code{transform} and \code{fit}), indicating the time elapsed for data transformation and for fitting the model to bootstrap data sets, respectively.}

\item{\code{transData}}{Transformed data}

\item{\code{bootDist}}{The vector of \eqn{\chi^2} values from bootstrap data sets fitted by the target model}

\item{\code{origChi}}{The \eqn{\chi^2} value from the original data set}

\item{\code{df}}{The degrees of freedom of the model}

\item{\code{bootP}}{The \emph{p} value comparing the original \eqn{\chi^2} with the bootstrap distribution}
}}

\section{Objects from the Class}{
Objects can be created via the \code{\link{bsBootMiss}} function.
}

\examples{
# See the example from the bsBootMiss function
}
\seealso{
\code{\link{bsBootMiss}}
}
\author{
Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com})
}
semTools/man/measurementInvarianceCat-deprecated.Rd0000644000176200001440000000707213432435606022210 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measurementInvarianceCat.R
\name{measurementInvarianceCat-deprecated}
\alias{measurementInvarianceCat-deprecated}
\title{Measurement Invariance Tests for Categorical Items}
\usage{
measurementInvarianceCat(..., std.lv = FALSE, strict = FALSE,
  quiet = FALSE, fit.measures = "default", baseline.model = NULL,
  method = "default")
}
\arguments{
\item{...}{The same arguments as for any lavaan model. See \code{\link{cfa}} for more information.}

\item{std.lv}{If \code{TRUE}, the fixed-factor method of scale identification is used. If \code{FALSE}, the first variable for each factor is used as the marker variable.}

\item{strict}{If \code{TRUE}, the sequence requires `strict' invariance. See details for more information.}

\item{quiet}{If \code{FALSE} (default), a summary is printed out containing an overview of the different models that are fitted, together with some model comparison tests.
If \code{TRUE}, no summary is printed.}

\item{fit.measures}{Fit measures used to calculate the differences between nested models.}

\item{baseline.model}{custom baseline model passed to \code{\link[lavaan]{fitMeasures}}}

\item{method}{The method used to calculate the likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options}
}
\value{
Invisibly, all model fits in the sequence are returned as a list.
}
\description{
Testing measurement invariance across groups using a typical sequence of model comparison tests.
}
\details{
Theta parameterization is used to represent the SEM for categorical items. That is, residual variances are modeled instead of the total variance of the underlying normal variate for each item. Five models can be tested based on different constraints across groups.
\enumerate{
\item Model 1: configural invariance. The same factor structure is imposed on all groups.
\item Model 2: weak invariance. The factor loadings are constrained to be equal across groups.
\item Model 3: strong invariance. The factor loadings and thresholds are constrained to be equal across groups.
\item Model 4: strict invariance. The factor loadings, thresholds, and residual variances are constrained to be equal across groups. For categorical variables, all residual variances are fixed as 1.
\item Model 5: The factor loadings, thresholds, residual variances, and means are constrained to be equal across groups.
}

However, if all items are dichotomous (i.e., have only two categories), scalar invariance and weak invariance cannot be separated because thresholds need to be equal across groups for scale identification. Users can specify the \code{strict} argument to include the strict-invariance model in the invariance testing. See further details of scale identification and different parameterizations in Millsap and Yun-Tein (2004).
}
\examples{
\dontrun{
syntax <- ' f1 =~ u1 + u2 + u3 + u4'

measurementInvarianceCat(model = syntax, data = datCat, group = "g",
                         parameterization = "theta", estimator = "wlsmv",
                         ordered = c("u1", "u2", "u3", "u4"))
}

}
\references{
Millsap, R. E., & Yun-Tein, J. (2004). Assessing factorial invariance in ordered-categorical measures. \emph{Multivariate Behavioral Research, 39}(3), 479--515. doi:10.1207/S15327906MBR3903_4
}
\seealso{
\code{\link{semTools-deprecated}}
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})

Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be})

Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com})
}
\keyword{internal}
semTools/man/combinequark.Rd0000644000176200001440000000273713432435606015610 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quark.R
\name{combinequark}
\alias{combinequark}
\title{Combine the results from the quark function}
\usage{
combinequark(quark, percent)
}
\arguments{
\item{quark}{Provide the \code{\link{quark}} object that was returned. It should be a list of objects. Make sure to include it in its entirety.}

\item{percent}{Provide a percentage of variance that you would like to have explained. That many components (columns) will be extracted and kept with the output dataset. Enter this variable as a number WITHOUT a percentage sign.}
}
\value{
The output of this function is the original dataset used in quark, combined with enough principal component scores to account for the requested amount of variance.
}
\description{
This function builds upon the \code{\link{quark}} function to provide a final dataset comprising the original dataset provided to \code{\link{quark}} and enough principal components to account for a certain level of variance in the data.
}
\examples{
set.seed(123321)

dat <- HolzingerSwineford1939[,7:15]
misspat <- matrix(runif(nrow(dat) * 9) < 0.3, nrow(dat))
dat[misspat] <- NA
dat <- cbind(HolzingerSwineford1939[,1:3], dat)

quark.list <- quark(data = dat, id = c(1, 2))

final.data <- combinequark(quark = quark.list, percent = 80)
}
\seealso{
\code{\link{quark}}
}
\author{
Steven R. Chesnut (University of Southern Mississippi; \email{Steven.Chesnut@usm.edu})
}
semTools/man/lavTestScore.mi.Rd0000644000176200001440000001772013532031247016152 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runMI-score.R
\name{lavTestScore.mi}
\alias{lavTestScore.mi}
\title{Score Test for Multiple Imputations}
\usage{
lavTestScore.mi(object, add = NULL, release = NULL, test = c("D2", "D1"),
  scale.W = !asymptotic, omit.imps = c("no.conv", "no.se"),
  asymptotic = is.null(add), univariate = TRUE, cumulative = FALSE,
  epc = FALSE, standardized = epc, cov.std = epc, verbose = FALSE,
  warn = TRUE, information = "expected")
}
\arguments{
\item{object}{An object of class \code{\linkS4class{lavaan.mi}}.}

\item{add}{Either a \code{character} string (typically between single quotes) or a parameter table containing additional (currently fixed-to-zero) parameters for which the score test must be computed.}

\item{release}{Vector of \code{integer}s. The indices of the \emph{equality} constraints that should be released. The indices correspond to the order of the equality constraints as they appear in the parameter table.}

\item{test}{\code{character} indicating which pooling method to use. \code{"D1"} indicates Li, Meng, Raghunathan, & Rubin's (1991) proposed Wald test will be applied to the gradient and information, and those pooled values will be used to calculate score-test statistics in the usual manner. \code{"D2"} (default) indicates that complete-data score-test statistics calculated from each imputed data set will be pooled across imputations, as described in Li et al. (1991) and Enders (2010).}

\item{scale.W}{\code{logical}. If \code{FALSE}, the pooled information matrix is calculated as the weighted sum of the within-imputation and between-imputation components. Otherwise, the pooled information is calculated by scaling the within-imputation component by the average relative increase in variance (ARIV; see Enders, 2010, p. 235), which is \emph{only} consistent when requesting the \emph{F} test (i.e., \code{asymptotic = FALSE}). Ignored (irrelevant) if \code{test = "D2"}.}

\item{omit.imps}{\code{character} vector specifying criteria for omitting imputations from pooled results. Can include any of \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the default setting, which excludes any imputations that did not converge or for which standard errors could not be computed. The last option (\code{"no.npd"}) would exclude any imputations which yielded a nonpositive definite covariance matrix for observed or latent variables, which would include any "improper solutions" such as Heywood cases.}

\item{asymptotic}{\code{logical}.
If \code{FALSE} (default when using \code{add} to test adding fixed parameters to the model), the pooled test will be returned as an \emph{F}-distributed variable with numerator (\code{df1}) and denominator (\code{df2}) degrees of freedom. If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its \code{df1} on the assumption that its \code{df2} is sufficiently large that the statistic will be asymptotically \eqn{\chi^2} distributed with \code{df1} degrees of freedom. When using the \code{release} argument, \code{asymptotic} will be set to \code{TRUE} because (A)RIV can only be calculated for \code{add}ed parameters.}

\item{univariate}{\code{logical}. If \code{TRUE}, compute the univariate score statistics, one for each constraint.}

\item{cumulative}{\code{logical}. If \code{TRUE}, order the univariate score statistics from large to small, and compute a series of multivariate score statistics, each time including an additional constraint in the test.}

\item{epc}{\code{logical}. If \code{TRUE}, and we are releasing existing constraints, compute the expected parameter changes for the existing (free) parameters (and any specified with \code{add}), if all constraints were released. For EPCs associated with a particular (1-\emph{df}) constraint, only specify one parameter in \code{add} or one constraint in \code{release}.}

\item{standardized}{If \code{TRUE}, two extra columns (\code{sepc.lv} and \code{sepc.all}) in the \code{$epc} table will contain standardized values for the EPCs. See \code{\link{lavTestScore}}.}

\item{cov.std}{\code{logical}. See \code{\link{standardizedSolution}}.}

\item{verbose}{\code{logical}. Not used for now.}

\item{warn}{\code{logical}. If \code{TRUE}, print warnings if they occur.}

\item{information}{\code{character} indicating the type of information matrix to use (check \code{\link{lavInspect}} for available options). \code{"expected"} information is the default, which provides better control of Type I errors.}
}
\value{
A list containing at least one \code{data.frame}:
\itemize{
\item{\code{$test}: The total score test, with columns for the score test statistic (\code{X2}), its degrees of freedom (\code{df}), its \emph{p} value under the \eqn{\chi^2} distribution (\code{p.value}), and if \code{asymptotic = FALSE}, the average relative increase in variance (ARIV) used to calculate the denominator \emph{df} is also returned as a missing-data diagnostic, along with the fraction of missing information (FMI = ARIV / (1 + ARIV)).}
\item{\code{$uni}: Optional (if \code{univariate=TRUE}). Each 1-\emph{df} score test, equivalent to modification indices. Also includes EPCs if \code{epc=TRUE}, and RIV and FMI if \code{asymptotic=FALSE}.}
\item{\code{$cumulative}: Optional (if \code{cumulative=TRUE}). Cumulative score tests, with ARIV and FMI if \code{asymptotic=FALSE}.}
\item{\code{$epc}: Optional (if \code{epc=TRUE}). Parameter estimates, expected parameter changes, and expected parameter values if ALL the tested constraints were freed.}
}
See \code{\link[lavaan]{lavTestScore}} for details.
}
\description{
Score test (or "Lagrange multiplier" test) for lavaan models fitted to multiply imputed data sets. Statistics for releasing one or more fixed or constrained parameters in a model can be calculated by pooling the gradient and information matrices across imputed data sets, analogous to Li, Meng, Raghunathan, & Rubin's (1991) proposed Wald test, or by pooling the complete-data score-test statistics across imputed data sets (Li et al., 1991).
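For intuition, the \code{"D2"} pooling rule can be sketched in a few lines of R. This is only a schematic illustration of the formula in Li et al. (1991; see also Enders, 2010), not the package's internal implementation; the function name \code{poolChi2} and its inputs are hypothetical:
\preformatted{
poolChi2 <- function(w, k) {
  # w: complete-data chi-squared statistics (one per imputation)
  # k: their common degrees of freedom
  m <- length(w)
  ariv <- (1 + 1/m) * var(sqrt(w))  # average relative increase in variance
  D2 <- (mean(w)/k - ariv * (m + 1)/(m - 1)) / (1 + ariv)  # pooled F statistic
  df2 <- k^(-3/m) * (m - 1) * (1 + 1/ariv)^2               # denominator df
  c(F = D2, df1 = k, df2 = df2,
    p = pf(D2, df1 = k, df2 = df2, lower.tail = FALSE))
}
}
Multiplying the resulting \emph{F} statistic by its \code{df1} yields the asymptotic \eqn{\chi^2} version described under the \code{asymptotic} argument.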
} \examples{ \dontrun{ ## impose missing data for example HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), "ageyr","agemo","school")] set.seed(12345) HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) age <- HSMiss$ageyr + HSMiss$agemo/12 HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ## impute missing data library(Amelia) set.seed(12345) HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) imps <- HS.amelia$imputations ## specify CFA model from lavaan's ?cfa help page HS.model <- ' speed =~ c(L1, L1)*x7 + c(L1, L1)*x8 + c(L1, L1)*x9 ' out <- cfa.mi(HS.model, data = imps, group = "school", std.lv = TRUE) ## Mode 1: Score test for releasing equality constraints ## default test: Li et al.'s (1991) "D2" method lavTestScore.mi(out, cumulative = TRUE) ## Li et al.'s (1991) "D1" method lavTestScore.mi(out, test = "D1") ## Mode 2: Score test for adding currently fixed-to-zero parameters lavTestScore.mi(out, add = 'x7 ~~ x8 + x9') } } \references{ Bentler, P. M., & Chou, C.-P. (1992). Some new covariance structure model improvement statistics. \emph{Sociological Methods & Research, 21}(2), 259--282. doi:10.1177/0049124192021002006 Enders, C. K. (2010). \emph{Applied missing data analysis}. New York, NY: Guilford. Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). Significance levels from repeated \emph{p}-values with multiply-imputed data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from https://www.jstor.org/stable/24303994 } \seealso{ \code{\link[lavaan]{lavTestScore}} } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) Adapted from \pkg{lavaan} source code, written by Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) \code{test = "D1"} method proposed by Maxwell Mansolf (University of California, Los Angeles; \email{mamansolf@gmail.com}) } semTools/man/twostage-class.Rd0000644000176200001440000001460313432435606016073 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/TSML.R \docType{class} \name{twostage-class} \alias{twostage-class} \alias{show,twostage-method} \alias{summary,twostage-method} \alias{anova,twostage-method} \alias{vcov,twostage-method} \alias{coef,twostage-method} \alias{fitted.values,twostage-method} \alias{fitted,twostage-method} \alias{residuals,twostage-method} \alias{resid,twostage-method} \alias{nobs,twostage-method} \title{Class for the Results of 2-Stage Maximum Likelihood (TSML) Estimation for Missing Data} \usage{ \S4method{show}{twostage}(object) \S4method{summary}{twostage}(object, ...) 
\S4method{anova}{twostage}(object, h1 = NULL, baseline = FALSE)

\S4method{nobs}{twostage}(object, type = c("ntotal", "ngroups",
  "n.per.group", "norig", "patterns", "coverage"))

\S4method{coef}{twostage}(object, type = c("free", "user"))

\S4method{vcov}{twostage}(object, baseline = FALSE)

\S4method{fitted.values}{twostage}(object, model = c("target", "saturated",
  "baseline"), type = "moments", labels = TRUE)

\S4method{fitted}{twostage}(object, model = c("target", "saturated",
  "baseline"), type = "moments", labels = TRUE)

\S4method{residuals}{twostage}(object, type = c("raw", "cor", "normalized",
  "standardized"))

\S4method{resid}{twostage}(object, type = c("raw", "cor", "normalized",
  "standardized"))
}
\arguments{
\item{object}{An object of class \code{twostage}.}

\item{...}{arguments passed to \code{\link[lavaan]{parameterEstimates}}.}

\item{h1}{An object of class \code{twostage} in which \code{object} is nested, so that their difference in fit can be tested using \code{anova} (see \bold{Value} section for details).}

\item{baseline}{\code{logical} indicating whether to return results for the baseline model, rather than the default target (hypothesized) model.}

\item{type}{The meaning of this argument varies depending on which method it is used for. Find detailed descriptions in the \bold{Value} section under \code{coef}, \code{nobs}, and \code{residuals}.}

\item{model}{\code{character} naming the slot for which to return the model-implied sample moments (see the \code{fitted.values} description).}

\item{labels}{\code{logical} indicating whether the model-implied sample moments should have (row/column) labels.}
}
\value{
\item{show}{\code{signature(object = "twostage"):} The \code{show} function is used to display the results of the \code{anova} method, as well as the header of the (uncorrected) target model results.}

\item{summary}{\code{signature(object = "twostage", ...):} The summary function prints the same information as the \code{show} method, but also provides (and returns) the output of \code{\link[lavaan]{parameterEstimates}(object@target, ...)} with corrected \emph{SE}s, test statistics, and confidence intervals. Additional arguments can be passed to \code{\link[lavaan]{parameterEstimates}}, including \code{fmi = TRUE} to provide an estimate of the fraction of missing information.}

\item{anova}{\code{signature(object = "twostage", h1 = NULL, baseline = FALSE):} The \code{anova} function returns the residual-based \eqn{\chi^2} test statistic result, as well as the scaled \eqn{\chi^2} test statistic result, for the model in the \code{target} slot, or for the model in the \code{baseline} slot if \code{baseline = TRUE}. The user can also provide a single additional \code{twostage} object to the \code{h1} argument, in which case \code{anova} returns residual-based and scaled (\eqn{\Delta})\eqn{\chi^2} test results, under the assumption that the models are nested. The models will be automatically sorted according to their degrees of freedom.}

\item{nobs}{\code{signature(object = "twostage", type = c("ntotal", "ngroups", "n.per.group", "norig", "patterns", "coverage")):} The \code{nobs} function will return the total sample size used in the analysis by default.
Also available are the number of groups or the sample size per group, the original sample size (if any rows were deleted because all variables were missing), the missing data patterns, and the matrix of coverage (the diagonal is the proportion of the sample observed on each variable, and the off-diagonal is the proportion observed for both variables in each pair).}

\item{coef}{\code{signature(object = "twostage", type = c("free", "user")):} This is simply a wrapper around the corresponding \code{\linkS4class{lavaan}} method, providing point estimates from the \code{target} slot.}

\item{vcov}{\code{signature(object = "twostage", baseline = FALSE):} Returns the asymptotic covariance matrix of the estimated parameters (corrected for additional uncertainty due to missing data) for the model in the \code{target} slot, or for the model in the \code{baseline} slot if \code{baseline = TRUE}.}

\item{fitted.values, fitted}{\code{signature(object = "twostage", model = c("target", "saturated", "baseline")):} This is simply a wrapper around the corresponding \code{\linkS4class{lavaan}} method, providing model-implied sample moments from the slot specified in the \code{model} argument.}

\item{residuals, resid}{\code{signature(object = "twostage", type = c("raw", "cor", "normalized", "standardized")):} This is simply a wrapper around the corresponding \code{\linkS4class{lavaan}} method, providing residuals of the specified \code{type} from the \code{target} slot.}
}
\description{
This class contains the results of 2-Stage Maximum Likelihood (TSML) estimation for missing data. The \code{summary}, \code{anova}, \code{vcov} methods return corrected \emph{SE}s and test statistics. Other methods are simply wrappers around the corresponding \code{\linkS4class{lavaan}} methods.
}
\section{Slots}{

\describe{
\item{\code{saturated}}{A fitted \code{\linkS4class{lavaan}} object containing the saturated model results}

\item{\code{target}}{A fitted \code{\linkS4class{lavaan}} object containing the target/hypothesized model results}

\item{\code{baseline}}{A fitted \code{\linkS4class{lavaan}} object containing the baseline/null model results}

\item{\code{auxNames}}{A \code{character} vector (potentially of \code{length == 0}) of any auxiliary variable names, if used}
}}

\section{Objects from the Class}{
Objects can be created via the \code{\link{twostage}} function.
}

\examples{
# See the example from the twostage function
}
\seealso{
\code{\link{twostage}}
}
\author{
Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com})
}
semTools/man/maximalRelia.Rd0000644000176200001440000001135713507701533015541 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reliability.R
\name{maximalRelia}
\alias{maximalRelia}
\title{Calculate maximal reliability}
\usage{
maximalRelia(object, omit.imps = c("no.conv", "no.se"))
}
\arguments{
\item{object}{A \code{\linkS4class{lavaan}} or \code{\linkS4class{lavaan.mi}} object, expected to contain only exogenous common factors (i.e., a CFA model).}

\item{omit.imps}{\code{character} vector specifying criteria for omitting imputations from pooled results. Can include any of \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the default setting, which excludes any imputations that did not converge or for which standard errors could not be computed.
The last option (\code{"no.npd"}) would exclude any imputations which yielded a nonpositive definite covariance matrix for observed or latent variables, which would include any "improper solutions" such as Heywood cases. NPD solutions are not excluded by default because they are likely to occur due to sampling error, especially in small samples. However, gross model misspecification could also cause NPD solutions, users can compare pooled results with and without this setting as a sensitivity analysis to see whether some imputations warrant further investigation.} } \value{ Maximal reliability values of each group. The maximal-reliability weights are also provided. Users may extracted the weighted by the \code{attr} function (see example below). } \description{ Calculate maximal reliability of a scale } \details{ Given that a composite score (\eqn{W}) is a weighted sum of item scores: \deqn{ W = \bold{w}^\prime \bold{x} ,} where \eqn{\bold{x}} is a \eqn{k \times 1} vector of the scores of each item, \eqn{\bold{w}} is a \eqn{k \times 1} weight vector of each item, and \eqn{k} represents the number of items. Then, maximal reliability is obtained by finding \eqn{\bold{w}} such that reliability attains its maximum (Li, 1997; Raykov, 2012). Note that the reliability can be obtained by \deqn{ \rho = \frac{\bold{w}^\prime \bold{S}_T \bold{w}}{\bold{w}^\prime \bold{S}_X \bold{w}}} where \eqn{\bold{S}_T} is the covariance matrix explained by true scores and \eqn{\bold{S}_X} is the observed covariance matrix. Numerical method is used to find \eqn{\bold{w}} in this function. For continuous items, \eqn{\bold{S}_T} can be calculated by \deqn{ \bold{S}_T = \Lambda \Psi \Lambda^\prime,} where \eqn{\Lambda} is the factor loading matrix and \eqn{\Psi} is the covariance matrix among factors. \eqn{\bold{S}_X} is directly obtained by covariance among items. For categorical items, Green and Yang's (2009) method is used for calculating \eqn{\bold{S}_T} and \eqn{\bold{S}_X}. The element \eqn{i} and \eqn{j} of \eqn{\bold{S}_T} can be calculated by \deqn{ \left[\bold{S}_T\right]_{ij} = \sum^{C_i - 1}_{c_i = 1} \sum^{C_j - 1}_{c_j - 1} \Phi_2\left( \tau_{x_{c_i}}, \tau_{x_{c_j}}, \left[ \Lambda \Psi \Lambda^\prime \right]_{ij} \right) - \sum^{C_i - 1}_{c_i = 1} \Phi_1(\tau_{x_{c_i}}) \sum^{C_j - 1}_{c_j - 1} \Phi_1(\tau_{x_{c_j}}),} where \eqn{C_i} and \eqn{C_j} represents the number of thresholds in Items \eqn{i} and \eqn{j}, \eqn{\tau_{x_{c_i}}} represents the threshold \eqn{c_i} of Item \eqn{i}, \eqn{\tau_{x_{c_j}}} represents the threshold \eqn{c_i} of Item \eqn{j}, \eqn{ \Phi_1(\tau_{x_{c_i}})} is the cumulative probability of \eqn{\tau_{x_{c_i}}} given a univariate standard normal cumulative distribution and \eqn{\Phi_2\left( \tau_{x_{c_i}}, \tau_{x_{c_j}}, \rho \right)} is the joint cumulative probability of \eqn{\tau_{x_{c_i}}} and \eqn{\tau_{x_{c_j}}} given a bivariate standard normal cumulative distribution with a correlation of \eqn{\rho} Each element of \eqn{\bold{S}_X} can be calculated by \deqn{ \left[\bold{S}_T\right]_{ij} = \sum^{C_i - 1}_{c_i = 1} \sum^{C_j - 1}_{c_j - 1} \Phi_2\left( \tau_{V_{c_i}}, \tau_{V_{c_j}}, \rho^*_{ij} \right) - \sum^{C_i - 1}_{c_i = 1} \Phi_1(\tau_{V_{c_i}}) \sum^{C_j - 1}_{c_j - 1} \Phi_1(\tau_{V_{c_j}}),} where \eqn{\rho^*_{ij}} is a polychoric correlation between Items \eqn{i} and \eqn{j}. 
}
\examples{
total <- 'f =~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 '
fit <- cfa(total, data = HolzingerSwineford1939)
maximalRelia(fit)

# Extract the weights
mr <- maximalRelia(fit)
attr(mr, "weight")
}
\references{
Li, H. (1997). A unifying expression for the maximal reliability of a linear composite. \emph{Psychometrika, 62}(2), 245--249. doi:10.1007/BF02295278

Raykov, T. (2012). Scale construction and development using structural equation modeling. In R. H. Hoyle (Ed.), \emph{Handbook of structural equation modeling} (pp. 472--494). New York, NY: Guilford.
}
\seealso{
\code{\link{reliability}} for reliability of an unweighted composite score
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
semTools/man/findRMSEApower.Rd0000644000176200001440000000420113432435606015711 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/powerAnalysisRMSEA.R
\name{findRMSEApower}
\alias{findRMSEApower}
\title{Find the statistical power based on population RMSEA}
\usage{
findRMSEApower(rmsea0, rmseaA, df, n, alpha = 0.05, group = 1)
}
\arguments{
\item{rmsea0}{Null RMSEA}

\item{rmseaA}{Alternative RMSEA}

\item{df}{Model degrees of freedom}

\item{n}{Sample size of a dataset}

\item{alpha}{Alpha level used in power calculations}

\item{group}{The number of groups used to calculate RMSEA.}
}
\description{
Find the proportion of samples from the sampling distribution of RMSEA under the alternative hypothesis that is rejected by the cutoff derived from the sampling distribution of RMSEA under the null hypothesis. This function can be applied for both the test of close fit and the test of not-close fit (MacCallum, Browne, & Sugawara, 1996).
}
\details{
This function finds the proportion of the sampling distribution derived from the alternative RMSEA that is in the critical region derived from the sampling distribution of the null RMSEA. If \code{rmseaA} is greater than \code{rmsea0}, the test of close fit is used and the critical region is on the right-hand side of the null sampling distribution. On the other hand, if \code{rmseaA} is less than \code{rmsea0}, the test of not-close fit is used and the critical region is on the left-hand side of the null sampling distribution (MacCallum, Browne, & Sugawara, 1996).
}
\examples{
findRMSEApower(rmsea0 = .05, rmseaA = .08, df = 20, n = 200)
}
\references{
MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1}(2), 130--149. doi:10.1037/1082-989X.1.2.130
}
\seealso{
\itemize{
\item \code{\link{plotRMSEApower}} to plot the statistical power based on population RMSEA given the sample size
\item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions
\item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for a given statistical power based on population RMSEA
}
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
semTools/man/compareFit.Rd0000644000176200001440000000661113432435606015224 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compareFit.R
\name{compareFit}
\alias{compareFit}
\title{Build an object summarizing fit indices across multiple models}
\usage{
compareFit(..., nested = TRUE, argsLRT = list(), indices = TRUE,
  baseline.model = NULL)
}
\arguments{
\item{...}{fitted \code{lavaan} models or list(s) of \code{lavaan} objects.
\code{\linkS4class{lavaan.mi}} objects are also accepted, but all models must belong to the same class.}

\item{nested}{\code{logical} indicating whether the models in \code{...} are nested. See \code{\link{net}} for an empirical test of nesting.}

\item{argsLRT}{\code{list} of arguments to pass to \code{\link[lavaan]{lavTestLRT}}, as well as to \code{\link{lavTestLRT.mi}} and \code{\link{fitMeasures}} when comparing \code{\linkS4class{lavaan.mi}} models.}

\item{indices}{\code{logical} indicating whether to return fit indices from the \code{\link[lavaan]{fitMeasures}} function.}

\item{baseline.model}{optional fitted \code{\linkS4class{lavaan}} model passed to \code{\link[lavaan]{fitMeasures}} to calculate incremental fit indices.}
}
\value{
A \code{\linkS4class{FitDiff}} object that saves model fit comparisons across multiple models. If the models are not nested, only fit indices for each model are returned. If the models are nested, the differences in fit indices are additionally returned, as well as test statistics comparing each sequential pair of models (ordered by their degrees of freedom).
}
\description{
This function creates a template for comparing fit indices across multiple fitted lavaan objects. The results can later be exported to the clipboard or a file.
}
\examples{
HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '

fit1 <- cfa(HS.model, data = HolzingerSwineford1939)
## non-nested model
m2 <- ' f1 =~ x1 + x2 + x3 + x4
        f2 =~ x5 + x6 + x7 + x8 + x9 '
fit2 <- cfa(m2, data = HolzingerSwineford1939)
compareFit(fit1, fit2, nested = FALSE)

## nested model comparisons:
out <- measurementInvariance(model = HS.model, data = HolzingerSwineford1939,
                             group = "school", quiet = TRUE)
compareFit(out)

\dontrun{
## also applies to lavaan.mi objects (fit model to multiple imputations)
library(Amelia)
set.seed(12345)
HSMiss <- HolzingerSwineford1939[ , paste("x", 1:9, sep = "")]
HSMiss$x5 <- ifelse(HSMiss$x1 <= quantile(HSMiss$x1, .3), NA, HSMiss$x5)
HSMiss$x9 <- ifelse(is.na(HSMiss$x5), NA, HSMiss$x9)
HSMiss$school <- HolzingerSwineford1939$school
HS.amelia <- amelia(HSMiss, m = 20, noms = "school")
imps <- HS.amelia$imputations

## request robust test statistics
mgfit2 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm")
mgfit1 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm",
                 group.equal = "loadings")
mgfit0 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm",
                 group.equal = c("loadings","intercepts"))

## request the strictly-positive robust test statistics
compareFit(scalar = mgfit0, metric = mgfit1, config = mgfit2,
           argsLRT = list(asymptotic = TRUE,
                          method = "satorra.bentler.2010"))
}

}
\seealso{
\code{\linkS4class{FitDiff}}, \code{\link{clipboard}}
}
\author{
Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com})

Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
semTools/man/SSpower.Rd0000644000176200001440000001033613432435606014534 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/powerAnalysisSS.R
\name{SSpower}
\alias{SSpower}
\title{Power for model parameters}
\usage{
SSpower(powerModel, n, nparam, popModel, mu, Sigma, fun = "cfa",
  alpha = 0.05, ...)
}
\arguments{
\item{powerModel}{lavaan \code{\link[lavaan]{model.syntax}} for the model to be analyzed. This syntax should constrain at least one nonzero parameter to 0 (or another number).}

\item{n}{\code{integer}.
Sample size used in power calculation, or a vector of sample sizes if analyzing a multigroup model. If \code{length(n) < length(Sigma)} when \code{Sigma} is a list, \code{n} will be recycled.}

\item{nparam}{\code{integer}. Number of invalid constraints in \code{powerModel}.}

\item{popModel}{lavaan \code{\link[lavaan]{model.syntax}} specifying the data-generating model. This syntax should specify values for all nonzero parameters in the model. If \code{length(n) > 1}, the same population values will be used for each group. Different population values per group can only be specified by utilizing \code{Sigma} (and \code{mu}).}

\item{mu}{numeric or list. For a single-group model, a vector of population means. For a multigroup model, a list of vectors (one per group). If \code{mu} and \code{popModel} are missing, the mean structure will be excluded from the analysis.}

\item{Sigma}{matrix or list. For a single-group model, a population covariance matrix. For a multigroup model, a list of matrices (one per group). If missing, \code{popModel} will be used to generate a model-implied \code{Sigma}.}

\item{fun}{character. Name of the lavaan function used to fit \code{powerModel} (i.e., \code{"cfa"}, \code{"sem"}, \code{"growth"}, or \code{"lavaan"}).}

\item{alpha}{Type I error rate used to set a criterion for rejecting H0.}

\item{...}{additional arguments to pass to \code{\link[lavaan]{lavaan}}.}
}
\description{
Apply the Satorra & Saris (1985) method for chi-squared power analysis.
}
\details{
Specify all non-zero parameters in a population model, either by using lavaan syntax (\code{popModel}) or by submitting a population covariance matrix (\code{Sigma}) and optional mean vector (\code{mu}) implied by the population model. Then specify an analysis model that constrains at least one nonzero parameter to an incorrect value. Note the number in the \code{nparam} argument.
}
\examples{
## Specify population values. Note that every parameter has a fixed value.
modelP <- '
  f1 =~ .7*V1 + .7*V2 + .7*V3 + .7*V4
  f2 =~ .7*V5 + .7*V6 + .7*V7 + .7*V8
  f1 ~~ .3*f2
  f1 ~~ 1*f1
  f2 ~~ 1*f2
  V1 ~~ .51*V1
  V2 ~~ .51*V2
  V3 ~~ .51*V3
  V4 ~~ .51*V4
  V5 ~~ .51*V5
  V6 ~~ .51*V6
  V7 ~~ .51*V7
  V8 ~~ .51*V8
'
## Specify analysis model. Note that the parameter of interest f1~~f2 is fixed to 0.
modelA <- '
  f1 =~ V1 + V2 + V3 + V4
  f2 =~ V5 + V6 + V7 + V8
  f1 ~~ 0*f2
'
## Calculate power
SSpower(powerModel = modelA, popModel = modelP, n = 150, nparam = 1,
        std.lv = TRUE)

## Get power for a range of sample sizes
Ns <- seq(100, 500, 40)
Power <- rep(NA, length(Ns))
for(i in 1:length(Ns)) {
  Power[i] <- SSpower(powerModel = modelA, popModel = modelP,
                      n = Ns[i], nparam = 1, std.lv = TRUE)
}
plot(x = Ns, y = Power, type = "l", xlab = "Sample Size")


## Specify second population to calculate power for multigroup model
popMoments1 <- fitted(cfa(modelP))
modelP2 <- '
  f1 =~ .7*V1 + .7*V2 + .7*V3 + .7*V4
  f2 =~ .7*V5 + .7*V6 + .7*V7 + .7*V8
  f1 ~~ .5*f2   ## higher correlation in Group 2
  f1 ~~ 1*f1
  f2 ~~ 1*f2
  V1 ~~ .51*V1
  V2 ~~ .51*V2
  V3 ~~ .51*V3
  V4 ~~ .51*V4
  V5 ~~ .51*V5
  V6 ~~ .51*V6
  V7 ~~ .51*V7
  V8 ~~ .51*V8
'
popMoments2 <- fitted(cfa(modelP2))
modelA2 <- '
  f1 =~ V1 + V2 + V3 + V4
  f2 =~ V5 + V6 + V7 + V8
  f1 ~~ c(0, 0)*f2
'
mu <- list(popMoments1$mean, popMoments2$mean) # ignored if NULL
Sigma <- list(popMoments1$cov, popMoments2$cov)
SSpower(powerModel = modelA2, mu = mu, Sigma = Sigma,
        n = c(60, 65), nparam = 2)
}
\references{
Satorra, A., & Saris, W. E. (1985). Power of the likelihood ratio test in covariance structure analysis. \emph{Psychometrika, 50}, 83--90.
doi:10.1007/BF02294150
}
\author{
Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu})

Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com})
}
semTools/man/runMI.Rd0000644000176200001440000001622513532031247014161 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runMI.R
\name{runMI}
\alias{runMI}
\alias{lavaan.mi}
\alias{cfa.mi}
\alias{sem.mi}
\alias{growth.mi}
\title{Fit a lavaan Model to Multiple Imputed Data Sets}
\usage{
runMI(model, data, fun = "lavaan", ..., m, miArgs = list(),
  miPackage = "Amelia", seed = 12345)

lavaan.mi(model, data, ..., m, miArgs = list(), miPackage = "Amelia",
  seed = 12345)

cfa.mi(model, data, ..., m, miArgs = list(), miPackage = "Amelia",
  seed = 12345)

sem.mi(model, data, ..., m, miArgs = list(), miPackage = "Amelia",
  seed = 12345)

growth.mi(model, data, ..., m, miArgs = list(), miPackage = "Amelia",
  seed = 12345)
}
\arguments{
\item{model}{The analysis model can be specified using lavaan \code{\link[lavaan]{model.syntax}} or a parameter table (as returned by \code{\link[lavaan]{parTable}}).}

\item{data}{A \code{data.frame} with missing observations, or a \code{list} of imputed data sets (if data are imputed already). If \code{runMI} has already been called, then imputed data sets are stored in the \code{@DataList} slot, so \code{data} can also be a \code{lavaan.mi} object from which the same imputed data will be used for additional analyses.}

\item{fun}{\code{character}. Name of a specific lavaan function used to fit \code{model} to \code{data} (i.e., \code{"lavaan"}, \code{"cfa"}, \code{"sem"}, or \code{"growth"}). Only required for \code{runMI}.}

\item{\dots}{additional arguments to pass to \code{\link[lavaan]{lavaan}} or \code{\link[lavaan]{lavaanList}}. See also \code{\link[lavaan]{lavOptions}}. Note that \code{lavaanList} provides parallel computing options, as well as a \code{FUN} argument so the user can extract custom output after the model is fitted to each imputed data set (see \strong{Examples}). TIP: If a custom \code{FUN} is used \emph{and} \code{parallel = "snow"} is requested, the user-supplied function should explicitly call \code{library} or use \code{\link[base]{::}} for any functions not part of the base distribution.}

\item{m}{\code{integer}. Request the number of imputations. Ignored if \code{data} is already a \code{list} of imputed data sets or a \code{lavaan.mi} object.}

\item{miArgs}{Additional arguments for the multiple-imputation function (\code{miPackage}). The arguments should be put in a list (see example below). Ignored if \code{data} is already a \code{list} of imputed data sets or a \code{lavaan.mi} object.}

\item{miPackage}{Package to be used for imputation. Currently these functions only support \code{"Amelia"} or \code{"mice"} for imputation. Ignored if \code{data} is already a \code{list} of imputed data sets or a \code{lavaan.mi} object.}

\item{seed}{\code{integer}. Random number seed to be set before imputing the data. Ignored if \code{data} is already a \code{list} of imputed data sets or a \code{lavaan.mi} object.}
}
\value{
A \code{\linkS4class{lavaan.mi}} object
}
\description{
This function fits a lavaan model to a list of imputed data sets, and can also implement multiple imputation for a single \code{data.frame} with missing observations, using either the Amelia package or the mice package.
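Parameter estimates are pooled across imputations using Rubin's (1987) rules. A minimal sketch of those rules, for intuition only (not the package's internal code; the function name \code{poolRubin} is hypothetical, and \code{est} and \code{se} stand for \code{m}-by-\code{p} matrices of estimates and standard errors from the \code{m} completed-data fits):
\preformatted{
poolRubin <- function(est, se) {
  m <- nrow(est)
  pooled <- colMeans(est)      # pooled point estimates
  W <- colMeans(se^2)          # within-imputation variance
  B <- apply(est, 2, var)      # between-imputation variance
  Tvar <- W + (1 + 1/m) * B    # total sampling variance
  cbind(est = pooled, se = sqrt(Tvar))
}
}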
} \examples{ \dontrun{ ## impose missing data for example HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), "ageyr","agemo","school")] set.seed(12345) HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) age <- HSMiss$ageyr + HSMiss$agemo/12 HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ## specify CFA model from lavaan's ?cfa help page HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' ## impute data within runMI... out1 <- cfa.mi(HS.model, data = HSMiss, m = 20, seed = 12345, miArgs = list(noms = "school")) ## ... or impute missing data first library(Amelia) set.seed(12345) HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) imps <- HS.amelia$imputations out2 <- cfa.mi(HS.model, data = imps) ## same results (using the same seed results in the same imputations) cbind(impute.within = coef(out1), impute.first = coef(out2)) summary(out1, fit.measures = TRUE) summary(out1, ci = FALSE, fmi = TRUE, output = "data.frame") summary(out1, ci = FALSE, stand = TRUE, rsq = TRUE) ## model fit. D3 includes information criteria anova(out1) ## equivalently: lavTestLRT.mi(out1) ## request D2 anova(out1, test = "D2") ## request fit indices fitMeasures(out1) ## fit multigroup model without invariance constraints mgfit.config <- cfa.mi(HS.model, data = imps, estimator = "mlm", group = "school") ## add invariance constraints, and use previous fit as "data" mgfit.metric <- cfa.mi(HS.model, data = mgfit.config, estimator = "mlm", group = "school", group.equal = "loadings") mgfit.scalar <- cfa.mi(HS.model, data = mgfit.config, estimator = "mlm", group = "school", group.equal = c("loadings","intercepts")) ## compare fit of 2 models to test metric invariance ## (scaled likelihood ratio test) lavTestLRT.mi(mgfit.metric, h1 = mgfit.config) ## To compare multiple models, you must use anova() anova(mgfit.config, mgfit.metric, mgfit.scalar) ## or compareFit(), which also includes fit indices for comparison ## (optional: name the models) compareFit(config = mgfit.config, metric = mgfit.metric, scalar = mgfit.scalar, argsLRT = list(test = "D2", method = "satorra.bentler.2010")) ## correlation residuals to investigate local misfit resid(mgfit.scalar, type = "cor.bentler") ## modification indices for fixed parameters, to investigate local misfit modindices.mi(mgfit.scalar) ## or lavTestScore.mi for modification indices about equality constraints lavTestScore.mi(mgfit.scalar) ## Wald test of whether latent means are == (fix 3 means to zero in group 2) eq.means <- ' .p70. == 0 .p71. == 0 .p72. == 0 ' lavTestWald.mi(mgfit.scalar, constraints = eq.means) ## ordered-categorical data data(datCat) lapply(datCat, class) ## impose missing values set.seed(123) for (i in 1:8) datCat[sample(1:nrow(datCat), size = .1*nrow(datCat)), i] <- NA catout <- cfa.mi(' f =~ 1*u1 + 1*u2 + 1*u3 + 1*u4 ', data = datCat, m = 3, seed = 456, miArgs = list(ords = paste0("u", 1:8), noms = "g"), FUN = function(fit) { list(wrmr = lavaan::fitMeasures(fit, "wrmr"), zeroCells = lavaan::lavInspect(fit, "zero.cell.tables")) }) summary(catout) lavTestLRT.mi(catout, test = "D2", pool.robust = TRUE) fitMeasures(catout, fit.measures = c("rmsea","srmr","cfi"), test = "D2", pool.robust = TRUE) ## extract custom output sapply(catout@funList, function(x) x$wrmr) # WRMR for each imputation catout@funList[[1]]$zeroCells # zero-cell tables for first imputation catout@funList[[2]]$zeroCells # zero-cell tables for second imputation ... } } \references{ Enders, C. K. (2010). 
\emph{Applied missing data analysis}. New York, NY: Guilford.

Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. New York, NY: Wiley.
}
\author{
Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com})
}
semTools/man/skew.Rd0000644000176200001440000000306713432435606014106 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataDiagnosis.R
\name{skew}
\alias{skew}
\title{Finding skewness}
\usage{
skew(object, population = FALSE)
}
\arguments{
\item{object}{A vector from which to compute skewness}

\item{population}{\code{TRUE} to compute the parameter formula. \code{FALSE} to compute the sample statistic formula.}
}
\value{
The skewness value, along with a test statistic if \code{population = FALSE}
}
\description{
Finding skewness (\eqn{g_{1}}) of an object
}
\details{
The skewness computed is \eqn{g_{1}}. The parameter skewness \eqn{\gamma_{1}} formula is
\deqn{\gamma_{1} = \frac{\mu_{3}}{\mu^{3/2}_{2}},}
where \eqn{\mu_{i}} denotes the \eqn{i} order central moment.

The skewness formula for the sample statistic \eqn{g_{1}} is
\deqn{g_{1} = \frac{k_{3}}{k^{3/2}_{2}},}
where \eqn{k_{i}} are the \eqn{i} order \emph{k}-statistics.

The standard error of the skewness is
\deqn{Var(\hat{g}_1) = \frac{6}{N}}
where \eqn{N} is the sample size.
}
\examples{
skew(1:5)
}
\references{
Weisstein, Eric W. (n.d.). \emph{Skewness}. Retrieved from \emph{MathWorld}--A Wolfram Web Resource: \url{http://mathworld.wolfram.com/Skewness.html}
}
\seealso{
\itemize{
\item \code{\link{kurtosis}} Find the univariate excessive kurtosis of a variable
\item \code{\link{mardiaSkew}} Find Mardia's multivariate skewness of a set of variables
\item \code{\link{mardiaKurtosis}} Find Mardia's multivariate kurtosis of a set of variables
}
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
semTools/man/probe3WayMC.Rd0000644000176200001440000001711613475443506015225 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/probeInteraction.R
\name{probe3WayMC}
\alias{probe3WayMC}
\title{Probing three-way interaction on the no-centered or mean-centered latent interaction}
\usage{
probe3WayMC(fit, nameX, nameY, modVar, valProbe1, valProbe2, group)
}
\arguments{
\item{fit}{The lavaan model object used to evaluate model fit}

\item{nameX}{The vector of the factor names used as the predictors. The three first-order factors will be listed first, followed by the three two-way interaction factors; the last element will represent the three-way interaction. Note that the fourth element must be the interaction between the first and the second variables, the fifth element must be the interaction between the first and the third variables, and the sixth element must be the interaction between the second and the third variables.}

\item{nameY}{The name of the factor that is used as the dependent variable.}

\item{modVar}{The names of the two factors used as moderators. The effect of the independent factor on each combination of the moderator variable values will be probed.}

\item{valProbe1}{The values of the first moderator that will be used to probe the effect of the independent factor.}

\item{valProbe2}{The values of the second moderator that will be used to probe the effect of the independent factor.}

\item{group}{In multigroup models, the label of the group for which the results will be returned.
Must correspond to one of \code{\link[lavaan]{lavInspect}(fit, "group.label")}.}
}
\value{
A list with two elements:
\enumerate{
\item \code{SimpleIntercept}: The intercepts given each combination of moderator values. This element will be shown only if the factor intercept is estimated (e.g., not fixed at 0).
\item \code{SimpleSlope}: The slopes given each combination of moderator values.
}
In each element, the first column represents values of the first moderator specified in the \code{valProbe1} argument. The second column represents values of the second moderator specified in the \code{valProbe2} argument. The third column is the simple intercept or simple slope. The fourth column is the standard error of the simple intercept or simple slope. The fifth column is the Wald (\emph{z}) statistic. The sixth column is the \emph{p} value testing whether the simple intercepts or slopes are different from 0.
}
\description{
Probing interaction for simple intercept and simple slope for the no-centered or mean-centered latent three-way interaction
}
\details{
Before using this function, researchers need to make the products of the indicators between the first-order factors using mean centering (Marsh, Wen, & Hau, 2004). Note that the double-mean centering may not be appropriate for probing interaction if researchers are interested in simple intercepts. The mean or double-mean centering can be done by the \code{\link{indProd}} function. The indicator products can be made for all possible combinations or by the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the regression with latent interaction will be used to fit all original indicators and the product terms. See the example below for how to fit the product terms. Once the lavaan result is obtained, this function will be used to probe the interaction.

Let the latent interaction model regressing the dependent variable (\eqn{Y}) on the independent variable (\eqn{X}) and the two moderators (\eqn{Z} and \eqn{W}) be
\deqn{ Y = b_0 + b_1X + b_2Z + b_3W + b_4XZ + b_5XW + b_6ZW + b_7XZW + r, }
where \eqn{b_0} is the estimated intercept or the expected value of \eqn{Y} when \eqn{X}, \eqn{Z}, and \eqn{W} are 0, \eqn{b_1} is the effect of \eqn{X} when \eqn{Z} and \eqn{W} are 0, \eqn{b_2} is the effect of \eqn{Z} when \eqn{X} and \eqn{W} are 0, \eqn{b_3} is the effect of \eqn{W} when \eqn{X} and \eqn{Z} are 0, \eqn{b_4} is the interaction effect between \eqn{X} and \eqn{Z} when \eqn{W} is 0, \eqn{b_5} is the interaction effect between \eqn{X} and \eqn{W} when \eqn{Z} is 0, \eqn{b_6} is the interaction effect between \eqn{Z} and \eqn{W} when \eqn{X} is 0, \eqn{b_7} is the three-way interaction effect between \eqn{X}, \eqn{Z}, and \eqn{W}, and \eqn{r} is the residual term.

For probing the three-way interaction, the simple intercept of the independent variable at specific values of the moderators (Aiken & West, 1991) can be obtained by
\deqn{ b_{0|X = 0, Z, W} = b_0 + b_2Z + b_3W + b_6ZW. }

The simple slope of the independent variable at specific values of the moderators can be obtained by
\deqn{ b_{X|Z, W} = b_1 + b_4Z + b_5W + b_7ZW.
}

The variance of the simple intercept formula is
\deqn{ Var\left(b_{0|X = 0, Z, W}\right) = Var\left(b_0\right) + Z^2Var\left(b_2\right) + W^2Var\left(b_3\right) + Z^2W^2Var\left(b_6\right) + 2ZCov\left(b_0, b_2\right) + 2WCov\left(b_0, b_3\right) + 2ZWCov\left(b_0, b_6\right) + 2ZWCov\left(b_2, b_3\right) + 2Z^2WCov\left(b_2, b_6\right) + 2ZW^2Cov\left(b_3, b_6\right) }
where \eqn{Var} denotes the variance of a parameter estimate and \eqn{Cov} denotes the covariance of two parameter estimates.

The variance of the simple slope formula is
\deqn{ Var\left(b_{X|Z, W}\right) = Var\left(b_1\right) + Z^2Var\left(b_4\right) + W^2Var\left(b_5\right) + Z^2W^2Var\left(b_7\right) + 2ZCov\left(b_1, b_4\right) + 2WCov\left(b_1, b_5\right) + 2ZWCov\left(b_1, b_7\right) + 2ZWCov\left(b_4, b_5\right) + 2Z^2WCov\left(b_4, b_7\right) + 2ZW^2Cov\left(b_5, b_7\right) }

The Wald statistic is used as the test statistic.
}
\examples{
library(lavaan)

dat3wayMC <- indProd(dat3way, 1:3, 4:6, 7:9)

model3 <- "
f1 =~ x1 + x2 + x3
f2 =~ x4 + x5 + x6
f3 =~ x7 + x8 + x9
f12 =~ x1.x4 + x2.x5 + x3.x6
f13 =~ x1.x7 + x2.x8 + x3.x9
f23 =~ x4.x7 + x5.x8 + x6.x9
f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9
f4 =~ x10 + x11 + x12
f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123
f1 ~~ 0*f12
f1 ~~ 0*f13
f1 ~~ 0*f123
f2 ~~ 0*f12
f2 ~~ 0*f23
f2 ~~ 0*f123
f3 ~~ 0*f13
f3 ~~ 0*f23
f3 ~~ 0*f123
f12 ~~ 0*f123
f13 ~~ 0*f123
f23 ~~ 0*f123
x1 ~ 0*1
x4 ~ 0*1
x7 ~ 0*1
x10 ~ 0*1
x1.x4 ~ 0*1
x1.x7 ~ 0*1
x4.x7 ~ 0*1
x1.x4.x7 ~ 0*1
f1 ~ NA*1
f2 ~ NA*1
f3 ~ NA*1
f12 ~ NA*1
f13 ~ NA*1
f23 ~ NA*1
f123 ~ NA*1
f4 ~ NA*1
"

fitMC3way <- sem(model3, data = dat3wayMC, std.lv = FALSE,
                 meanstructure = TRUE)
summary(fitMC3way)

result3wayMC <- probe3WayMC(fitMC3way,
                            c("f1", "f2", "f3", "f12", "f13", "f23", "f123"),
                            "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1))
result3wayMC
}
\references{
Aiken, L. S., & West, S. G. (1991). \emph{Multiple regression: Testing and interpreting interactions}. Newbury Park, CA: Sage.

Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}(3), 275--300. doi:10.1037/1082-989X.9.3.275
}
\seealso{
\itemize{
\item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering.
\item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from the mean-centering or double-mean-centering approach
\item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from the residual-centering approach.
\item \code{\link{probe3WayRC}} For probing the three-way latent interaction when the results are obtained from the residual-centering approach.
\item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction.
}
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})

Terrence D.
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/measEq.syntax.Rd0000644000176200001440000005726513532163761015707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/measEq.R \name{measEq.syntax} \alias{measEq.syntax} \title{Syntax for measurement equivalence} \usage{ measEq.syntax(configural.model, ..., ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", ID.thr = c(1L, 2L), group = NULL, group.equal = "", group.partial = "", longFacNames = list(), longIndNames = list(), long.equal = "", long.partial = "", auto = "all", warn = TRUE, debug = FALSE, return.fit = FALSE) } \arguments{ \item{configural.model}{A model with no measurement-invariance constraints (i.e., representing only configural invariance), unless required for model identification. \code{configural.model} can be either: \itemize{ \item lavaan \code{\link[lavaan]{model.syntax}} or a parameter table (as returned by \code{\link[lavaan]{parTable}}) specifying the configural model. Using this option, the user can also provide either raw \code{data} or summary statistics via \code{sample.cov} and (optionally) \code{sample.mean}. In order to include thresholds in the syntax, raw \code{data} must be provided. See \code{\link[lavaan]{lavaan}}. \item a fitted \code{\linkS4class{lavaan}} model (e.g., as returned by \code{\link[lavaan]{cfa}}) estimating the configural model }} \item{...}{Additional arguments (e.g., \code{data}, \code{ordered}, or \code{parameterization}) passed to the \code{\link[lavaan]{lavaan}} function. See also \code{\link[lavaan]{lavOptions}}.} \item{ID.fac}{\code{character}. The method for identifying common-factor variances and (if \code{meanstructure = TRUE}) means. Three methods are available, which go by different names in the literature: \itemize{ \item Standardize the common factor (mean = 0, \emph{SD} = 1) by specifying any of: \code{"std.lv"}, \code{"unit.variance"}, \code{"UV"}, \code{"fixed.factor"}, \code{"fixed-factor"} \item Choose a reference indicator by specifying any of: \code{"auto.fix.first"}, \code{"unit.loading"}, \code{"UL"}, \code{"marker"}, \code{"ref"}, \code{"ref.indicator"}, \code{"reference.indicator"}, \code{"reference-indicator"}, \code{"marker.variable"}, \code{"marker-variable"} \item Apply effects-code constraints to loadings and intercepts by specifying any of: \code{"FX"}, \code{"EC"}, \code{"effects"}, \code{"effects.coding"}, \code{"effects-coding"}, \code{"effects.code"}, \code{"effects-code"} } See Kloessner & Klopp (2019) for details about all three methods.} \item{ID.cat}{\code{character}. The method for identifying (residual) variances and intercepts of latent item-responses underlying any \code{ordered} indicators. Four methods are available: \itemize{ \item To follow Wu & Estabrook's (2016) guidelines (default), specify any of: \code{"Wu.Estabrook.2016"}, \code{"Wu.2016"}, \code{"Wu.Estabrook"}, \code{"Wu"}, \code{"Wu2016"}. \item To use the default settings of M\emph{plus} and \code{lavaan}, specify any of: \code{"default"}, \code{"Mplus"}, \code{"Muthen"}. Details provided in Millsap & Tein (2004). \item To use the constraints recommended by Millsap & Tein (2004; see also Liu et al., 2017) specify any of: \code{"millsap"}, \code{"millsap.2004"}, \code{"millsap.tein.2004"} \item To use the default settings of LISREL, specify \code{"LISREL"} or \code{"Joreskog"}. Details provided in Millsap & Tein (2004). 
}
See \strong{Details} and \strong{References} for more information.}

\item{ID.thr}{\code{integer}. Only relevant when \code{ID.cat = "Millsap.Tein.2004"}. Used to indicate which thresholds should be constrained for identification. The first integer indicates the threshold used for all indicators, and the second integer indicates the additional threshold constrained for a reference indicator (ignored if binary).}

\item{group}{optional \code{character} indicating the name of a grouping variable. See \code{\link[lavaan]{cfa}}.}

\item{group.equal}{optional \code{character} vector indicating type(s) of parameter to equate across groups. Ignored if \code{is.null(group)}. See \code{\link[lavaan]{lavOptions}}.}

\item{group.partial}{optional \code{character} vector or a parameter table indicating exceptions to \code{group.equal} (see \code{\link[lavaan]{lavOptions}}). Any variables not appearing in the \code{configural.model} will be ignored, and any parameter constraints needed for identification (e.g., two thresholds per indicator when \code{ID.cat = "Millsap"}) will be removed.}

\item{longFacNames}{optional named \code{list} of \code{character} vectors, each indicating multiple factors in the model that are actually the same construct measured repeatedly. See \strong{Details} and \strong{Examples}.}

\item{longIndNames}{optional named \code{list} of \code{character} vectors, each indicating multiple indicators in the model that are actually the same indicator measured repeatedly. See \strong{Details} and \strong{Examples}.}

\item{long.equal}{optional \code{character} vector indicating type(s) of parameter to equate across repeated measures. Ignored if no factors are indicated as repeatedly measured in \code{longFacNames}.}

\item{long.partial}{optional \code{character} vector or a parameter table indicating exceptions to \code{long.equal}. Any longitudinal variable names not appearing in \code{names(longFacNames)} or \code{names(longIndNames)} will be ignored, and any parameter constraints needed for identification will be removed.}

\item{auto}{Used to automatically include autocorrelated measurement errors among repeatedly measured indicators in \code{longIndNames}. Specify a single \code{integer} to set the maximum order (e.g., \code{auto = 1L} indicates that an indicator's unique factors should only be correlated between adjacently measured occasions). \code{auto = TRUE} or \code{"all"} will specify residual covariances among all possible lags per repeatedly measured indicator in \code{longIndNames}.}

\item{warn, debug}{\code{logical}. Passed to \code{\link[lavaan]{lavaan}} and \code{\link[lavaan]{lavParseModelString}}. See \code{\link[lavaan]{lavOptions}}.}

\item{return.fit}{\code{logical} indicating whether the generated syntax should be fitted to the provided \code{data} (or summary statistics, if provided via \code{sample.cov}). If \code{configural.model} is a fitted lavaan model, the generated syntax will be fitted using the \code{update} method (see \code{\linkS4class{lavaan}}), and \dots will be passed to \code{\link[lavaan]{lavaan}}. If neither data nor a fitted lavaan model was provided, this must be \code{FALSE}. If \code{TRUE}, the generated \code{measEq.syntax} object will be included in the \code{lavaan} object's \code{@external} slot, accessible by \code{fit@external$measEq.syntax}.}
}
\value{
By default, an object of class \code{\linkS4class{measEq.syntax}}.
If \code{return.fit = TRUE}, a fitted \code{\link[lavaan]{lavaan}} model, with the \code{measEq.syntax} object stored in the \code{@external} slot, accessible by \code{fit@external$measEq.syntax}. } \description{ Automatically generates \code{lavaan} model syntax to specify a confirmatory factor analysis (CFA) model with equality constraints imposed on user-specified measurement (or structural) parameters. Optionally returns the fitted model (if data are provided) representing some chosen level of measurement equivalence/invariance across groups and/or repeated measures. } \details{ This function is a pedagogical and analytical tool to generate model syntax representing some level of measurement equivalence/invariance across any combination of multiple groups and/or repeated measures. Support is provided for confirmatory factor analysis (CFA) models with simple or complex structure (i.e., cross-loadings and correlated residuals are allowed). For any complexities that exceed the limits of automation, this function is intended to still be useful by providing a means to generate syntax that users can easily edit to accommodate their unique situations. Limited support is provided for bifactor models and higher-order constructs. Because bifactor models have cross-loadings by definition, the option \code{ID.fac = "effects.code"} is unavailable. \code{ID.fac = "UV"} is recommended for bifactor models, but \code{ID.fac = "UL"} is available on the condition that each factor has a unique first indicator in the \code{configural.model}. In order to maintain generality, higher-order factors may include a mix of manifest and latent indicators, but they therefore require \code{ID.fac = "UL"} to avoid complications with differentiating lower-order vs. higher-order (or mixed-level) factors. The keyword \code{"loadings"} in \code{group.equal} or \code{long.equal} constrains factor loadings of all manifest indicators (including loadings on higher-order factors that also have latent indicators), whereas the keyword \code{"regressions"} constrains factor loadings of latent indicators. Users can edit the model syntax manually to adjust constraints as necessary, or clever use of the \code{group.partial} or \code{long.partial} arguments could make it possible for users to still automate their model syntax. The keyword \code{"intercepts"} constrains the intercepts of all manifest indicators, and the keyword \code{"means"} constrains intercepts and means of all latent common factors, regardless of whether they are latent indicators of higher-order factors. To test equivalence of lower-order and higher-order intercepts/means in separate steps, the user can either manually edit their generated syntax or conscientiously exploit the \code{group.partial} or \code{long.partial} arguments as necessary. \strong{\code{ID.fac}:} If the \code{configural.model} fixes any (e.g., the first) factor loadings, the generated syntax object will retain those fixed values. This allows the user to retain additional constraints that might be necessary (e.g., if there are only 1 or 2 indicators). Some methods must be used in conjunction with other settings: \itemize{ \item \code{ID.cat = "Millsap"} requires \code{ID.fac = "UL"} and \code{parameterization = "theta"}. \item \code{ID.cat = "LISREL"} requires \code{parameterization = "theta"}. \item \code{ID.fac = "effects.code"} is unavailable when there are any cross-loadings.
} \strong{\code{ID.cat}:} Wu & Estabrook (2016) recommended constraining thresholds to equality first, and doing so should allow releasing any identification constraints no longer needed. For each \code{ordered} indicator, constraining one threshold to equality will allow the item's intercepts to be estimated in all but the first group or repeated measure. Constraining a second threshold (if applicable) will allow the item's (residual) variance to be estimated in all but the first group or repeated measure. For binary data, there is no independent test of threshold, intercept, or residual-variance equality. Equivalence of thresholds must also be assumed for three-category indicators. These guidelines provide the least restrictive assumptions and tests, and are therefore the default. The default setting in M\emph{plus} is similar to Wu & Estabrook (2016), except that intercepts are always constrained to zero (so they are assumed to be invariant without testing them). Millsap & Tein (2004) recommended \code{parameterization = "theta"} and identified an item's residual variance in all but the first group (or occasion; Liu et al., 2017) by constraining its intercept to zero and one of its thresholds to equality. A second threshold for the reference indicator (so \code{ID.fac = "UL"}) is used to identify the common-factor means in all but the first group/occasion. The LISREL software fixes the first threshold to zero and (if applicable) the second threshold to 1, and assumes any remaining thresholds to be equal across groups / repeated measures; thus, the intercepts are always identified, and residual variances (\code{parameterization = "theta"}) are identified except for binary data, when they are all fixed to one. \strong{Repeated Measures:} If each repeatedly measured factor is measured by the same indicators (specified in the same order in the \code{configural.model}) on each occasion, without any cross-loadings, the user can let \code{longIndNames} be automatically generated. Generic names for the repeatedly measured indicators are created using the name of the repeatedly measured factors (i.e., \code{names(longFacNames)}) and the number of indicators. So the repeatedly measured first indicator (\code{"ind"}) of a longitudinal construct called "factor" would be generated as \code{"._factor_ind.1"}. The same types of parameter can be specified for \code{long.equal} as for \code{group.equal} (see \code{\link[lavaan]{lavOptions}} for a list), except for \code{"residual.covariances"} or \code{"lv.covariances"}. Instead, users can constrain \emph{auto}covariances using keywords \code{"resid.autocov"} or \code{"lv.autocov"}. Note that \code{group.equal = "lv.covariances"} or \code{group.equal = "residual.covariances"} will constrain any autocovariances across groups, along with any other covariances the user specified in the \code{configural.model}. Note also that autocovariances cannot be specified as exceptions in \code{long.partial}, so anything more complex than the \code{auto} argument automatically provides should instead be manually specified in the \code{configural.model}. When users set \code{orthogonal=TRUE} in the \code{configural.model} (e.g., in bifactor models of repeatedly measured constructs), autocovariances of each repeatedly measured factor will still be freely estimated in the generated syntax. 
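For illustration, consider the two-occasion model in the \strong{Examples} section below, in which the factors \code{FU1} and \code{FU2} each have four indicators specified in the same order. Based on the naming scheme described above, the automatically generated \code{longIndNames} should be equivalent to the following hypothetical manual specification (the generic names on the left are assumptions derived from that scheme, not output copied from the function): \preformatted{## assumed equivalent of the auto-generated longIndNames
longIndNames <- list("._FU_ind.1" = c("u1","u5"),
                     "._FU_ind.2" = c("u2","u6"),
                     "._FU_ind.3" = c("u3","u7"),
                     "._FU_ind.4" = c("u4","u8"))} Passing such a list to the \code{longIndNames} argument explicitly should yield the same generated syntax as the automatic default, and the same approach can accommodate indicators whose names or order differ across occasions.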
\strong{Missing Data:} If users wish to utilize the \code{\link{auxiliary}} function to automatically include auxiliary variables in conjunction with \code{missing = "FIML"}, they should first generate the hypothesized-model syntax, then submit that syntax as the model to \code{auxiliary()}. If users utilized \code{\link{runMI}} to fit their \code{configural.model} to multiply imputed data, that model can also be passed to the \code{configural.model} argument, and if \code{return.fit = TRUE}, the generated model will be fitted to the multiple imputations. } \examples{ mod.cat <- ' FU1 =~ u1 + u2 + u3 + u4 FU2 =~ u5 + u6 + u7 + u8 ' ## the 2 factors are actually the same factor (FU) measured twice longFacNames <- list(FU = c("FU1","FU2")) ## configural model: no constraints across groups or repeated measures syntax.config <- measEq.syntax(configural.model = mod.cat, data = datCat, ordered = paste0("u", 1:8), parameterization = "theta", ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", group = "g", longFacNames = longFacNames) ## print lavaan syntax to the Console cat(as.character(syntax.config)) ## print a summary of model features summary(syntax.config) ## threshold invariance syntax.thresh <- measEq.syntax(configural.model = mod.cat, data = datCat, ordered = paste0("u", 1:8), parameterization = "theta", ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", group = "g", group.equal = "thresholds", longFacNames = longFacNames, long.equal = "thresholds") ## notice that constraining 4 thresholds allows intercepts and residual ## variances to be freely estimated in all but the first group & occasion cat(as.character(syntax.thresh)) ## print a summary of model features summary(syntax.thresh) ## Fit a model to the data either in a subsequent step: mod.config <- as.character(syntax.config) fit.config <- cfa(mod.config, data = datCat, group = "g", ordered = paste0("u", 1:8), parameterization = "theta") ## or in a single step: fit.thresh <- measEq.syntax(configural.model = mod.cat, data = datCat, ordered = paste0("u", 1:8), parameterization = "theta", ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", group = "g", group.equal = "thresholds", longFacNames = longFacNames, long.equal = "thresholds", return.fit = TRUE) ## compare their fit to test threshold invariance anova(fit.config, fit.thresh) ## -------------------------------------------------------- ## RECOMMENDED PRACTICE: fit one invariance model at a time ## -------------------------------------------------------- ## - A downside of setting return.fit=TRUE is that if the model has trouble ## converging, you don't have the opportunity to investigate the syntax, ## or even to know whether an error resulted from the syntax-generator or ## from lavaan itself. ## - A downside of automatically fitting an entire set of invariance models ## (like the old measurementInvariance() function did) is that you might ## end up testing models that shouldn't even be fitted because less ## restrictive models already fail (e.g., don't test full scalar ## invariance if metric invariance fails! Establish partial metric ## invariance first, then test equivalence of intercepts ONLY among the ## indicators that have invariant loadings.)
## The recommended sequence is to (1) generate and save each syntax object, ## (2) print it to the screen to verify you are fitting the model you expect ## to (and potentially learn which identification constraints should be ## released when equality constraints are imposed), and (3) fit that model ## to the data as you would if you wrote the syntax yourself. ## Continuing from the examples above, after establishing invariance of ## thresholds, we proceed to test equivalence of loadings and intercepts ## (metric and scalar invariance, respectively) ## simultaneously across groups and repeated measures. \dontrun{ ## metric invariance syntax.metric <- measEq.syntax(configural.model = mod.cat, data = datCat, ordered = paste0("u", 1:8), parameterization = "theta", ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", group = "g", longFacNames = longFacNames, group.equal = c("thresholds","loadings"), long.equal = c("thresholds","loadings")) summary(syntax.metric) # summarize model features mod.metric <- as.character(syntax.metric) # save as text cat(mod.metric) # print/view lavaan syntax ## fit model to data fit.metric <- cfa(mod.metric, data = datCat, group = "g", ordered = paste0("u", 1:8), parameterization = "theta") ## test equivalence of loadings, given equivalence of thresholds anova(fit.thresh, fit.metric) ## scalar invariance syntax.scalar <- measEq.syntax(configural.model = mod.cat, data = datCat, ordered = paste0("u", 1:8), parameterization = "theta", ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", group = "g", longFacNames = longFacNames, group.equal = c("thresholds","loadings", "intercepts"), long.equal = c("thresholds","loadings", "intercepts")) summary(syntax.scalar) # summarize model features mod.scalar <- as.character(syntax.scalar) # save as text cat(mod.scalar) # print/view lavaan syntax ## fit model to data fit.scalar <- cfa(mod.scalar, data = datCat, group = "g", ordered = paste0("u", 1:8), parameterization = "theta") ## test equivalence of intercepts, given equal thresholds & loadings anova(fit.metric, fit.scalar) ## For a single table with all results, you can pass the models to ## summarize to the compareFit() function compareFit(fit.config, fit.thresh, fit.metric, fit.scalar) ## ------------------------------------------------------ ## NOT RECOMMENDED: fit several invariance models at once ## ------------------------------------------------------ test.seq <- c("thresholds","loadings","intercepts","means","residuals") meq.list <- list() for (i in 0:length(test.seq)) { if (i == 0L) { meq.label <- "configural" group.equal <- "" long.equal <- "" } else { meq.label <- test.seq[i] group.equal <- test.seq[1:i] long.equal <- test.seq[1:i] } meq.list[[meq.label]] <- measEq.syntax(configural.model = mod.cat, data = datCat, ordered = paste0("u", 1:8), parameterization = "theta", ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", group = "g", group.equal = group.equal, longFacNames = longFacNames, long.equal = long.equal, return.fit = TRUE) } compareFit(meq.list) ## ----------------- ## Binary indicators ## ----------------- ## borrow example data from Mplus user guide myData <- read.table("http://www.statmodel.com/usersguide/chap5/ex5.16.dat") names(myData) <- c("u1","u2","u3","u4","u5","u6","x1","x2","x3","g") bin.mod <- ' FU1 =~ u1 + u2 + u3 FU2 =~ u4 + u5 + u6 ' ## Must SIMULTANEOUSLY constrain thresholds, loadings, and intercepts test.seq <- list(strong = c("thresholds","loadings","intercepts"), means = "means", strict = "residuals") meq.list <- list() for (i in 0:length(test.seq)) { 
if (i == 0L) { meq.label <- "configural" group.equal <- "" long.equal <- "" } else { meq.label <- names(test.seq)[i] group.equal <- unlist(test.seq[1:i]) # long.equal <- unlist(test.seq[1:i]) } meq.list[[meq.label]] <- measEq.syntax(configural.model = bin.mod, data = myData, ordered = paste0("u", 1:6), parameterization = "theta", ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", group = "g", group.equal = group.equal, #longFacNames = longFacNames, #long.equal = long.equal, return.fit = TRUE) } compareFit(meq.list) } } \references{ Kloessner, S., & Klopp, E. (2019). Explaining constraint interaction: How to interpret estimated model parameters under alternative scaling methods. \emph{Structural Equation Modeling, 26}(1), 143--155. doi:10.1080/10705511.2018.1517356 Liu, Y., Millsap, R. E., West, S. G., Tein, J.-Y., Tanaka, R., & Grimm, K. J. (2017). Testing measurement invariance in longitudinal data with ordered-categorical measures. \emph{Psychological Methods, 22}(3), 486--506. doi:10.1037/met0000075 Millsap, R. E., & Tein, J.-Y. (2004). Assessing factorial invariance in ordered-categorical measures. \emph{Multivariate Behavioral Research, 39}(3), 479--515. doi:10.1207/S15327906MBR3903_4 Wu, H., & Estabrook, R. (2016). Identification of confirmatory factor analysis models of different levels of invariance for ordered categorical outcomes. \emph{Psychometrika, 81}(4), 1014--1045. doi:10.1007/s11336-016-9506-0 } \seealso{ \code{\link{compareFit}} } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/probe2WayMC.Rd0000644000176200001440000001433313475443506015224 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probeInteraction.R \name{probe2WayMC} \alias{probe2WayMC} \title{Probing two-way interaction on the no-centered or mean-centered latent interaction} \usage{ probe2WayMC(fit, nameX, nameY, modVar, valProbe, group) } \arguments{ \item{fit}{The lavaan model object used to evaluate model fit} \item{nameX}{The vector of the factor names used as the predictors. The first-order factors must be listed first, and the last name must be the name representing the interaction term.} \item{nameY}{The name of the factor used as the dependent variable.} \item{modVar}{The name of the factor used as the moderator. The effect of the other independent factor at each value of the moderator will be probed.} \item{valProbe}{The values of the moderator that will be used to probe the effect of the other independent factor.} \item{group}{In multigroup models, the label of the group for which the results will be returned. Must correspond to one of \code{\link[lavaan]{lavInspect}(fit, "group.label")}.} } \value{ A list with two elements: \enumerate{ \item \code{SimpleIntercept}: The intercepts given each value of the moderator. This element will be shown only if the factor intercept is estimated (e.g., not fixed at 0). \item \code{SimpleSlope}: The slopes given each value of the moderator. } In each element, the first column represents the values of the moderator specified in the \code{valProbe} argument. The second column is the simple intercept or simple slope. The third column is the \emph{SE} of the simple intercept or simple slope. The fourth column is the Wald (\emph{z}) statistic. The fifth column is the \emph{p} value testing whether the simple intercepts or slopes are different from 0.
} \description{ Probing interaction for simple intercept and simple slope for the no-centered or mean-centered latent two-way interaction } \details{ Before using this function, researchers need to make the products of the indicators between the first-order factors using mean centering (Marsh, Wen, & Hau, 2004). Note that the double-mean centering may not be appropriate for probing interaction if researchers are interested in simple intercepts. The mean or double-mean centering can be done by the \code{\link{indProd}} function. The indicator products can be made for all possible combinations or by the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the latent interaction regression will be used to fit all original indicators and the product terms. See the example below for how to fit the product term. Once the lavaan result is obtained, this function will be used to probe the interaction. Let the latent interaction model regressing the dependent variable (\eqn{Y}) on the independent variable (\eqn{X}) and the moderator (\eqn{Z}) be \deqn{ Y = b_0 + b_1X + b_2Z + b_3XZ + r, } where \eqn{b_0} is the estimated intercept or the expected value of \eqn{Y} when both \eqn{X} and \eqn{Z} are 0, \eqn{b_1} is the effect of \eqn{X} when \eqn{Z} is 0, \eqn{b_2} is the effect of \eqn{Z} when \eqn{X} is 0, \eqn{b_3} is the interaction effect between \eqn{X} and \eqn{Z}, and \eqn{r} is the residual term. For probing two-way interaction, the simple intercept of the independent variable at each value of the moderator (Aiken & West, 1991; Cohen, Cohen, West, & Aiken, 2003; Preacher, Curran, & Bauer, 2006) can be obtained by \deqn{ b_{0|X = 0, Z} = b_0 + b_2Z. } The simple slope of the independent variable at each value of the moderator can be obtained by \deqn{ b_{X|Z} = b_1 + b_3Z. } The variance of the simple intercept formula is \deqn{ Var\left(b_{0|X = 0, Z}\right) = Var\left(b_0\right) + 2ZCov\left(b_0, b_2\right) + Z^2Var\left(b_2\right) } where \eqn{Var} denotes the variance of a parameter estimate and \eqn{Cov} denotes the covariance of two parameter estimates. The variance of the simple slope formula is \deqn{ Var\left(b_{X|Z}\right) = Var\left(b_1\right) + 2ZCov\left(b_1, b_3\right) + Z^2Var\left(b_3\right) } The Wald statistic is used as the test statistic. } \examples{ library(lavaan) dat2wayMC <- indProd(dat2way, 1:3, 4:6) model1 <- " f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f12 =~ x1.x4 + x2.x5 + x3.x6 f3 =~ x7 + x8 + x9 f3 ~ f1 + f2 + f12 f12 ~~ 0*f1 f12 ~~ 0*f2 x1 ~ 0*1 x4 ~ 0*1 x1.x4 ~ 0*1 x7 ~ 0*1 f1 ~ NA*1 f2 ~ NA*1 f12 ~ NA*1 f3 ~ NA*1 " fitMC2way <- sem(model1, data = dat2wayMC, std.lv = FALSE, meanstructure = TRUE) summary(fitMC2way) result2wayMC <- probe2WayMC(fitMC2way, c("f1", "f2", "f12"), "f3", "f2", c(-1, 0, 1)) result2wayMC } \references{ Aiken, L. S., & West, S. G. (1991). \emph{Multiple regression: Testing and interpreting interactions}. Newbury Park, CA: Sage. Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). \emph{Applied multiple regression/correlation analysis for the behavioral sciences} (3rd ed.). New York, NY: Routledge. Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}(3), 275--300. doi:10.1037/1082-989X.9.3.275 Preacher, K. J., Curran, P. J., & Bauer, D. J. (2006). Computational tools for probing interactions in multiple linear regression, multilevel modeling, and latent curve analysis.
\emph{Journal of Educational and Behavioral Statistics, 31}(4), 437--448. doi:10.3102/10769986031004437 } \seealso{ \itemize{ \item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. \item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering or double-mean centering \item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from the residual-centering approach. \item \code{\link{probe3WayRC}} For probing the three-way latent interaction when the results are obtained from the residual-centering approach. \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction. } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/twostage.Rd0000644000176200001440000001303413432435606014765 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/TSML.R \name{twostage} \alias{twostage} \alias{cfa.2stage} \alias{sem.2stage} \alias{growth.2stage} \alias{lavaan.2stage} \title{Fit a lavaan model using 2-Stage Maximum Likelihood (TSML) estimation for missing data.} \usage{ twostage(..., aux, fun, baseline.model = NULL) lavaan.2stage(..., aux = NULL, baseline.model = NULL) cfa.2stage(..., aux = NULL, baseline.model = NULL) sem.2stage(..., aux = NULL, baseline.model = NULL) growth.2stage(..., aux = NULL, baseline.model = NULL) } \arguments{ \item{\dots}{Arguments passed to the \code{\link[lavaan]{lavaan}} function specified in the \code{fun} argument. See also \code{\link[lavaan]{lavOptions}}. At a minimum, the user must supply the first two named arguments to \code{\link[lavaan]{lavaan}} (i.e., \code{model} and \code{data}).} \item{aux}{An optional character vector naming auxiliary variable(s) in \code{data}} \item{fun}{The character string naming the lavaan function used to fit the Step-2 hypothesized model (\code{"cfa"}, \code{"sem"}, \code{"growth"}, or \code{"lavaan"}).} \item{baseline.model}{An optional character string, specifying the lavaan \code{\link[lavaan]{model.syntax}} for a user-specified baseline model. Interested users can use the fitted baseline model to calculate incremental fit indices (e.g., CFI and TLI) using the corrected chi-squared values (see the \code{anova} method in \code{\linkS4class{twostage}}). If \code{NULL}, the default "independence model" (i.e., freely estimated means and variances, but all covariances constrained to zero) will be specified internally.} } \value{ The \code{\linkS4class{twostage}} object contains 3 fitted lavaan models (saturated, target/hypothesized, and baseline) as well as the names of auxiliary variables. None of the individual models provide the correct model results (except the point estimates in the target model are unbiased). Use the methods in \code{\linkS4class{twostage}} to extract corrected \emph{SE}s and test statistics. } \description{ This function automates 2-Stage Maximum Likelihood (TSML) estimation, optionally with auxiliary variables. Step 1 involves fitting a saturated model to the partially observed data set (to variables in the hypothesized model as well as auxiliary variables related to missingness).
Step 2 involves fitting the hypothesized model to the model-implied means and covariance matrix (also called the "EM" means and covariance matrix) as if they were complete data. Step 3 involves correcting the Step-2 standard errors (\emph{SE}s) and chi-squared statistic to account for additional uncertainty due to missing data (using information from Step 1; see References section for sources with formulas). } \details{ All variables (including auxiliary variables) are treated as endogenous variables in the Step-1 saturated model (\code{fixed.x = FALSE}), so data are assumed continuous, although not necessarily multivariate normal (dummy-coded auxiliary variables may be included in Step 1, but categorical endogenous variables in the Step-2 hypothesized model are not allowed). To avoid assuming multivariate normality, request \code{se = "robust.huber.white"}. CAUTION: In addition to setting \code{fixed.x = FALSE} and \code{conditional.x = FALSE} in \code{\link[lavaan]{lavaan}}, this function will automatically set \code{meanstructure = TRUE}, \code{estimator = "ML"}, \code{missing = "fiml"}, and \code{test = "standard"}. \code{\link[lavaan]{lavaan}}'s \code{se} option can only be set to \code{"standard"} to assume multivariate normality or to \code{"robust.huber.white"} to relax that assumption. } \examples{ ## impose missing data for example HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), "ageyr","agemo","school")] set.seed(12345) HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) age <- HSMiss$ageyr + HSMiss$agemo/12 HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ## specify CFA model from lavaan's ?cfa help page HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' ## use ageyr and agemo as auxiliary variables out <- cfa.2stage(model = HS.model, data = HSMiss, aux = c("ageyr","agemo")) ## two versions of the corrected chi-squared test results are shown out ## see Savalei & Bentler (2009) and Savalei & Falk (2014) for details ## the summary additionally provides the parameter estimates with corrected ## standard errors, test statistics, and confidence intervals, along with ## any other options that can be passed to parameterEstimates() summary(out, standardized = TRUE) ## use parameter labels to fit a more constrained model modc <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + a*x8 + a*x9 ' outc <- cfa.2stage(model = modc, data = HSMiss, aux = c("ageyr","agemo")) ## use the anova() method to test this constraint anova(out, outc) ## like for a single model, two corrected statistics are provided } \references{ Savalei, V., & Bentler, P. M. (2009). A two-stage approach to missing data: Theory and application to auxiliary variables. \emph{Structural Equation Modeling, 16}(3), 477--497. doi:10.1080/10705510903008238 Savalei, V., & Falk, C. F. (2014). Robust two-stage approach outperforms robust full information maximum likelihood with incomplete nonnormal data. \emph{Structural Equation Modeling, 21}(2), 280--302. doi:10.1080/10705511.2014.882692 } \seealso{ \code{\linkS4class{twostage}} } \author{ Terrence D.
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/permuteMeasEq-class.Rd0000644000176200001440000001543713432435606017021 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/permuteMeasEq.R \docType{class} \name{permuteMeasEq-class} \alias{permuteMeasEq-class} \alias{show,permuteMeasEq-method} \alias{summary,permuteMeasEq-method} \alias{hist,permuteMeasEq-method} \title{Class for the Results of Permutation Randomization Tests of Measurement Equivalence and DIF} \usage{ \S4method{show}{permuteMeasEq}(object) \S4method{summary}{permuteMeasEq}(object, alpha = 0.05, nd = 3, extra = FALSE) \S4method{hist}{permuteMeasEq}(x, ..., AFI, alpha = 0.05, nd = 3, printLegend = TRUE, legendArgs = list(x = "topleft")) } \arguments{ \item{object, x}{object of class \code{permuteMeasEq}} \item{alpha}{alpha level used to draw confidence limits in \code{hist} and flag significant statistics in \code{summary} output} \item{nd}{number of digits to display} \item{extra}{\code{logical} indicating whether the \code{summary} output should return permutation-based \emph{p} values for each statistic returned by the \code{extra} function. If \code{FALSE} (default), \code{summary} will return permutation-based \emph{p} values for each modification index.} \item{...}{Additional arguments to pass to \code{\link[graphics]{hist}}} \item{AFI}{\code{character} indicating the fit measure whose permutation distribution should be plotted} \item{printLegend}{\code{logical}. If \code{TRUE} (default), a legend will be printed with the histogram} \item{legendArgs}{\code{list} of arguments passed to the \code{\link[graphics]{legend}} function. The default argument is a list placing the legend at the top-left of the figure.} } \value{ \itemize{ \item The \code{show} method prints a summary of the multiparameter omnibus test results, using the user-specified AFIs. The parametric (\eqn{\Delta})\eqn{\chi^2} test is also displayed. \item The \code{summary} method prints the same information from the \code{show} method, but when \code{extra = FALSE} (the default) it also provides a table summarizing any requested follow-up tests of DIF using modification indices in slot \code{MI.obs}. The user can also specify an \code{alpha} level for flagging modification indices as significant, as well as \code{nd} (the number of digits displayed). For each modification index, the \emph{p} value is displayed using a central \eqn{\chi^2} distribution with the \emph{df} shown in that column. Additionally, a \emph{p} value is displayed using the permutation distribution of the maximum index, which controls the familywise Type I error rate in a manner similar to Tukey's studentized range test. If any indices are flagged as significant using the \code{tukey.p.value}, then a message is displayed for each flagged index. The invisibly returned \code{data.frame} is the displayed table of modification indices, unless \code{\link[semTools]{permuteMeasEq}} was called with \code{param = NULL}, in which case the invisibly returned object is \code{object}. If \code{extra = TRUE}, the permutation-based \emph{p} values for each statistic returned by the \code{extra} function are displayed and returned in a \code{data.frame} instead of the modification indices requested in the \code{param} argument. \item The \code{hist} method returns a list of \code{length == 2}, containing the arguments for the call to \code{hist} and the arguments to the call for \code{legend}, respectively. 
This list may facilitate creating a customized histogram of \code{AFI.dist}, \code{MI.dist}, or \code{extra.dist} } } \description{ This class contains the results of tests of Measurement Equivalence and Differential Item Functioning (DIF). } \section{Slots}{ \describe{ \item{\code{PT}}{A \code{data.frame} returned by a call to \code{\link[lavaan]{parTable}} on the constrained model} \item{\code{modelType}}{A character indicating the specified \code{modelType} in the call to \code{permuteMeasEq}} \item{\code{ANOVA}}{A \code{numeric} vector indicating the results of the observed (\eqn{\Delta})\eqn{\chi^2} test, based on the central \eqn{\chi^2} distribution} \item{\code{AFI.obs}}{A vector of observed (changes in) user-selected fit measures} \item{\code{AFI.dist}}{The permutation distribution(s) of user-selected fit measures. A \code{data.frame} with \code{n.Permutations} rows and one column for each \code{AFI.obs}.} \item{\code{AFI.pval}}{A vector of \emph{p} values (one for each element in slot \code{AFI.obs}) calculated using slot \code{AFI.dist}, indicating the probability of observing a change at least as extreme as \code{AFI.obs} if the null hypothesis were true} \item{\code{MI.obs}}{A \code{data.frame} of observed Lagrange Multipliers (modification indices) associated with the equality constraints or fixed parameters specified in the \code{param} argument. This is a subset of the output returned by a call to \code{\link[lavaan]{lavTestScore}} on the constrained model.} \item{\code{MI.dist}}{The permutation distribution of the maximum modification index (among those seen in slot \code{MI.obs$X2}) at each permutation of group assignment or of \code{covariates}} \item{\code{extra.obs}}{If \code{permuteMeasEq} was called with an \code{extra} function, the output when applied to the original data is concatenated into this vector} \item{\code{extra.dist}}{A \code{data.frame}, each column of which contains the permutation distribution of the corresponding statistic in slot \code{extra.obs}} \item{\code{n.Permutations}}{An \code{integer} indicating the number of permutations requested by the user} \item{\code{n.Converged}}{An \code{integer} indicating the number of permutation iterations which yielded a converged solution} \item{\code{n.nonConverged}}{An \code{integer} vector of length \code{n.Permutations} indicating how many times group assignment was randomly permuted (at each iteration) before converging on a solution} \item{\code{n.Sparse}}{Only relevant with \code{ordered} indicators when \code{modelType == "mgcfa"}. An \code{integer} vector of length \code{n.Permutations} indicating how many times group assignment was randomly permuted (at each iteration) before obtaining a sample with all categories observed in all groups.} \item{\code{oldSeed}}{An \code{integer} vector storing the value of \code{.Random.seed} before running \code{permuteMeasEq}. Only relevant when using a parallel/multicore option and the original \code{RNGkind() != "L'Ecuyer-CMRG"}. This enables users to restore their previous \code{.Random.seed} state, if desired, by running: \code{.Random.seed[-1] <- permutedResults@oldSeed[-1]}} }} \section{Objects from the Class}{ Objects can be created via the \code{\link[semTools]{permuteMeasEq}} function. } \examples{ # See the example from the permuteMeasEq function } \seealso{ \code{\link[semTools]{permuteMeasEq}} } \author{ Terrence D.
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/loadingFromAlpha.Rd0000644000176200001440000000127213432435606016340 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loadingFromAlpha.R \name{loadingFromAlpha} \alias{loadingFromAlpha} \title{Find standardized factor loading from coefficient alpha} \usage{ loadingFromAlpha(alpha, ni) } \arguments{ \item{alpha}{A desired coefficient alpha value.} \item{ni}{A desired number of items.} } \value{ \item{result}{The standardized factor loading that yields the desired coefficient alpha with the specified number of items.} } \description{ Find the standardized factor loading from coefficient alpha, assuming that all items have equal loadings. } \examples{ loadingFromAlpha(0.8, 4) } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/semTools.Rd0000644000176200001440000000520113432435606014732 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/semTools.R \docType{package} \name{semTools} \alias{semTools} \alias{semTools-package} \title{semTools: Useful Tools for Structural Equation Modeling} \description{ The \pkg{semTools} package provides many miscellaneous functions that are useful for statistical analysis involving SEM in R. Many functions extend the functionality of the \pkg{lavaan} package. Some sets of functions in \pkg{semTools} correspond to the same theme. We call such a collection of functions a \emph{suite}. Our suites include: \itemize{ \item{Model Fit Evaluation: \code{\link{moreFitIndices}}, \code{\link{nullRMSEA}}, \code{\link{singleParamTest}}, \code{\link{miPowerFit}}, and \code{\link{chisqSmallN}}} \item{Measurement Invariance: \code{\link{measEq.syntax}}, \code{\link{partialInvariance}}, \code{\link{partialInvarianceCat}}, and \code{\link{permuteMeasEq}}} \item{Power Analysis: \code{\link{SSpower}}, \code{\link{findRMSEApower}}, \code{\link{plotRMSEApower}}, \code{\link{plotRMSEAdist}}, \code{\link{findRMSEAsamplesize}}, \code{\link{findRMSEApowernested}}, \code{\link{plotRMSEApowernested}}, and \code{\link{findRMSEAsamplesizenested}}} \item{Missing Data Analysis: \code{\link{auxiliary}}, \code{\link{runMI}}, \code{\link{twostage}}, \code{\link{fmi}}, \code{\link{bsBootMiss}}, \code{\link{quark}}, and \code{\link{combinequark}}} \item{Latent Interactions: \code{\link{indProd}}, \code{\link{orthogonalize}}, \code{\link{probe2WayMC}}, \code{\link{probe3WayMC}}, \code{\link{probe2WayRC}}, \code{\link{probe3WayRC}}, and \code{\link{plotProbe}}} \item{Exploratory Factor Analysis (EFA): \code{\link{efa.ekc}}, \code{\link{efaUnrotate}}, \code{\link{orthRotate}}, \code{\link{oblqRotate}}, and \code{\link{funRotate}}} \item{Reliability Estimation: \code{\link{reliability}}, \code{\link{reliabilityL2}}, and \code{\link{maximalRelia}}} \item{Parceling: \code{\link{parcelAllocation}}, \code{\link{PAVranking}}, and \code{\link{poolMAlloc}}} \item{Non-Normality: \code{\link{skew}}, \code{\link{kurtosis}}, \code{\link{mardiaSkew}}, \code{\link{mardiaKurtosis}}, and \code{\link{mvrnonnorm}}} } All users of R (or SEM) are invited to submit functions or ideas for functions by contacting the maintainer, Terrence Jorgensen (\email{TJorgensen314@gmail.com}). Contributors are encouraged to use \code{Roxygen} comments to document their contributed code, which is consistent with the rest of \pkg{semTools}.
Read the vignette from the \pkg{roxygen2} package for details: \code{vignette("rd", package = "roxygen2")} } semTools/man/lavaan.mi-class.Rd0000644000176200001440000003031013532031247016071 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runMI-methods.R \docType{class} \name{lavaan.mi-class} \alias{lavaan.mi-class} \alias{show,lavaan.mi-method} \alias{summary,lavaan.mi-method} \alias{fitMeasures,lavaan.mi-method} \alias{fitmeasures,lavaan.mi-method} \alias{anova,lavaan.mi-method} \alias{nobs,lavaan.mi-method} \alias{coef,lavaan.mi-method} \alias{vcov,lavaan.mi-method} \alias{fitted,lavaan.mi-method} \alias{fitted.values,lavaan.mi-method} \alias{residuals,lavaan.mi-method} \alias{resid,lavaan.mi-method} \title{Class for a lavaan Model Fitted to Multiple Imputations} \usage{ \S4method{show}{lavaan.mi}(object) \S4method{summary}{lavaan.mi}(object, se = TRUE, ci = FALSE, level = 0.95, standardized = FALSE, rsquare = FALSE, fmi = FALSE, scale.W = !asymptotic, omit.imps = c("no.conv", "no.se"), asymptotic = FALSE, header = TRUE, output = "text", fit.measures = FALSE) \S4method{nobs}{lavaan.mi}(object, total = TRUE) \S4method{coef}{lavaan.mi}(object, type = "free", labels = TRUE, omit.imps = c("no.conv", "no.se")) \S4method{vcov}{lavaan.mi}(object, type = c("pooled", "between", "within", "ariv"), scale.W = TRUE, omit.imps = c("no.conv", "no.se")) \S4method{anova}{lavaan.mi}(object, ...) \S4method{fitMeasures}{lavaan.mi}(object, fit.measures = "all", baseline.model = NULL, output = "vector", omit.imps = c("no.conv", "no.se"), ...) \S4method{fitmeasures}{lavaan.mi}(object, fit.measures = "all", baseline.model = NULL, output = "vector", omit.imps = c("no.conv", "no.se"), ...) \S4method{fitted}{lavaan.mi}(object, omit.imps = c("no.conv", "no.se")) \S4method{fitted.values}{lavaan.mi}(object, omit.imps = c("no.conv", "no.se")) \S4method{residuals}{lavaan.mi}(object, type = c("raw", "cor"), omit.imps = c("no.conv", "no.se")) \S4method{resid}{lavaan.mi}(object, type = c("raw", "cor"), omit.imps = c("no.conv", "no.se")) } \arguments{ \item{object}{An object of class \code{lavaan.mi}} \item{se, ci, level, standardized, rsquare, header, output}{See \code{\link[lavaan]{parameterEstimates}}. \code{output} can also be passed to \code{\link[lavaan]{fitMeasures}}.} \item{fmi}{\code{logical} indicating whether to include the Fraction Missing Information (FMI) for parameter estimates in the \code{summary} output (see \bold{Value} section).} \item{scale.W}{\code{logical}. If \code{TRUE} (default), the \code{vcov} method will calculate the pooled covariance matrix by scaling the within-imputation component by the ARIV (see Enders, 2010, p. 235, for definition and formula). Otherwise, the pooled matrix is calculated as the weighted sum of the within-imputation and between-imputation components (see Enders, 2010, ch. 8, for details). This in turn affects how the \code{summary} method calculates its pooled standard errors, as well as the Wald test (\code{\link{lavTestWald.mi}}).} \item{omit.imps}{\code{character} vector specifying criteria for omitting imputations from pooled results. Can include any of \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the default setting, which excludes any imputations that did not converge or for which standard errors could not be computed.
The last option (\code{"no.npd"}) would exclude any imputations which yielded a nonpositive definite covariance matrix for observed or latent variables, which would include any "improper solutions" such as Heywood cases. NPD solutions are not excluded by default because they are likely to occur due to sampling error, especially in small samples. However, because gross model misspecification could also cause NPD solutions, users can compare pooled results with and without this setting as a sensitivity analysis to see whether some imputations warrant further investigation.} \item{asymptotic}{\code{logical}. If \code{FALSE} (typically a default, but see \bold{Value} section for details using various methods), pooled tests (of fit or pooled estimates) will be \emph{F} or \emph{t} statistics with associated degrees of freedom (\emph{df}). If \code{TRUE}, the (denominator) \emph{df} are assumed to be sufficiently large for a \emph{t} statistic to follow a normal distribution, so it is printed as a \emph{z} statistic; likewise, \emph{F} times its numerator \emph{df} is printed, assumed to follow a \eqn{\chi^2} distribution.} \item{fit.measures, baseline.model}{See \code{\link[lavaan]{fitMeasures}}. \code{summary(object, fit.measures = TRUE)} will print (but not return) a table of fit measures to the console.} \item{total}{\code{logical} (default: \code{TRUE}) indicating whether the \code{nobs} method should return the total sample size or (if \code{FALSE}) a vector of group sample sizes.} \item{type}{The meaning of this argument varies depending on which method it is used for. Find detailed descriptions in the \bold{Value} section under \code{coef}, \code{vcov}, and \code{residuals}.} \item{labels}{\code{logical} indicating whether the \code{coef} output should include parameter labels. Default is \code{TRUE}.} \item{...}{Additional arguments passed to \code{\link{lavTestLRT.mi}}, or subsequently to \code{\link[lavaan]{lavTestLRT}}.} } \value{ \item{coef}{\code{signature(object = "lavaan.mi", type = "free", labels = TRUE, omit.imps = c("no.conv","no.se"))}: See \code{\linkS4class{lavaan}}. Returns the pooled point estimates (i.e., averaged across imputed data sets; see Rubin, 1987).} \item{vcov}{\code{signature(object = "lavaan.mi", scale.W = TRUE, omit.imps = c("no.conv","no.se"), type = c("pooled","between","within","ariv"))}: By default, returns the pooled covariance matrix of parameter estimates (\code{type = "pooled"}), the within-imputations covariance matrix (\code{type = "within"}), the between-imputations covariance matrix (\code{type = "between"}), or the average relative increase in variance (\code{type = "ariv"}) due to missing data.} \item{fitted.values}{\code{signature(object = "lavaan.mi", omit.imps = c("no.conv","no.se"))}: See \code{\linkS4class{lavaan}}. Returns model-implied moments, evaluated at the pooled point estimates.} \item{fitted}{alias for \code{fitted.values}} \item{residuals}{\code{signature(object = "lavaan.mi", type = c("raw","cor"), omit.imps = c("no.conv","no.se"))}: See \code{\linkS4class{lavaan}}. By default (\code{type = "raw"}), returns the difference between the model-implied moments from \code{fitted.values} and the pooled observed moments (i.e., averaged across imputed data sets).
Standardized residuals are also available, using Bollen's (\code{type = "cor"} or \code{"cor.bollen"}) or Bentler's (\code{type = "cor.bentler"}) formulas.} \item{resid}{alias for \code{residuals}} \item{nobs}{\code{signature(object = "lavaan.mi", total = TRUE)}: either the total (default) sample size or a vector of group sample sizes (\code{total = FALSE}).} \item{anova}{\code{signature(object = "lavaan.mi", ...)}: Returns a test of model fit for a single model (\code{object}) or test(s) of the difference(s) in fit between nested models passed via \code{...}. See \code{\link{lavTestLRT.mi}} and \code{\link{compareFit}} for details.} \item{fitMeasures}{\code{signature(object = "lavaan.mi", fit.measures = "all", baseline.model = NULL, output = "vector", omit.imps = c("no.conv","no.se"), ...)}: See lavaan's \code{\link[lavaan]{fitMeasures}} for details. Pass additional arguments to \code{\link{lavTestLRT.mi}} via \code{...}.} \item{fitmeasures}{alias for \code{fitMeasures}.} \item{show}{\code{signature(object = "lavaan.mi")}: returns a message about convergence rates and estimation problems (if applicable) across imputed data sets.} \item{summary}{\code{signature(object = "lavaan.mi", se = TRUE, ci = FALSE, level = .95, standardized = FALSE, rsquare = FALSE, fmi = FALSE, scale.W = !asymptotic, omit.imps = c("no.conv","no.se"), asymptotic = FALSE, header = TRUE, output = "text", fit.measures = FALSE)}: see \code{\link[lavaan]{parameterEstimates}} for details. By default, \code{summary} returns pooled point and \emph{SE} estimates, along with \emph{t} test statistics and their associated \emph{df} and \emph{p} values. If \code{ci = TRUE}, confidence intervals are returned with the specified confidence \code{level} (default 95\% CI). If \code{asymptotic = TRUE}, \emph{z} instead of \emph{t} tests are returned. \code{standardized} solution(s) can also be requested by name (\code{"std.lv"} or \code{"std.all"}) or both are returned with \code{TRUE}. \emph{R}-squared for endogenous variables can be requested, as well as the Fraction Missing Information (FMI) for parameter estimates. By default, the output will appear like \code{lavaan}'s \code{summary} output, but if \code{output == "data.frame"}, the returned \code{data.frame} will resemble the \code{parameterEstimates} output. The \code{scale.W} argument is passed to \code{vcov} (see description above). Setting \code{fit.measures=TRUE} will additionally print fit measures to the console, but they will not be returned.} } \description{ This class extends the \code{\linkS4class{lavaanList}} class, created by fitting a lavaan model to a list of data sets. In this case, the data sets in the list are multiple imputations of missing data.
} \section{Slots}{ \describe{ \item{\code{coefList}}{\code{list} of estimated coefficients in matrix format (one per imputation) as output by \code{\link[lavaan]{lavInspect}(fit, "est")}} \item{\code{phiList}}{\code{list} of model-implied latent-variable covariance matrices (one per imputation) as output by \code{\link[lavaan]{lavInspect}(fit, "cov.lv")}} \item{\code{miList}}{\code{list} of modification indices output by \code{\link[lavaan]{modindices}}} \item{\code{seed}}{\code{integer} seed set before running imputations} \item{\code{lavListCall}}{call to \code{\link[lavaan]{lavaanList}} used to fit the model to the list of imputed data sets in \code{@DataList}, stored as a \code{list} of arguments} \item{\code{imputeCall}}{call to imputation function (if used), stored as a \code{list} of arguments} \item{\code{convergence}}{\code{list} of \code{logical} vectors indicating whether, for each imputed data set, (1) the model converged on a solution, (2) \emph{SE}s could be calculated, (3) the (residual) covariance matrix of latent variables (\eqn{\Psi}) is non-positive-definite, and (4) the residual covariance matrix of observed variables (\eqn{\Theta}) is non-positive-definite.} \item{\code{lavaanList_slots}}{All remaining slots are from \code{\linkS4class{lavaanList}}, but \code{\link{runMI}} only populates a subset of the \code{list} slots, two of them with custom information:} \item{\code{DataList}}{The \code{list} of imputed data sets} \item{\code{SampleStatsList}}{List of output from \code{\link[lavaan]{lavInspect}(fit, "sampstat")} applied to each fitted model} \item{\code{ParTableList}}{See \code{\linkS4class{lavaanList}}} \item{\code{vcovList}}{See \code{\linkS4class{lavaanList}}} \item{\code{testList}}{See \code{\linkS4class{lavaanList}}} \item{\code{h1List}}{See \code{\linkS4class{lavaanList}}. An additional element is added to the \code{list}: \code{$PT} is the "saturated" model's parameter table, returned by \code{\link[lavaan]{lav_partable_unrestricted}}.} \item{\code{baselineList}}{See \code{\linkS4class{lavaanList}}} }} \section{Objects from the Class}{ See the \code{\link{runMI}} function for details. Wrapper functions include \code{\link{lavaan.mi}}, \code{\link{cfa.mi}}, \code{\link{sem.mi}}, and \code{\link{growth.mi}}. } \examples{ ## See ?runMI help page } \references{ Asparouhov, T., & Muthen, B. (2010). \emph{Chi-square statistics with multiple imputation}. Technical Report. Retrieved from www.statmodel.com Enders, C. K. (2010). \emph{Applied missing data analysis}. New York, NY: Guilford. Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). Significance levels from repeated \emph{p}-values with multiply-imputed data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from https://www.jstor.org/stable/24303994 Meng, X.-L., & Rubin, D. B. (1992). Performing likelihood ratio tests with multiply-imputed data sets. \emph{Biometrika, 79}(1), 103--111. doi:10.2307/2337151 Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. New York, NY: Wiley. } \author{ Terrence D. 
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/measurementInvariance-deprecated.Rd0000644000176200001440000000764013432435606021561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/measurementInvariance.R \name{measurementInvariance-deprecated} \alias{measurementInvariance-deprecated} \title{Measurement Invariance Tests} \usage{ measurementInvariance(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "satorra.bentler.2001") } \arguments{ \item{...}{The same arguments as for any lavaan model. See \code{\link{cfa}} for more information.} \item{std.lv}{If \code{TRUE}, the fixed-factor method of scale identification is used. If \code{FALSE}, the first variable for each factor is used as marker variable.} \item{strict}{If \code{TRUE}, the sequence requires `strict' invariance. See details for more information.} \item{quiet}{If \code{FALSE} (default), a summary is printed out containing an overview of the different models that are fitted, together with some model comparison tests. If \code{TRUE}, no summary is printed.} \item{fit.measures}{Fit measures used to calculate the differences between nested models.} \item{baseline.model}{custom baseline model passed to \code{\link[lavaan]{fitMeasures}}} \item{method}{The method used to calculate likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} } \value{ Invisibly, all model fits in the sequence are returned as a list. } \description{ Testing measurement invariance across groups using a typical sequence of model comparison tests. } \details{ If \code{strict = FALSE}, the following four models are tested in order: \enumerate{ \item Model 1: configural invariance. The same factor structure is imposed on all groups. \item Model 2: weak invariance. The factor loadings are constrained to be equal across groups. \item Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across groups. \item Model 4: The factor loadings, intercepts and means are constrained to be equal across groups. } Each time a more restricted model is fitted, a \eqn{\Delta\chi^2} test is reported, comparing the current model with the previous one, and comparing the current model to the baseline model (Model 1). In addition, the difference in CFI is also reported (\eqn{\Delta}CFI). If \code{strict = TRUE}, the following five models are tested in order: \enumerate{ \item Model 1: configural invariance. The same factor structure is imposed on all groups. \item Model 2: weak invariance. The factor loadings are constrained to be equal across groups. \item Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across groups. \item Model 4: strict invariance. The factor loadings, intercepts and residual variances are constrained to be equal across groups. \item Model 5: The factor loadings, intercepts, residual variances and means are constrained to be equal across groups. } Note that if the \eqn{\chi^2} test statistic is scaled (e.g., a Satorra-Bentler or Yuan-Bentler test statistic), a special version of the \eqn{\Delta\chi^2} test is used as described in \url{http://www.statmodel.com/chidiff.shtml} } \examples{ HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' measurementInvariance(model = HW.model, data = HolzingerSwineford1939, group = "school", fit.measures = c("cfi","aic")) } \references{ Vandenberg, R. 
J., and Lance, C. E. (2000). A review and synthesis of the measurement invariance literature: Suggestions, practices, and recommendations for organizational research. \emph{Organizational Research Methods, 3,} 4--70. } \seealso{ \code{\link{semTools-deprecated}} } \author{ Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \keyword{internal} semTools/man/findRMSEAsamplesize.Rd0000644000176200001440000000367013432435606016742 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/powerAnalysisRMSEA.R \name{findRMSEAsamplesize} \alias{findRMSEAsamplesize} \title{Find the minimum sample size for a given statistical power based on population RMSEA} \usage{ findRMSEAsamplesize(rmsea0, rmseaA, df, power = 0.8, alpha = 0.05, group = 1) } \arguments{ \item{rmsea0}{Null RMSEA} \item{rmseaA}{Alternative RMSEA} \item{df}{Model degrees of freedom} \item{power}{Desired statistical power to reject misspecified model (test of close fit) or retain good model (test of not-close fit)} \item{alpha}{Alpha level used in power calculations} \item{group}{The number of groups used to calculate RMSEA.} } \description{ Find the minimum sample size for a specified statistical power based on population RMSEA. This function can be applied to both the test of close fit and the test of not-close fit (MacCallum, Browne, & Sugawara, 1996). } \details{ This function finds the minimum sample size for a specified power using an iterative routine. The sample size keeps increasing until the power calculated by the \code{\link{findRMSEApower}} function is just over the specified power. If \code{group} is greater than 1, the resulting sample size is the sample size per group. } \examples{ findRMSEAsamplesize(rmsea0 = .05, rmseaA = .08, df = 20, power = 0.80) } \references{ MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1}(2), 130--149.
doi:10.1037/1082-989X.1.2.130 } \seealso{ \itemize{ \item \code{\link{plotRMSEApower}} to plot the statistical power based on population RMSEA given the sample size \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions \item \code{\link{findRMSEApower}} to find the statistical power based on population RMSEA given a sample size } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/findRMSEApowernested.Rd0000644000176200001440000000355313432435606017125 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/powerAnalysisNested.R \name{findRMSEApowernested} \alias{findRMSEApowernested} \title{Find power given a sample size in nested model comparison} \usage{ findRMSEApowernested(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, n, alpha = 0.05, group = 1) } \arguments{ \item{rmsea0A}{The \eqn{H_0} baseline RMSEA} \item{rmsea0B}{The \eqn{H_0} alternative RMSEA (trivial misfit)} \item{rmsea1A}{The \eqn{H_1} baseline RMSEA} \item{rmsea1B}{The \eqn{H_1} alternative RMSEA (target misfit to be rejected)} \item{dfA}{degrees of freedom of the more-restricted model} \item{dfB}{degrees of freedom of the less-restricted model} \item{n}{Sample size} \item{alpha}{The alpha level} \item{group}{The number of groups in calculating RMSEA} } \description{ Find the statistical power to reject the null pair of RMSEA values in favor of the alternative pair of RMSEA values in a nested model comparison, given the sample size. } \examples{ findRMSEApowernested(rmsea0A = 0.06, rmsea0B = 0.05, rmsea1A = 0.08, rmsea1B = 0.05, dfA = 22, dfB = 20, n = 200, alpha = 0.05, group = 1) } \references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11}(1), 19--35. doi:10.1037/1082-989X.11.1.19 } \seealso{ \itemize{ \item \code{\link{plotRMSEApowernested}} to plot the statistical power for nested model comparison based on population RMSEA given the sample size \item \code{\link{findRMSEAsamplesizenested}} to find the minimum sample size for a given statistical power in nested model comparison based on population RMSEA } } \author{ Bell Clinton Pavel Panko (Texas Tech University; \email{pavel.panko@ttu.edu}) Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/plotRMSEApowernested.Rd0000644000176200001440000000371713432435606017163 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/powerAnalysisNested.R \name{plotRMSEApowernested} \alias{plotRMSEApowernested} \title{Plot power of nested model RMSEA} \usage{ plotRMSEApowernested(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, nlow, nhigh, steps = 1, alpha = 0.05, group = 1, ...) } \arguments{ \item{rmsea0A}{The \eqn{H_0} baseline RMSEA} \item{rmsea0B}{The \eqn{H_0} alternative RMSEA (trivial misfit)} \item{rmsea1A}{The \eqn{H_1} baseline RMSEA} \item{rmsea1B}{The \eqn{H_1} alternative RMSEA (target misfit to be rejected)} \item{dfA}{degrees of freedom of the more-restricted model} \item{dfB}{degrees of freedom of the less-restricted model} \item{nlow}{Lower bound of sample size} \item{nhigh}{Upper bound of sample size} \item{steps}{Step size} \item{alpha}{The alpha level} \item{group}{The number of groups in calculating RMSEA} \item{\dots}{The additional arguments for the plot function.} } \description{ Plot power of nested model RMSEA over a range of possible sample sizes.
} \examples{
plotRMSEApowernested(rmsea0A = 0, rmsea0B = 0, rmsea1A = 0.06,
                     rmsea1B = 0.05, dfA = 22, dfB = 20, nlow = 50,
                     nhigh = 500, steps = 1, alpha = .05, group = 1)
}
\references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11}(1), 19--35. doi:10.1037/1082-989X.11.1.19 }
\seealso{ \itemize{
\item \code{\link{findRMSEApowernested}} to find the power for a given sample size in nested model comparison based on population RMSEA
\item \code{\link{findRMSEAsamplesizenested}} to find the minimum sample size for a given statistical power in nested model comparison based on population RMSEA
} }
\author{ Bell Clinton Pavel Panko (Texas Tech University; \email{pavel.panko@ttu.edu}) Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) }
semTools/man/kd.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kd.R
\name{kd}
\alias{kd}
\title{Generate data via the Kaiser-Dickman (1962) algorithm.}
\usage{ kd(covmat, n, type = c("exact", "sample")) }
\arguments{
\item{covmat}{a symmetric, positive definite covariance matrix}
\item{n}{the sample size for the data that will be generated}
\item{type}{type of data generation. \code{exact} generates data that exactly correspond to \code{covmat}. \code{sample} treats \code{covmat} as a population covariance matrix, generating a sample of size \code{n}.}
}
\value{ \code{kd} returns a data matrix of dimension \code{n} by \code{nrow(covmat)}. }
\description{ Given a covariance matrix and sample size, generate raw data that correspond to the covariance matrix. Data can be generated to match the covariance matrix exactly, or to be a sample from the population covariance matrix. }
\details{ By default, R's \code{cov()} function divides by \code{n}-1. The data generated by this algorithm result in a covariance matrix that matches \code{covmat}, but you must divide by \code{n} instead of \code{n}-1. }
\examples{
#### First Example

## Get data
dat <- HolzingerSwineford1939[ , 7:15]
hs.n <- nrow(dat)

## Covariance matrix divided by n
hscov <- ((hs.n-1)/hs.n) * cov(dat)

## Generate new, raw data corresponding to hscov
newdat <- kd(hscov, hs.n)

## Difference between new covariance matrix and hscov is minimal
newcov <- (hs.n-1)/hs.n * cov(newdat)
summary(as.numeric(hscov - newcov))

## Generate sample data, treating hscov as population matrix
newdat2 <- kd(hscov, hs.n, type = "sample")

#### Another example

## Define a covariance matrix
covmat <- matrix(0, 3, 3)
diag(covmat) <- 1.5
covmat[2:3,1] <- c(1.3, 1.7)
covmat[3,2] <- 2.1
covmat <- covmat + t(covmat)

## Generate data of size 300 that have this covariance matrix
rawdat <- kd(covmat, 300)

## Covariances are exact if we compute sample covariance matrix by
## dividing by n (vs by n - 1)
summary(as.numeric((299/300)*cov(rawdat) - covmat))

## Generate data of size 300 where covmat is the population covariance matrix
rawdat2 <- kd(covmat, 300, type = "sample")
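## With type = "sample", the sample covariance matrix only approximates
## covmat, so (unlike the exact case above) these differences are no
## longer essentially zero:
summary(as.numeric((299/300)*cov(rawdat2) - covmat))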
} \references{ Kaiser, H. F., & Dickman, K. (1962). Sample and population score matrices and sample correlation matrices from an arbitrary population correlation matrix. \emph{Psychometrika, 27}(2), 179--182. doi:10.1007/BF02289635 }
\author{ Ed Merkle (University of Missouri; \email{merklee@missouri.edu}) }
semTools/man/plotProbe.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/probeInteraction.R
\name{plotProbe}
\alias{plotProbe}
\title{Plot a latent interaction}
\usage{ plotProbe(object, xlim, xlab = "Indepedent Variable", ylab = "Dependent Variable", legend = TRUE, legendArgs = list(), ...) }
\arguments{
\item{object}{The result of probing a latent interaction obtained from the \code{\link{probe2WayMC}}, \code{\link{probe2WayRC}}, \code{\link{probe3WayMC}}, or \code{\link{probe3WayRC}} function.}
\item{xlim}{A vector of two numbers: the minimum and maximum values of the independent variable}
\item{xlab}{The label of the x-axis}
\item{ylab}{The label of the y-axis}
\item{legend}{\code{logical}. If \code{TRUE} (default), a legend is printed.}
\item{legendArgs}{\code{list} of arguments passed to the \code{\link{legend}} function if \code{legend=TRUE}.}
\item{\dots}{Any additional arguments for the \code{\link{plot}} function}
}
\value{ None. This function will plot the simple main effect only. }
\description{ This function will plot line graphs representing the simple effect of the independent variable given the values of the moderator. For multigroup models, it will only generate a plot for 1 group, as specified in the function used to obtain the first argument. }
\examples{
library(lavaan)

dat2wayMC <- indProd(dat2way, 1:3, 4:6)

model1 <- "
f1 =~ x1 + x2 + x3
f2 =~ x4 + x5 + x6
f12 =~ x1.x4 + x2.x5 + x3.x6
f3 =~ x7 + x8 + x9
f3 ~ f1 + f2 + f12
f12 ~~ 0*f1
f12 ~~ 0*f2
x1 ~ 0*1
x4 ~ 0*1
x1.x4 ~ 0*1
x7 ~ 0*1
f1 ~ NA*1
f2 ~ NA*1
f12 ~ NA*1
f3 ~ NA*1
"

fitMC2way <- sem(model1, data = dat2wayMC, std.lv = FALSE, meanstructure = TRUE)
result2wayMC <- probe2WayMC(fitMC2way, c("f1", "f2", "f12"), "f3", "f2", c(-1, 0, 1))
plotProbe(result2wayMC, xlim = c(-2, 2))

dat3wayMC <- indProd(dat3way, 1:3, 4:6, 7:9)

model3 <- "
f1 =~ x1 + x2 + x3
f2 =~ x4 + x5 + x6
f3 =~ x7 + x8 + x9
f12 =~ x1.x4 + x2.x5 + x3.x6
f13 =~ x1.x7 + x2.x8 + x3.x9
f23 =~ x4.x7 + x5.x8 + x6.x9
f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9
f4 =~ x10 + x11 + x12
f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123
f1 ~~ 0*f12
f1 ~~ 0*f13
f1 ~~ 0*f123
f2 ~~ 0*f12
f2 ~~ 0*f23
f2 ~~ 0*f123
f3 ~~ 0*f13
f3 ~~ 0*f23
f3 ~~ 0*f123
f12 ~~ 0*f123
f13 ~~ 0*f123
f23 ~~ 0*f123
x1 ~ 0*1
x4 ~ 0*1
x7 ~ 0*1
x10 ~ 0*1
x1.x4 ~ 0*1
x1.x7 ~ 0*1
x4.x7 ~ 0*1
x1.x4.x7 ~ 0*1
f1 ~ NA*1
f2 ~ NA*1
f3 ~ NA*1
f12 ~ NA*1
f13 ~ NA*1
f23 ~ NA*1
f123 ~ NA*1
f4 ~ NA*1
"

fitMC3way <- sem(model3, data = dat3wayMC, std.lv = FALSE, meanstructure = TRUE)
result3wayMC <- probe3WayMC(fitMC3way, c("f1", "f2", "f3", "f12", "f13", "f23", "f123"),
                            "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1))
plotProbe(result3wayMC, xlim = c(-2, 2))
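## The legend and axes can be customized; legendArgs is passed straight
## to legend(), so any of its arguments (e.g., title) should work here:
plotProbe(result2wayMC, xlim = c(-2, 2), ylab = "f3",
          legendArgs = list(title = "Moderator (f2)"))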
} \seealso{ \itemize{
\item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering.
\item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering or double-mean centering
\item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering or double-mean centering
\item \code{\link{probe2WayRC}} For probing the two-way latent interaction when the results are obtained from the residual-centering approach.
\item \code{\link{probe3WayRC}} For probing the three-way latent interaction when the results are obtained from the residual-centering approach.
} }
\author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) }
semTools/man/plausibleValues.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plausibleValues.R
\name{plausibleValues}
\alias{plausibleValues}
\title{Plausible-Values Imputation of Factor Scores Estimated from a lavaan Model}
\usage{ plausibleValues(object, nDraws = 20L, seed = 12345, omit.imps = c("no.conv", "no.se"), ...) }
\arguments{
\item{object}{A fitted model of class \code{\linkS4class{lavaan}}, \code{\link[blavaan]{blavaan}}, or \code{\linkS4class{lavaan.mi}}}
\item{nDraws}{\code{integer} specifying the number of draws, analogous to the number of imputed data sets. If \code{object} is of class \code{\linkS4class{lavaan.mi}}, this will be the number of draws taken \emph{per imputation}. Ignored if \code{object} is of class \code{\link[blavaan]{blavaan}}, in which case the number of draws is the number of MCMC samples from the posterior.}
\item{seed}{\code{integer} passed to \code{\link{set.seed}()}. Ignored if \code{object} is of class \code{\link[blavaan]{blavaan}}.}
\item{omit.imps}{\code{character} vector specifying criteria for omitting imputations when \code{object} is of class \code{\linkS4class{lavaan.mi}}. Can include any of \code{c("no.conv", "no.se", "no.npd")}.}
\item{...}{Optional arguments to pass to \code{\link[lavaan]{lavPredict}}. \code{assemble} will be ignored because multiple groups are always assembled into a single \code{data.frame} per draw. \code{type} will be ignored because it is set internally to \code{type="lv"}.}
}
\value{ A \code{list} of length \code{nDraws}, each of which is a \code{data.frame} containing plausible values, which can be treated as a \code{list} of imputed data sets to be passed to \code{\link{runMI}} (see \bold{Examples}). If \code{object} is of class \code{\linkS4class{lavaan.mi}}, the \code{list} will be of length \code{nDraws*m}, where \code{m} is the number of imputations. }
\description{ Draw plausible values of factor scores estimated from a fitted \code{\link[lavaan]{lavaan}} model, then treat them as multiple imputations of missing data using \code{\link{runMI}}. }
\details{ Because latent variables are unobserved, they can be treated as missing data, which can be imputed using Monte Carlo methods. This may be of interest to researchers with sample sizes too small to fit their complex structural models. Fitting a factor model as a first step, \code{\link[lavaan]{lavPredict}} provides factor-score estimates, which can be treated as observed values in a path analysis (Step 2). However, the resulting standard errors and test statistics cannot be trusted because the Step-2 analysis would not take into account the uncertainty about the estimated factor scores. Using the asymptotic sampling covariance matrix of the factor scores provided by \code{\link[lavaan]{lavPredict}}, \code{plausibleValues} draws a set of \code{nDraws} imputations from the sampling distribution of each factor score, returning a list of data sets that can be treated like multiple imputations of incomplete data.
If the data were already imputed to handle missing data, \code{plausibleValues} also accepts an object of class \code{\linkS4class{lavaan.mi}}, and will draw \code{nDraws} plausible values from each imputation. Step 2 would then take into account uncertainty about both missing values and factor scores. Bayesian methods can also be used to generate factor scores, as available with the \pkg{blavaan} package, in which case plausible values are simply saved parameters from the posterior distribution. See Asparouhov and Muthen (2010) for further technical details and references.

Each returned \code{data.frame} includes a \code{case.idx} column that indicates the corresponding rows in the data set to which the model was originally fitted (unless the user requests only Level-2 variables). This can be used to merge the plausible values with the original observed data, but users should note that including any new variables in a Step-2 model might not accurately account for their relationship(s) with factor scores because they were not accounted for in the Step-1 model from which factor scores were estimated.

If \code{object} is a multilevel \code{lavaan} model, users can request plausible values for latent variables at particular levels of analysis by setting the \code{\link[lavaan]{lavPredict}} argument \code{level=1} or \code{level=2}. If the \code{level} argument is not passed via \dots, then both levels are returned in a single merged data set per draw. For multilevel models, each returned \code{data.frame} also includes a column indicating to which cluster each row belongs (unless the user requests only Level-2 variables). }
\examples{
## example from ?cfa and ?lavPredict help pages
HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '
fit1 <- cfa(HS.model, data = HolzingerSwineford1939)
fs1 <- plausibleValues(fit1, nDraws = 3,
                       ## lavPredict() can add only the modeled data
                       append.data = TRUE)
lapply(fs1, head)

## To merge factor scores to original data.frame (not just modeled data)
fs1 <- plausibleValues(fit1, nDraws = 3)
idx <- lavInspect(fit1, "case.idx")       # row index for each case
if (is.list(idx)) idx <- do.call(c, idx)  # for multigroup models
data(HolzingerSwineford1939)              # copy data to workspace
HolzingerSwineford1939$case.idx <- idx    # add row index as variable
## loop over draws to merge original data with factor scores
for (i in seq_along(fs1)) {
  fs1[[i]] <- merge(fs1[[i]], HolzingerSwineford1939, by = "case.idx")
}
lapply(fs1, head)

## multiple-group analysis, in 2 steps
step1 <- cfa(HS.model, data = HolzingerSwineford1939, group = "school",
             group.equal = c("loadings","intercepts"))
PV.list <- plausibleValues(step1)

## subsequent path analysis
path.model <- ' visual ~ c(t1, t2)*textual + c(s1, s2)*speed '
\dontrun{
step2 <- sem.mi(path.model, data = PV.list, group = "school")
## test equivalence of both slopes across groups
lavTestWald.mi(step2, constraints = 't1 == t2 ; s1 == s2')
}

## multilevel example from ?Demo.twolevel help page
model <- '
level: 1
  fw =~ y1 + y2 + y3
  fw ~ x1 + x2 + x3
level: 2
  fb =~ y1 + y2 + y3
  fb ~ w1 + w2
'
msem <- sem(model, data = Demo.twolevel, cluster = "cluster")
mlPVs <- plausibleValues(msem, nDraws = 3) # both levels by default
lapply(mlPVs, head, n = 10)
## only Level 1
mlPV1 <- plausibleValues(msem, nDraws = 3, level = 1)
lapply(mlPV1, head)
## only Level 2
mlPV2 <- plausibleValues(msem, nDraws = 3, level = 2)
lapply(mlPV2, head)
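## A quick check (sketch): plausible values differ across draws,
## reflecting the factor-score uncertainty that Step 2 should propagate.
## Here, the first case's "visual" score from each draw in fs1 above:
sapply(fs1, function(pv) pv$visual[1])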
} \references{ Asparouhov, T., & Muthen, B. O. (2010). \emph{Plausible values for latent variables using Mplus}. Technical Report. Retrieved from www.statmodel.com/download/Plausible.pdf }
\seealso{ \code{\link{runMI}}, \code{\linkS4class{lavaan.mi}} }
\author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) }
semTools/man/findRMSEAsamplesizenested.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/powerAnalysisNested.R
\name{findRMSEAsamplesizenested}
\alias{findRMSEAsamplesizenested}
\title{Find sample size given a power in nested model comparison}
\usage{ findRMSEAsamplesizenested(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, power = 0.8, alpha = 0.05, group = 1) }
\arguments{
\item{rmsea0A}{The \eqn{H_0} baseline RMSEA}
\item{rmsea0B}{The \eqn{H_0} alternative RMSEA (trivial misfit)}
\item{rmsea1A}{The \eqn{H_1} baseline RMSEA}
\item{rmsea1B}{The \eqn{H_1} alternative RMSEA (target misfit to be rejected)}
\item{dfA}{degrees of freedom of the more-restricted model.}
\item{dfB}{degrees of freedom of the less-restricted model.}
\item{power}{The desired statistical power.}
\item{alpha}{The alpha level.}
\item{group}{The number of groups used in calculating RMSEA.}
}
\description{ Find the minimum sample size at which the power to reject the \eqn{H_0} pair of RMSEA values, when data are generated under the \eqn{H_1} pair, just exceeds the specified power. }
\examples{
findRMSEAsamplesizenested(rmsea0A = 0, rmsea0B = 0, rmsea1A = 0.06,
                          rmsea1B = 0.05, dfA = 22, dfB = 20, power = 0.80,
                          alpha = .05, group = 1)
}
\references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11}(1), 19--35. doi:10.1037/1082-989X.11.1.19 }
\seealso{ \itemize{
\item \code{\link{plotRMSEApowernested}} to plot the statistical power for nested model comparison based on population RMSEA given the sample size
\item \code{\link{findRMSEApowernested}} to find the power for a given sample size in nested model comparison based on population RMSEA
} }
\author{ Bell Clinton Pavel Panko (Texas Tech University; \email{pavel.panko@ttu.edu}) Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) }
semTools/man/moreFitIndices.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitIndices.R
\name{moreFitIndices}
\alias{moreFitIndices}
\title{Calculate more fit indices}
\usage{ moreFitIndices(object, fit.measures = "all", nPrior = 1) }
\arguments{
\item{object}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.}
\item{fit.measures}{Additional fit measures to be calculated. All additional fit measures are calculated by default.}
\item{nPrior}{The sample size on which the prior is based.
This argument is used to compute \code{bic.priorN}.}
}
\value{ \enumerate{
\item \code{gammaHat}: Gamma Hat
\item \code{adjGammaHat}: Adjusted Gamma Hat
\item \code{baseline.rmsea}: RMSEA of the Baseline (Null) Model
\item \code{aic.smallN}: Corrected (for small sample size) Akaike Information Criterion
\item \code{bic.priorN}: Bayesian Information Criterion with specified prior sample size
\item \code{sic}: Stochastic Information Criterion
\item \code{hqc}: Hannan-Quinn Information Criterion
\item \code{gammaHat.scaled}: Gamma Hat using scaled \eqn{\chi^2}
\item \code{adjGammaHat.scaled}: Adjusted Gamma Hat using scaled \eqn{\chi^2}
\item \code{baseline.rmsea.scaled}: RMSEA of the Baseline (Null) Model using scaled \eqn{\chi^2}
} }
\description{ Calculate more fit indices that are not already provided in lavaan. }
\details{ Gamma Hat (gammaHat; West, Taylor, & Wu, 2012) is a global fit index which can be computed (assuming an equal number of indicators across groups) by
\deqn{ gammaHat = \frac{p}{p + 2 \times \frac{\chi^{2}_{k} - df_{k}}{N}} ,}
where \eqn{p} is the number of variables in the model, \eqn{\chi^{2}_{k}} is the \eqn{\chi^2} test statistic value of the target model, \eqn{df_{k}} is the degrees of freedom when fitting the target model, and \eqn{N} is the sample size (or sample size minus the number of groups if \code{mimic} is set to \code{"EQS"}).

Adjusted Gamma Hat (adjGammaHat; West, Taylor, & Wu, 2012) is a global fit index which can be computed by
\deqn{ adjGammaHat = 1 - \frac{K \times p \times (p + 1)}{2 \times df_{k}} \times \left( 1 - gammaHat \right) ,}
where \eqn{K} is the number of groups (please refer to Dudgeon, 2004 for the multiple-group adjustment for agfi*).

Corrected Akaike Information Criterion (aic.smallN; Burnham & Anderson, 2003) is a corrected version of AIC for small sample size, often abbreviated AICc:
\deqn{ aic.smallN = AIC + \frac{2k(k + 1)}{N - k - 1},}
where \eqn{AIC} is the original AIC: \eqn{-2 \times LL + 2k} (where \eqn{k} = the number of estimated parameters in the target model). Note that AICc is a small-sample correction derived for univariate regression models, so it is probably \emph{not} appropriate for comparing SEMs.

Corrected Bayesian Information Criterion (bic.priorN; Kuha, 2004) is similar to BIC but explicitly specifies the sample size on which the prior is based (\eqn{N_{prior}}):
\deqn{ bic.priorN = f + k\log{(1 + N/N_{prior})},}
where \eqn{f} denotes \eqn{-2 \times LL} of the target model, as in the AIC above.

Stochastic information criterion (SIC; Preacher, 2006) is similar to AIC or BIC. This index will account for model complexity in the model's functional form, in addition to the number of free parameters. This index will be provided only when the \eqn{\chi^2} value is not scaled. The SIC can be computed by
\deqn{ sic = \frac{1}{2}\left(f - \log{\det{I(\hat{\theta})}}\right),}
where \eqn{I(\hat{\theta})} is the information matrix of the parameters.

Hannan-Quinn Information Criterion (hqc; Hannan & Quinn, 1979) is used for model selection, similar to AIC or BIC:
\deqn{ hqc = f + 2k\log{(\log{N})},}

Note that if the Satorra--Bentler or Yuan--Bentler method is used, the fit indices using the scaled \eqn{\chi^2} values are also provided.

See \code{\link{nullRMSEA}} for further details on the computation of the RMSEA of the null model. }
\examples{
HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '

fit <- cfa(HS.model, data = HolzingerSwineford1939)
moreFitIndices(fit)

fit2 <- cfa(HS.model, data = HolzingerSwineford1939, estimator = "mlr")
moreFitIndices(fit2)
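## Only a subset of indices can be requested, and nPrior is used when
## computing bic.priorN. A sketch, assuming the names listed under
## Value are the values accepted by the fit.measures argument:
moreFitIndices(fit, fit.measures = "bic.priorN", nPrior = 200)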
} \references{ Burnham, K., & Anderson, D. (2003). \emph{Model selection and multimodel inference: A practical--theoretic approach}. New York, NY: Springer--Verlag.

Dudgeon, P. (2004). A note on extending Steiger's (1998) multiple sample RMSEA adjustment to other noncentrality parameter-based statistics. \emph{Structural Equation Modeling, 11}(3), 305--319. doi:10.1207/s15328007sem1103_1

Kuha, J. (2004). AIC and BIC: Comparisons of assumptions and performance. \emph{Sociological Methods & Research, 33}(2), 188--229. doi:10.1177/0049124103262065

Preacher, K. J. (2006). Quantifying parsimony in structural equation modeling. \emph{Multivariate Behavioral Research, 43}(3), 227--259. doi:10.1207/s15327906mbr4103_1

West, S. G., Taylor, A. B., & Wu, W. (2012). Model fit and model selection in structural equation modeling. In R. H. Hoyle (Ed.), \emph{Handbook of Structural Equation Modeling} (pp. 209--231). New York, NY: Guilford. }
\seealso{ \itemize{
\item \code{\link{miPowerFit}} For the modification indices and their power approach for model fit evaluation
\item \code{\link{nullRMSEA}} For RMSEA of the null model
} }
\author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) Aaron Boulton (University of North Carolina, Chapel Hill; \email{aboulton@email.unc.edu}) Ruben Arslan (Humboldt-University of Berlin, \email{rubenarslan@gmail.com}) Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) }
semTools/man/lavTestLRT.mi.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runMI-LRT.R
\name{lavTestLRT.mi}
\alias{lavTestLRT.mi}
\title{Likelihood Ratio Test for Multiple Imputations}
\usage{ lavTestLRT.mi(object, h1 = NULL, test = c("D3", "D2"), omit.imps = c("no.conv", "no.se"), asymptotic = FALSE, pool.robust = FALSE, ...) }
\arguments{
\item{object, h1}{An object of class \code{\linkS4class{lavaan.mi}}. \code{object} should be nested within (more constrained than) \code{h1}.}
\item{test}{\code{character} indicating which pooling method to use. \code{"D3"}, \code{"mr"}, or \code{"meng.rubin"} (default) requests the method described by Meng & Rubin (1992). \code{"D2"}, \code{"LMRR"}, or \code{"Li.et.al"} requests that the complete-data LRT statistic be calculated using each imputed data set, then pooled across imputations, as described in Li, Meng, Raghunathan, & Rubin (1991). Find additional details in Enders (2010, chapter 8).}
\item{omit.imps}{\code{character} vector specifying criteria for omitting imputations from pooled results. Can include any of \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the default setting, which excludes any imputations that did not converge or for which standard errors could not be computed. The last option (\code{"no.npd"}) would exclude any imputations which yielded a nonpositive definite covariance matrix for observed or latent variables, which would include any "improper solutions" such as Heywood cases.}
\item{asymptotic}{\code{logical}.
If \code{FALSE} (default), the pooled test will be returned as an \emph{F}-distributed statistic with numerator (\code{df1}) and denominator (\code{df2}) degrees of freedom. If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its \code{df1} on the assumption that its \code{df2} is sufficiently large that the statistic will be asymptotically \eqn{\chi^2} distributed with \code{df1}.}
\item{pool.robust}{\code{logical}. Ignored unless \code{test = "D2"} and a robust test was requested. If \code{pool.robust = TRUE}, the robust test statistic is pooled, whereas \code{pool.robust = FALSE} will pool the naive test statistic (or difference statistic) and apply the average scale/shift parameter to it (unavailable for mean- and variance-adjusted difference statistics, so \code{pool.robust} will be set \code{TRUE}).}
\item{...}{Additional arguments passed to \code{\link[lavaan]{lavTestLRT}}, only if \code{test = "D2"} and \code{pool.robust = TRUE}}
}
\value{ A vector containing the LRT statistic (either an \code{F} or \eqn{\chi^2} statistic, depending on the \code{asymptotic} argument), its degrees of freedom (numerator and denominator, if \code{asymptotic = FALSE}), its \emph{p} value, and 2 missing-data diagnostics: the relative increase in variance (RIV, or average for multiparameter tests: ARIV) and the fraction of missing information (FMI = ARIV / (1 + ARIV)). Robust statistics will also include the average (across imputations) scaling factor and (if relevant) shift parameter(s), unless \code{pool.robust = TRUE}. }
\description{ Likelihood ratio test (LRT) for lavaan models fitted to multiply imputed data sets. Statistics for comparing nested models can be calculated by pooling the likelihood ratios across imputed data sets, as described by Meng & Rubin (1992), or by pooling the LRT statistics from each imputation, as described by Li, Meng, Raghunathan, & Rubin (1991). }
\details{ The Meng & Rubin (1992) method, also referred to as the \code{"D3"} statistic, is only applicable when using a likelihood-based estimator. Otherwise (e.g., DWLS for categorical outcomes), users are notified that \code{test} was set to \code{"D2"}.

\code{test = "Mplus"} implies \code{"D3"} and \code{asymptotic = TRUE} (see Asparouhov & Muthen, 2010).

Note that unlike \code{\link[lavaan]{lavTestLRT}}, \code{lavTestLRT.mi} can only be used to compare a single pair of models, not a longer list of models. To compare several nested models fitted to multiple imputations, see examples on the \code{\link{compareFit}} help page. }
\examples{
\dontrun{
## impose missing data for example
HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""),
                                      "ageyr","agemo","school")]
set.seed(12345)
HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5)
age <- HSMiss$ageyr + HSMiss$agemo/12
HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9)

## impute missing data
library(Amelia)
set.seed(12345)
HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE)
imps <- HS.amelia$imputations

## specify CFA model from lavaan's ?cfa help page
HS.model <- '
  visual  =~ x1 + b1*x2 + x3
  textual =~ x4 + b2*x5 + x6
  speed   =~ x7 + b3*x8 + x9
'

fit1 <- cfa.mi(HS.model, data = imps, estimator = "mlm")
fit0 <- cfa.mi(HS.model, data = imps, estimator = "mlm", orthogonal = TRUE)

## By default, use D3.
## Must request a chi-squared statistic to be robustified.
lavTestLRT.mi(fit1, h1 = fit0, asymptotic = TRUE)

## Using D2, you can either robustify the pooled naive statistic ...
lavTestLRT.mi(fit1, h1 = fit0, asymptotic = TRUE, test = "D2")
## ... or pool the robust chi-squared statistic
lavTestLRT.mi(fit1, h1 = fit0, asymptotic = TRUE, test = "D2",
              pool.robust = TRUE)
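## Unless asymptotic = TRUE, the pooled statistic is returned in its
## F-distributed form; e.g., the default D3 pooling for this comparison:
lavTestLRT.mi(fit1, h1 = fit0)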
} }
\references{ Enders, C. K. (2010). \emph{Applied missing data analysis}. New York, NY: Guilford.

Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). Significance levels from repeated \emph{p}-values with multiply-imputed data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from https://www.jstor.org/stable/24303994

Meng, X.-L., & Rubin, D. B. (1992). Performing likelihood ratio tests with multiply-imputed data sets. \emph{Biometrika, 79}(1), 103--111. doi:10.2307/2337151

Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. New York, NY: Wiley. }
\seealso{ \code{\link[lavaan]{lavTestLRT}}, \code{\link{compareFit}} }
\author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) }
semTools/man/bsBootMiss.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/missingBootstrap.R
\name{bsBootMiss}
\alias{bsBootMiss}
\title{Bollen-Stine Bootstrap with the Existence of Missing Data}
\usage{ bsBootMiss(x, transformation = 2, nBoot = 500, model, rawData, Sigma, Mu, group, ChiSquared, EMcov, writeTransData = FALSE, transDataOnly = FALSE, writeBootData = FALSE, bootSamplesOnly = FALSE, writeArgs, seed = NULL, suppressWarn = TRUE, showProgress = TRUE, ...) }
\arguments{
\item{x}{A target \code{lavaan} object used in the Bollen-Stine bootstrap}
\item{transformation}{The transformation method from Savalei and Yuan (2009). There are three methods in the article, but only the first two are currently implemented here. Use \code{transformation = 1} when there are few missing data patterns, each of which has a large size, such as in a planned-missing-data design. Use \code{transformation = 2} when there are more missing data patterns. The currently unavailable \code{transformation = 3} would be used when several missing data patterns have n = 1.}
\item{nBoot}{The number of bootstrap samples.}
\item{model}{Optional. The target model if \code{x} is not provided.}
\item{rawData}{Optional. The target raw data set if \code{x} is not provided.}
\item{Sigma}{Optional. The model-implied covariance matrix if \code{x} is not provided.}
\item{Mu}{Optional. The model-implied mean vector if \code{x} is not provided.}
\item{group}{Optional character string specifying the name of the grouping variable in \code{rawData} if \code{x} is not provided.}
\item{ChiSquared}{Optional. The model's \eqn{\chi^2} test statistic if \code{x} is not provided.}
\item{EMcov}{Optional, if \code{x} is not provided. The EM (or Two-Stage ML) estimated covariance matrix used to speed up the Transformation 2 algorithm.}
\item{writeTransData}{Logical. If \code{TRUE}, the transformed data set is written to a text file, \code{transDataOnly} is set to \code{TRUE}, and the transformed data are returned invisibly.}
\item{transDataOnly}{Logical. If \code{TRUE}, the result will provide the transformed data only.}
\item{writeBootData}{Logical. If \code{TRUE}, the stacked bootstrap data sets are written to a text file, \code{bootSamplesOnly} is set to \code{TRUE}, and the list of bootstrap data sets is returned invisibly.}
\item{bootSamplesOnly}{Logical. If \code{TRUE}, the result will provide bootstrap data sets only.}
\item{writeArgs}{Optional \code{list}.
If \code{writeTransData = TRUE} or \code{writeBootData = TRUE}, the user can pass arguments to the \code{\link[utils]{write.table}} function as a list. Some default values are provided: \code{file} = "bootstrappedSamples.dat", \code{row.names} = \code{FALSE}, and \code{na} = "-999", but the user can override all of these by providing other values for those arguments in the \code{writeArgs} list.}
\item{seed}{The seed number used in randomly drawing bootstrap samples.}
\item{suppressWarn}{Logical. If \code{TRUE}, warnings from the \code{lavaan} function will be suppressed when fitting the model to each bootstrap sample.}
\item{showProgress}{Logical. Indicates whether to display a progress bar while fitting models to bootstrap samples.}
\item{\dots}{Additional arguments for the \code{\link[lavaan]{lavaan}} function. See also \code{\link[lavaan]{lavOptions}}}
}
\value{ By default, this function returns a \code{\linkS4class{BootMiss}} object containing the results of the bootstrap samples. Use \code{show}, \code{summary}, or \code{hist} to examine the results. Optionally, the transformed data set is returned if \code{transDataOnly = TRUE}. Optionally, the bootstrap data sets are returned if \code{bootSamplesOnly = TRUE}. }
\description{ Implement Bollen and Stine's (1992) bootstrap when missing observations exist. The implemented method was proposed by Savalei and Yuan (2009). This can be used in two ways. The first and easiest option is to fit the model to incomplete data in \code{lavaan} using the FIML estimator, then pass that \code{lavaan} object to \code{bsBootMiss}. }
\details{ The second option is designed for users of other software packages (e.g., LISREL, EQS, Amos, or Mplus). Users can import their data, \eqn{\chi^2} value, and model-implied moments from another package, and they have the option of saving (or writing to a file) either the transformed data or bootstrapped samples of that data, which can be analyzed in other programs. In order to analyze the bootstrapped samples and return a \emph{p} value, users of other programs must still specify their model using lavaan syntax. }
\examples{
\dontrun{
dat1 <- HolzingerSwineford1939
dat1$x5 <- ifelse(dat1$x1 <= quantile(dat1$x1, .3), NA, dat1$x5)
dat1$x9 <- ifelse(is.na(dat1$x5), NA, dat1$x9)

targetModel <- "
visual  =~ x1 + x2 + x3
textual =~ x4 + x5 + x6
speed   =~ x7 + x8 + x9
"
targetFit <- sem(targetModel, dat1, meanstructure = TRUE, std.lv = TRUE,
                 missing = "fiml", group = "school")
summary(targetFit, fit = TRUE, standardized = TRUE)

# The number of bootstrap samples should be much higher.
temp <- bsBootMiss(targetFit, transformation = 1, nBoot = 10, seed = 31415)

temp
summary(temp)
hist(temp)
hist(temp, printLegend = FALSE) # suppress the legend

## user can specify alpha level (default: alpha = 0.05), and the number of
## digits to display (default: nd = 2).  Pass other arguments to hist(...),
## or a list of arguments to legend() via "legendArgs"
hist(temp, alpha = .01, nd = 3, xlab = "something else", breaks = 25,
     legendArgs = list("bottomleft", box.lty = 2))
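## Sketch: with transDataOnly = TRUE (see the argument above), only the
## transformed data are returned, which users of other SEM software can
## export and analyze elsewhere:
transDat <- bsBootMiss(targetFit, transformation = 1, transDataOnly = TRUE)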
} }
\references{ Bollen, K. A., & Stine, R. A. (1992). Bootstrapping goodness-of-fit measures in structural equation models. \emph{Sociological Methods & Research, 21}(2), 205--229. doi:10.1177/0049124192021002004

Savalei, V., & Yuan, K.-H. (2009). On the model-based bootstrap with missing data: Obtaining a p-value for a test of exact fit. \emph{Multivariate Behavioral Research, 44}(6), 741--763. doi:10.1080/00273170903333590 }
\seealso{ \code{\linkS4class{BootMiss}} }
\author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) Syntax for transformations borrowed from http://www2.psych.ubc.ca/~vsavalei/ }
semTools/man/mvrnonnorm.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvrnonnorm.R
\name{mvrnonnorm}
\alias{mvrnonnorm}
\title{Generate Non-normal Data using Vale and Maurelli (1983) method}
\usage{ mvrnonnorm(n, mu, Sigma, skewness = NULL, kurtosis = NULL, empirical = FALSE) }
\arguments{
\item{n}{Sample size}
\item{mu}{A mean vector. If elements are named, those will be used as variable names in the returned data matrix.}
\item{Sigma}{A positive-definite symmetric matrix specifying the covariance matrix of the variables. If rows or columns are named (and \code{mu} is unnamed), those will be used as variable names in the returned data matrix.}
\item{skewness}{A vector of skewness of the variables}
\item{kurtosis}{A vector of excess kurtosis of the variables}
\item{empirical}{If \code{TRUE}, \code{mu} and \code{Sigma} specify the empirical rather than population mean and covariance matrix}
}
\value{ A data matrix }
\description{ Generate non-normal data using the Vale and Maurelli (1983) method. The function is designed to be as similar as possible to the popular \code{mvrnorm} function in the \code{MASS} package. The argument-checking code is adapted from the \code{mvrnorm} function in the \code{MASS} package, and the data generation using the Vale and Maurelli (1983) method is adapted from the \code{lavaan} package. }
\examples{
set.seed(123)
mvrnonnorm(20, c(1, 2), matrix(c(10, 2, 2, 5), 2, 2),
           skewness = c(5, 2), kurtosis = c(3, 3))

## again, with variable names specified in mu
set.seed(123)
mvrnonnorm(20, c(a = 1, b = 2), matrix(c(10, 2, 2, 5), 2, 2),
           skewness = c(5, 2), kurtosis = c(3, 3))
}
\references{ Vale, C. D., & Maurelli, V. A. (1983). Simulating multivariate nonnormal distributions. \emph{Psychometrika, 48}(3), 465--471. doi:10.1007/BF02293687 }
\author{ The original function is the \code{\link[lavaan]{simulateData}} function written by Yves Rosseel in the \code{lavaan} package. The function was adapted for convenient usage by Sunthud Pornprasertmanit (\email{psunthud@gmail.com}). Terrence D. Jorgensen added the feature to retain variable names from \code{mu} or \code{Sigma}. }
semTools/man/residualCovariate.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/residualCovariate.R
\name{residualCovariate}
\alias{residualCovariate}
\title{Residual-center all target indicators by covariates}
\usage{ residualCovariate(data, targetVar, covVar) }
\arguments{
\item{data}{The desired data to be transformed.}
\item{targetVar}{Variable names or the positions of indicators that users wish to be residual centered (as dependent variables)}
\item{covVar}{Covariate names or the positions of the covariates used for residual centering (as independent variables) onto target variables}
}
\value{ The data set with the target variables replaced by the residuals }
\description{ This function will regress target variables on the covariate and replace the target variables with the residuals of the regression analysis. This procedure is useful for controlling for the covariate in the analysis model (Geldhof, Pornprasertmanit, Schoemann, & Little, 2013). }
\examples{
dat <- residualCovariate(attitude, 2:7, 1)
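## Equivalently, variables can be specified by name rather than by
## position (same columns of the attitude data as above):
dat2 <- residualCovariate(attitude,
                          targetVar = c("complaints", "privileges", "learning",
                                        "raises", "critical", "advance"),
                          covVar = "rating")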
} \references{ Geldhof, G. J., Pornprasertmanit, S., Schoemann, A. M., & Little, T. D. (2013). Orthogonalizing through residual centering: Extended applications and caveats. \emph{Educational and Psychological Measurement, 73}(1), 27--46. doi:10.1177/0013164412445473 }
\seealso{ \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering. }
\author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) }
semTools/man/probe2WayRC.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/probeInteraction.R
\name{probe2WayRC}
\alias{probe2WayRC}
\title{Probing two-way interaction on the residual-centered latent interaction}
\usage{ probe2WayRC(fit, nameX, nameY, modVar, valProbe, group) }
\arguments{
\item{fit}{The lavaan model object used to evaluate model fit}
\item{nameX}{The vector of the factor names used as the predictors. The first-order factors must be listed first. The last name must be the name representing the interaction term.}
\item{nameY}{The name of the factor that is used as the dependent variable.}
\item{modVar}{The name of the factor that is used as a moderator. The effect of the other independent factor on each moderator variable value will be probed.}
\item{valProbe}{The values of the moderator that will be used to probe the effect of the other independent factor.}
\item{group}{In multigroup models, the label of the group for which the results will be returned. Must correspond to one of \code{\link[lavaan]{lavInspect}(fit, "group.label")}.}
}
\value{ A list with two elements: \enumerate{
\item \code{SimpleIntercept}: The intercepts given each value of the moderator. This element will be shown only if the factor intercept is estimated (e.g., not fixed as 0).
\item \code{SimpleSlope}: The slopes given each value of the moderator.
}
In each element, the first column represents the values of the moderator specified in the \code{valProbe} argument. The second column is the simple intercept or simple slope. The third column is the standard error of the simple intercept or simple slope. The fourth column is the Wald (\emph{z}) statistic. The fifth column is the \emph{p} value testing whether the simple intercepts or slopes are different from 0. }
\description{ Probing interaction for the simple intercept and simple slope of the residual-centered latent two-way interaction (Pornprasertmanit, Schoemann, Geldhof, & Little, submitted) }
\details{ Before using this function, researchers need to make the products of the indicators between the first-order factors and residualize the products by the original indicators (Lance, 1988; Little, Bovaird, & Widaman, 2006). The process can be automated by the \code{\link{indProd}} function. Note that the indicator products can be made for all possible combinations or by the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model with the latent-interaction regression will be used to fit all original indicators and the product terms. To use this function, the model must be fit with a mean structure. See the example below for how to fit the product term. Once the lavaan result is obtained, this function can be used to probe the interaction.

The probing process on the residual-centered latent interaction is based on transforming the residual-centered result into the no-centered result.
See Pornprasertmanit, Schoemann, Geldhof, and Little (submitted) for further details. Note that this approach is based on the strong assumption that the first-order latent variables are normally distributed. The probing process is applied after the no-centered result (parameter estimates and their covariance matrix) has been computed. See \code{\link{probe2WayMC}} for further details. }
\examples{
library(lavaan)

dat2wayRC <- orthogonalize(dat2way, 1:3, 4:6)

model1 <- "
f1 =~ x1 + x2 + x3
f2 =~ x4 + x5 + x6
f12 =~ x1.x4 + x2.x5 + x3.x6
f3 =~ x7 + x8 + x9
f3 ~ f1 + f2 + f12
f12 ~~ 0*f1
f12 ~~ 0*f2
x1 ~ 0*1
x4 ~ 0*1
x1.x4 ~ 0*1
x7 ~ 0*1
f1 ~ NA*1
f2 ~ NA*1
f12 ~ NA*1
f3 ~ NA*1
"

fitRC2way <- sem(model1, data = dat2wayRC, std.lv = FALSE, meanstructure = TRUE)
summary(fitRC2way)

result2wayRC <- probe2WayRC(fitRC2way, c("f1", "f2", "f12"), "f3", "f2", c(-1, 0, 1))
result2wayRC
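## The probed simple intercepts and slopes can be visualized with
## plotProbe() (listed in seealso below):
plotProbe(result2wayRC, xlim = c(-2, 2))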
} \references{ Lance, C. E. (1988). Residual centering, exploratory and confirmatory moderator analysis, and decomposition of effects in path models containing interactions. \emph{Applied Psychological Measurement, 12}(2), 163--175. doi:10.1177/014662168801200205

Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of orthogonalizing powered and product terms: Implications for modeling interactions. \emph{Structural Equation Modeling, 13}(4), 497--519. doi:10.1207/s15328007sem1304_1

Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of latent interactions: Evaluation of alternative estimation strategies and indicator construction. \emph{Psychological Methods, 9}(3), 275--300. doi:10.1037/1082-989X.9.3.275

Geldhof, G. J., Pornprasertmanit, S., Schoemann, A. M., & Little, T. D. (2013). Orthogonalizing through residual centering: Extended applications and caveats. \emph{Educational and Psychological Measurement, 73}(1), 27--46. doi:10.1177/0013164412445473 }
\seealso{ \itemize{
\item \code{\link{indProd}} For creating the indicator products with no centering, mean centering, double-mean centering, or residual centering.
\item \code{\link{probe2WayMC}} For probing the two-way latent interaction when the results are obtained from mean-centering or double-mean centering
\item \code{\link{probe3WayMC}} For probing the three-way latent interaction when the results are obtained from mean-centering or double-mean centering
\item \code{\link{probe3WayRC}} For probing the three-way latent interaction when the results are obtained from the residual-centering approach.
\item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the latent interaction.
} }
\author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) }
semTools/man/splitSample.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitSample.R
\name{splitSample}
\alias{splitSample}
\title{Randomly Split a Data Set into Halves}
\usage{ splitSample(dataset, path = "default", div = 2, type = "default", name = "splitSample") }
\arguments{
\item{dataset}{The original data set to be divided. Can be a file path to a *.csv or *.dat file (headers will automatically be detected) or an R object (matrix or data frame). (Windows users: file path must be specified using FORWARD SLASHES (\code{/}) ONLY.)}
\item{path}{File path to folder for output data sets. NOT REQUIRED if dataset is a filename. Specify ONLY if dataset is an R object, or the desired output folder is not that of the original data set. If path is specified as "object", output data sets will be returned as a list, and not saved to hard drive.}
\item{div}{Number of output data sets. NOT REQUIRED if default, 2 halves.}
\item{type}{Output file format ("dat" or "csv"). NOT REQUIRED unless desired output formatting differs from that of input, or dataset is an R object and csv formatting is desired.}
\item{name}{Output file name. NOT REQUIRED unless desired output name differs from that of input, or input dataset is an R object. (If input is an R object and name is not specified, name will be "splitSample".)}
}
\value{ If \code{path = "object"}, \code{list} of output data sets. Otherwise, output will be saved to the hard drive in the same format as input. }
\description{ This function randomly splits a data set into two halves, and saves the resulting data sets to the same folder as the original. }
\details{ This function randomly orders the rows of a data set, divides the data set into two halves, and saves the halves to the same folder as the original data set, preserving the original formatting. Data set type (*.csv or *.dat) and formatting (headers) are automatically detected, and output data sets will preserve input type and formatting unless specified otherwise. Input can be in the form of a file path (*.dat or *.csv), or an R object (matrix or data frame). If input is an R object and path is default, output data sets will be returned as a list object. }
\examples{
#### Input is .dat file
#splitSample("C:/Users/Default/Desktop/MYDATA.dat")
#### Output saved to "C:/Users/Default/Desktop/" in .dat format
#### Names are "MYDATA_s1.dat" and "MYDATA_s2.dat"

#### Input is R object
## Split CO2 dataset from the datasets package
library(datasets)
splitMyData <- splitSample(CO2, path = "object")
summary(splitMyData[[1]])
summary(splitMyData[[2]])
#### Output object splitMyData becomes list of output data sets

#### Input is .dat file in "C:/" folder
#splitSample("C:/testdata.dat", path = "C:/Users/Default/Desktop/", type = "csv")
#### Output saved to "C:/Users/Default/Desktop/" in *.csv format
#### Names are "testdata_s1.csv" and "testdata_s2.csv"

#### Input is R object
#splitSample(myData, path = "C:/Users/Default/Desktop/", name = "splitdata")
#### Output saved to "C:/Users/Default/Desktop/" in *.dat format
#### Names are "splitdata_s1.dat" and "splitdata_s2.dat"
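## div controls the number of output pieces; a sketch splitting the
## CO2 data into thirds instead of halves:
splitThirds <- splitSample(CO2, path = "object", div = 3)
length(splitThirds)  ## 3 data sets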
} \author{ Corbin Quick (University of Michigan; \email{corbinq@umich.edu}) }
semTools/man/poolMAlloc.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poolMAlloc.R
\name{poolMAlloc}
\alias{poolMAlloc}
\title{Pooled estimates and standard errors across M parcel-allocations: Combining sampling variability and parcel-allocation variability.}
\usage{ poolMAlloc(nPerPar, facPlc, nAllocStart, nAllocAdd = 0, parceloutput = NULL, syntax, dataset, stopProp, stopValue, selectParam = NULL, indices = "default", double = FALSE, checkConv = FALSE, names = "default", leaveout = 0, useTotalAlloc = FALSE, ...) }
\arguments{
\item{nPerPar}{A list in which each element is a vector, corresponding to each factor, indicating sizes of parcels. If variables are left out of parceling, they should not be accounted for here (i.e., there should not be parcels of size "1").}
\item{facPlc}{A list of vectors, each corresponding to a factor, specifying the item indicators of that factor (whether included in parceling or not). Either variable names or column numbers. Variables not listed will not be modeled or included in output data sets.}
\item{nAllocStart}{The number of random allocations of items to parcels to generate in the first iteration of the algorithm.}
\item{nAllocAdd}{The number of allocations to add with each iteration of the algorithm. Note that if only one iteration is desired, \code{nAllocAdd} can be set to \eqn{0} and results will be output for \code{nAllocStart} allocations only.}
\item{parceloutput}{Optional \code{character}. Path (folder/directory) where \emph{M} (the final selected number of allocations) parceled data sets will be outputted from the iteration where the algorithm met stopping criteria. Note for Windows users: file path must be specified using forward slashes (\code{/}), not backslashes (\code{\\}). See \code{\link[base]{path.expand}} for details. If \code{NULL} (default), nothing is saved to disk.}
\item{syntax}{lavaan syntax that defines the model.}
\item{dataset}{Item-level dataset}
\item{stopProp}{Value used in defining the stopping criteria of the algorithm (\eqn{\delta_a} in Sterba & Rights, 2016). This is the minimum proportion of change (in any pooled parameter or pooled standard error estimate listed in \code{selectParam}) that is allowable from one iteration of the algorithm to the next. That is, change in pooled estimates and pooled standard errors from one iteration to the next must all be less than (\code{stopProp}) x (value from former iteration). Note that \code{stopValue} can override this criterion (see below). Also note that values less than .01 are unlikely to lead to more substantively meaningful precision. Also note that if only \code{stopValue} is a desired criterion, \code{stopProp} can be set to 0.}
\item{stopValue}{Value used in defining the stopping criteria of the algorithm (\eqn{\delta_b} in Sterba & Rights, 2016). \code{stopValue} is a minimum allowable amount of absolute change (in any pooled parameter or pooled standard error estimate listed in \code{selectParam}) from one iteration of the algorithm to the next. For a given pooled estimate or pooled standard error, \code{stopValue} is only invoked as a stopping criterion when the minimum change required by \code{stopProp} is less than \code{stopValue}. Note that values less than .01 are unlikely to lead to more substantively meaningful precision. Also note that if only \code{stopProp} is a desired criterion, \code{stopValue} can be set to 0.}
\item{selectParam}{(Optional) A list of the pooled parameters to be used in defining the stopping criteria (i.e., \code{stopProp} and \code{stopValue}). These parameters should appear in the order they are listed in the lavaan syntax. By default, all pooled parameters are used. Note that \code{selectParam} should only contain freely-estimated parameters. In one example from Sterba & Rights (2016), \code{selectParam} included all free parameters except item intercepts, and in another example \code{selectParam} included only structural parameters.}
\item{indices}{Optional \code{character} vector indicating the names of available \code{\link[lavaan]{fitMeasures}} to be included in the output.
The first and second elements should be a chi-squared test statistic and its associated degrees of freedom, both of which will be added if missing. If \code{"default"}, the indices will be \code{c("chisq", "df", "cfi", "tli", "rmsea", "srmr")}. If a robust test statistic is requested (see \code{\link[lavaan]{lavOptions}}), \code{c("chisq","df")} will be replaced by \code{c("chisq.scaled","df.scaled")}. For the output to include both the naive and robust test statistics, \code{indices} should include both, but put the scaled test statistics first, as in \code{indices = c("chisq.scaled", "df.scaled", "chisq", "df")}}
\item{double}{(Optional) If set to \code{TRUE}, requires stopping criteria (\code{stopProp} and \code{stopValue}) to be met for all parameters (in \code{selectParam}) for two consecutive iterations of the algorithm. By default, this is set to \code{FALSE}, meaning stopping criteria need only be met at one iteration of the algorithm.}
\item{checkConv}{(Optional) If set to \code{TRUE}, the function will output pooled estimates and standard errors from 10 iterations post-convergence.}
\item{names}{(Optional) A character vector containing the names of parceled variables.}
\item{leaveout}{(Optional) A vector of variables to be left out of randomized parceling. Either variable names or column numbers are allowed.}
\item{useTotalAlloc}{(Optional) If set to \code{TRUE}, the function will output a separate set of results that uses all allocations created by the algorithm, rather than \emph{M} allocations (see "Allocations needed for stability" below). This distinction is further discussed in Sterba and Rights (2016).}
\item{\dots}{Additional arguments to be passed to \code{\link[lavaan]{lavaan}}. See also \code{\link[lavaan]{lavOptions}}}
}
\value{
\item{Estimates}{A table containing pooled results across \emph{M} allocations at the iteration where stopping criteria were met. Columns correspond to individual parameter names, pooled estimates, pooled standard errors, \emph{p}-values for a \emph{z}-test of the parameter, \emph{z}-based 95\% confidence intervals, \emph{p}-values for a \emph{t}-test of the parameter (using degrees of freedom described in Sterba & Rights, 2016), and \emph{t}-based 95\% confidence intervals for the parameter.}
\item{Fit}{A table containing results related to model fit from the \emph{M} allocations at the iteration where stopping criteria were met. Columns correspond to fit index names, the average of each index across allocations, the standard deviation of each fit index across allocations, the maximum of each fit index across allocations, the minimum of each fit index across allocations, the range of each fit index across allocations, and the percent of the \emph{M} allocations where the chi-square test of absolute fit was significant.}
\item{Proportion of converged and proper allocations}{A table containing the proportion of the final \emph{M} allocations that converged (using a maximum likelihood estimator) and the proportion of allocations that converged to proper solutions. Note that pooled estimates, pooled standard errors, and other results are computed using only the converged, proper allocations.}
\item{Allocations needed for stability (M)}{The number of allocations (\emph{M}) at which the algorithm's stopping criteria (defined above) were met.}
\item{Indices used to quantify uncertainty in estimates due to sample vs.
allocation variability}{A table containing individual parameter names, an estimate of the proportion of total variance of a pooled parameter estimate that is attributable to parcel-allocation variability (PPAV), and an estimate of the ratio of the between-allocation variance of a pooled parameter estimate to the within-allocation variance (RPAV). See Sterba & Rights (2016) for more detail.}
\item{Total runtime (minutes)}{The total runtime of the function, in minutes. Note that the total runtime will be greater when the specified model encounters convergence problems for some allocations, as is the case with the \code{\link{simParcel}} dataset used below.}
}
\description{ This function employs an iterative algorithm to pick the number of random item-to-parcel allocations needed to meet user-defined stability criteria for a fitted structural equation model (SEM) (see \bold{Details} below for more information). Pooled point and standard-error estimates from this SEM can be outputted at this final selected number of allocations (however, it is more efficient to save the allocations and treat them as multiple imputations using \code{\link{runMI}}; see \bold{See Also} for links with examples). Additionally, new indices (see Sterba & Rights, 2016) are outputted for assessing the relative contributions of parcel-allocation variability vs. sampling variability in each estimate. At each iteration, this function generates a given number of random item-to-parcel allocations, fits a SEM to each allocation, pools estimates across allocations from that iteration, and then assesses whether stopping criteria are met. If stopping criteria are not met, the algorithm increments the number of allocations used (generating all new allocations). }
\details{ For further details on the benefits of the random allocation of items to parcels, see Sterba (2011) and Sterba & MacCallum (2010).

This function implements an algorithm for choosing the number of allocations (\emph{M}; described in Sterba & Rights, 2016), pools point and standard-error estimates across these \emph{M} allocations, and produces indices for assessing the relative contributions of parcel-allocation variability vs. sampling variability in each estimate. To obtain pooled test statistics for model fit or model comparison, the \code{list} of parcel allocations can be passed to \code{\link{runMI}} (find \bold{Examples} on the help pages for \code{\link{parcelAllocation}} and \code{\link{PAVranking}}).

This function randomly generates a given number (\code{nAllocStart}) of item-to-parcel allocations, fits a SEM to each allocation, and then increments the number of allocations used (by \code{nAllocAdd}) until the pooled point and standard-error estimates fulfill stopping criteria (\code{stopProp} and \code{stopValue}, defined above). A summary of results from the model that was fit to the \emph{M} allocations is returned.

Additionally, this function outputs the proportion of allocations with solutions that converged (using a maximum likelihood estimator) as well as the proportion of allocations with solutions that were converged and proper. The converged and proper solutions among the final \emph{M} allocations are used in computing pooled results.

Additionally, after each iteration of the algorithm, information useful in monitoring the algorithm is outputted.
The number of allocations used at that iteration, the proportion of pooled parameter estimates meeting stopping criteria at the previous iteration, the proportion of pooled standard errors meeting stopping criteria at the previous iteration, and the runtime of that iteration are outputted. When stopping criteria are satisfied, the full set of results is outputted. } \examples{ \dontrun{ ## lavaan syntax: a correlated 2-factor ## CFA model to be fit to parceled data parmodel <- ' f1 =~ NA*p1f1 + p2f1 + p3f1 f2 =~ NA*p1f2 + p2f2 + p3f2 p1f1 ~ 1 p2f1 ~ 1 p3f1 ~ 1 p1f2 ~ 1 p2f2 ~ 1 p3f2 ~ 1 p1f1 ~~ p1f1 p2f1 ~~ p2f1 p3f1 ~~ p3f1 p1f2 ~~ p1f2 p2f2 ~~ p2f2 p3f2 ~~ p3f2 f1 ~~ 1*f1 f2 ~~ 1*f2 f1 ~~ f2 ' ## specify items for each factor f1name <- colnames(simParcel)[1:9] f2name <- colnames(simParcel)[10:18] ## run function poolMAlloc(nPerPar = list(c(3,3,3), c(3,3,3)), facPlc = list(f1name, f2name), nAllocStart = 10, nAllocAdd = 10, syntax = parmodel, dataset = simParcel, stopProp = .03, stopValue = .03, selectParam = c(1:6, 13:18, 21), names = list("p1f1","p2f1","p3f1","p1f2","p2f2","p3f2"), double = FALSE, useTotalAlloc = FALSE) } ## See examples on ?parcelAllocation and ?PAVranking for how to obtain ## pooled test statistics and other pooled lavaan output. ## Details provided in Sterba & Rights (2016). } \references{ Sterba, S. K. (2011). Implications of parcel-allocation variability for comparing fit of item-solutions and parcel-solutions. \emph{Structural Equation Modeling, 18}(4), 554--577. doi:10.1080/10705511.2011.607073 Sterba, S. K., & MacCallum, R. C. (2010). Variability in parameter estimates and model fit across random allocations of items to parcels. \emph{Multivariate Behavioral Research, 45}(2), 322--358. doi:10.1080/00273171003680302 Sterba, S. K., & Rights, J. D. (2016). Accounting for parcel-allocation variability in practice: Combining sources of uncertainty and choosing the number of allocations. \emph{Multivariate Behavioral Research, 51}(2--3), 296--313. doi:10.1080/00273171.2016.1144502 Sterba, S. K., & Rights, J. D. (2017). Effects of parceling on model selection: Parcel-allocation variability in model ranking. \emph{Psychological Methods, 22}(1), 47--68. doi:10.1037/met0000067 } \seealso{ \code{\link{runMI}} for treating allocations as multiple imputations to pool results across allocations. See \bold{Examples} on help pages for: \itemize{ \item{\code{\link{parcelAllocation}} for fitting a single model} \item{\code{\link{PAVranking}} for comparing 2 models} } } \author{ Jason D. Rights (Vanderbilt University; \email{jason.d.rights@vanderbilt.edu}) The author would also like to credit Corbin Quick and Alexander Schoemann for providing the original \code{parcelAllocation} function on which this function is based. } semTools/man/FitDiff-class.Rd0000644000176200001440000000425213432435606015550 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compareFit.R \docType{class} \name{FitDiff-class} \alias{FitDiff-class} \alias{show,FitDiff-method} \alias{summary,FitDiff-method} \title{Class For Representing A Template of Model Fit Comparisons} \usage{ \S4method{show}{FitDiff}(object) \S4method{summary}{FitDiff}(object, fit.measures = "default", nd = 3) } \arguments{ \item{object}{object of class \code{FitDiff}} \item{fit.measures}{\code{character} vector naming fit indices the user can request from \code{\link[lavaan]{fitMeasures}}.
If \code{"default"}, the fit measures will be \code{c("chisq", "df", "pvalue", "cfi", "tli", "rmsea", "srmr", "aic", "bic")}. If \code{"all"}, all available fit measures will be returned.} \item{nd}{number of digits printed} } \description{ This class contains model fit measures and model fit comparisons among multiple models } \section{Slots}{ \describe{ \item{\code{name}}{\code{character}. The name of each model} \item{\code{model.class}}{\code{character}. One class to which each model belongs} \item{\code{nested}}{\code{data.frame}. Model fit comparisons between adjacently nested models that are ordered by their degrees of freedom (\emph{df})} \item{\code{fit}}{\code{data.frame}. Fit measures of all models specified in the \code{name} slot, ordered by their \emph{df}} }} \section{Objects from the Class}{ Objects can be created via the \code{\link{compareFit}} function. } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' out <- measurementInvariance(model = HS.model, data = HolzingerSwineford1939, group = "school", quiet = TRUE) modelDiff <- compareFit(out) summary(modelDiff) summary(modelDiff, fit.measures = "all") summary(modelDiff, fit.measures = c("aic", "bic")) \dontrun{ ## Save results to a file saveFile(modelDiff, file = "modelDiff.txt") ## Copy to a clipboard clipboard(modelDiff) } } \seealso{ \code{\link{compareFit}}; \code{\link{clipboard}} } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/plotRMSEApower.Rd0000644000176200001440000000662013432435606015756 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/powerAnalysisRMSEA.R \name{plotRMSEApower} \alias{plotRMSEApower} \title{Plot power curves for RMSEA} \usage{ plotRMSEApower(rmsea0, rmseaA, df, nlow, nhigh, steps = 1, alpha = 0.05, group = 1, ...) } \arguments{ \item{rmsea0}{Null RMSEA} \item{rmseaA}{Alternative RMSEA} \item{df}{Model degrees of freedom} \item{nlow}{Lower sample size} \item{nhigh}{Upper sample size} \item{steps}{Increase in sample size for each iteration. Smaller values of steps will lead to more precise plots. However, smaller step sizes means a longer run time.} \item{alpha}{Alpha level used in power calculations} \item{group}{The number of group that is used to calculate RMSEA.} \item{\dots}{The additional arguments for the plot function.} } \value{ Plot of power for RMSEA against a range of sample sizes } \description{ Plots power of RMSEA over a range of sample sizes } \details{ This function creates plot of power for RMSEA against a range of sample sizes. The plot places sample size on the horizontal axis and power on the vertical axis. The user should indicate the lower and upper values for sample size and the sample size between each estimate ("step size") We strongly urge the user to read the sources below (see References) before proceeding. A web version of this function is available at: \url{http://quantpsy.org/rmsea/rmseaplot.htm}. } \examples{ plotRMSEApower(rmsea0 = .025, rmseaA = .075, df = 23, nlow = 100, nhigh = 500, steps = 10) } \references{ MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing differences between nested covariance structure models: Power analysis and null hypotheses. \emph{Psychological Methods, 11}(1), 19--35. doi:10.1037/1082-989X.11.1.19 MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). 
Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1}(2), 130--149. doi:10.1037/1082-989X.1.2.130 MacCallum, R. C., Lee, T., & Browne, M. W. (2010). The issue of isopower in power analysis for tests of structural equation models. \emph{Structural Equation Modeling, 17}(1), 23--41. doi:10.1080/10705510903438906 Preacher, K. J., Cai, L., & MacCallum, R. C. (2007). Alternatives to traditional model comparison strategies for covariance structure models. In T. D. Little, J. A. Bovaird, & N. A. Card (Eds.), \emph{Modeling contextual effects in longitudinal studies} (pp. 33--62). Mahwah, NJ: Lawrence Erlbaum Associates. Steiger, J. H. (1998). A note on multiple sample extensions of the RMSEA fit index. \emph{Structural Equation Modeling, 5}(4), 411--419. doi:10.1080/10705519809540115 Steiger, J. H., & Lind, J. C. (1980, June). \emph{Statistically based tests for the number of factors.} Paper presented at the annual meeting of the Psychometric Society, Iowa City, IA. } \seealso{ \itemize{ \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions \item \code{\link{findRMSEApower}} to find the statistical power based on population RMSEA given a sample size \item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for a given statistical power based on population RMSEA } } \author{ Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) Kristopher J. Preacher (Vanderbilt University; \email{kris.preacher@vanderbilt.edu}) Donna L. Coffman (Pennsylvania State University; \email{dlc30@psu.edu}) } semTools/man/fmi.Rd0000644000176200001440000001212013432435606013676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fmi.R \name{fmi} \alias{fmi} \title{Fraction of Missing Information} \usage{ fmi(data, method = "saturated", group = NULL, ords = NULL, varnames = NULL, exclude = NULL, fewImps = FALSE) } \arguments{ \item{data}{Either a single \code{data.frame} with incomplete observations, or a \code{list} of imputed data sets.} \item{method}{character. If \code{"saturated"} or \code{"sat"} (default), the model used to estimate FMI is a freely estimated covariance matrix and mean vector for numeric variables, and/or polychoric correlations and thresholds for ordered categorical variables, for each group (if applicable). If \code{"null"}, only means and variances are estimated for numeric variables, and/or thresholds for ordered categorical variables (i.e., covariances and/or polychoric correlations are constrained to zero). See Details for more information.} \item{group}{character. The optional name of a grouping variable, to request FMI in each group.} \item{ords}{character. Optional vector of names of ordered-categorical variables, which are not already stored as class \code{ordered} in \code{data}.} \item{varnames}{character. Optional vector of variable names, to calculate FMI for a subset of variables in \code{data}. By default, all numeric and ordered variables will be included, unless \code{data} is a single incomplete \code{data.frame}, in which case only numeric variables can be used with FIML estimation. Other variable types will be removed.} \item{exclude}{character. Optional vector of variable names to exclude from the analysis.} \item{fewImps}{logical. If \code{TRUE}, use the estimate of FMI that applies a correction to the estimated between-imputation variance.
Recommended when there are few imputations; makes little difference when there are many imputations. Ignored when \code{data} is not a list of imputed data sets.} } \value{ \code{fmi} returns a list with at least 2 of the following: \item{Covariances}{A list of symmetric matrices: (1) the estimated/pooled covariance matrix, or a list of group-specific matrices (if applicable) and (2) a matrix of FMI, or a list of group-specific matrices (if applicable). Only available if \code{method = "saturated"}.} \item{Variances}{The estimated/pooled variance for each numeric variable. Only available if \code{method = "null"} (otherwise, it is on the diagonal of Covariances).} \item{Means}{The estimated/pooled mean for each numeric variable.} \item{Thresholds}{The estimated/pooled threshold(s) for each ordered-categorical variable.} \item{message}{A message indicating caution when the null model is used.} } \description{ This function estimates the Fraction of Missing Information (FMI) for summary statistics of each variable, using either an incomplete data set or a list of imputed data sets. } \details{ The function estimates a saturated model with \code{\link[lavaan]{lavaan}} for a single incomplete data set using FIML, or with \code{\link{lavaan.mi}} for a list of imputed data sets. If method = \code{"saturated"}, FMI will be estimated for all summary statistics, which could take a lot of time with big data sets. If method = \code{"null"}, FMI will only be estimated for univariate statistics (e.g., means, variances, thresholds). The saturated model gives more reliable estimates, so requesting only a subset of variables from a large data set could also help reduce computation time. } \examples{ HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), "ageyr","agemo","school")] set.seed(12345) HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) age <- HSMiss$ageyr + HSMiss$agemo/12 HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ## calculate FMI (using FIML, provide partially observed data set) (out1 <- fmi(HSMiss, exclude = "school")) (out2 <- fmi(HSMiss, exclude = "school", method = "null")) (out3 <- fmi(HSMiss, varnames = c("x5","x6","x7","x8","x9"))) (out4 <- fmi(HSMiss, group = "school")) \dontrun{ ## ordered-categorical data data(datCat) lapply(datCat, class) ## impose missing values set.seed(123) for (i in 1:8) datCat[sample(1:nrow(datCat), size = .1*nrow(datCat)), i] <- NA ## impute data m = 3 times library(Amelia) set.seed(456) impout <- amelia(datCat, m = 3, noms = "g", ords = paste0("u", 1:8), p2s = FALSE) imps <- impout$imputations ## calculate FMI, using list of imputed data sets fmi(imps, group = "g") } } \references{ Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. New York, NY: Wiley. Savalei, V. & Rhemtulla, M. (2012). On obtaining estimates of the fraction of missing information from full information maximum likelihood. \emph{Structural Equation Modeling, 19}(3), 477--494. doi:10.1080/10705511.2012.687669 Wagner, J. (2010). The fraction of missing information as a tool for monitoring the quality of survey data. \emph{Public Opinion Quarterly, 74}(2), 223--243.
doi:10.1093/poq/nfq007 } \author{ Mauricio Garnier Villarreal (University of Kansas; \email{mauricio.garniervillarreal@marquette.edu}) Terrence Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/simParcel.Rd0000644000176200001440000000241313432435606015046 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{simParcel} \alias{simParcel} \title{Simulated Data Set to Demonstrate Random Allocations of Parcels} \format{A \code{data.frame} with 800 observations of 18 variables. \describe{ \item{f1item1}{Item 1 loading on factor 1} \item{f1item2}{Item 2 loading on factor 1} \item{f1item3}{Item 3 loading on factor 1} \item{f1item4}{Item 4 loading on factor 1} \item{f1item5}{Item 5 loading on factor 1} \item{f1item6}{Item 6 loading on factor 1} \item{f1item7}{Item 7 loading on factor 1} \item{f1item8}{Item 8 loading on factor 1} \item{f1item9}{Item 9 loading on factor 1} \item{f2item1}{Item 1 loading on factor 2} \item{f2item2}{Item 2 loading on factor 2} \item{f2item3}{Item 3 loading on factor 2} \item{f2item4}{Item 4 loading on factor 2} \item{f2item5}{Item 5 loading on factor 2} \item{f2item6}{Item 6 loading on factor 2} \item{f2item7}{Item 7 loading on factor 2} \item{f2item8}{Item 8 loading on factor 2} \item{f2item9}{Item 9 loading on factor 2} }} \source{ Data were generated using the \code{simsem} package. } \usage{ simParcel } \description{ A simulated data set with 2 factors with 9 indicators for each factor } \examples{ head(simParcel) } \keyword{datasets} semTools/man/permuteMeasEq.Rd0000644000176200001440000006332013432435606015710 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/permuteMeasEq.R \name{permuteMeasEq} \alias{permuteMeasEq} \title{Permutation Randomization Tests of Measurement Equivalence and Differential Item Functioning (DIF)} \usage{ permuteMeasEq(nPermute, modelType = c("mgcfa", "mimic"), con, uncon = NULL, null = NULL, param = NULL, freeParam = NULL, covariates = NULL, AFIs = NULL, moreAFIs = NULL, maxSparse = 10, maxNonconv = 10, showProgress = TRUE, warn = -1, datafun, extra, parallelType = c("none", "multicore", "snow"), ncpus = NULL, cl = NULL, iseed = 12345) } \arguments{ \item{nPermute}{An integer indicating the number of random permutations used to form empirical distributions under the null hypothesis.} \item{modelType}{A character string indicating the type of model employed: multiple-group CFA (\code{"mgcfa"}) or MIMIC (\code{"mimic"}).} \item{con}{The constrained \code{lavaan} object, in which the parameters specified in \code{param} are constrained to equality across all groups when \code{modelType = "mgcfa"}, or in which regression paths are fixed to zero when \code{modelType = "mimic"}. In the case of testing \emph{configural} invariance when \code{modelType = "mgcfa"}, \code{con} is the configural model (implicitly, the unconstrained model is the saturated model, so use the defaults \code{uncon = NULL} and \code{param = NULL}). When \code{modelType = "mimic"}, \code{con} is the MIMIC model in which the covariate predicts the latent construct(s) but no indicators (unless they have already been identified as DIF items).} \item{uncon}{Optional. The unconstrained \code{lavaan} object, in which the parameters specified in \code{param} are freely estimated in all groups. When \code{modelType = "mgcfa"}, only in the case of testing \emph{configural} invariance should \code{uncon = NULL}.
When \code{modelType = "mimic"}, any non-\code{NULL uncon} is silently set to \code{NULL}.} \item{null}{Optional. A \code{lavaan} object, in which an alternative null model is fit (besides the default independence model specified by \code{lavaan}) for the calculation of incremental fit indices. See Widamin & Thompson (2003) for details. If \code{NULL}, \code{lavaan}'s default independence model is used.} \item{param}{An optional character vector or list of character vectors indicating which parameters the user would test for DIF following a rejection of the omnibus null hypothesis tested using (\code{more})\code{AFIs}. Note that \code{param} does not guarantee certain parameters \emph{are} constrained in \code{con}; that is for the user to specify when fitting the model. If users have any "anchor items" that they would never intend to free across groups (or levels of a covariate), these should be excluded from \code{param}; exceptions to a type of parameter can be specified in \code{freeParam}. When \code{modelType = "mgcfa"}, \code{param} indicates which parameters of interest are constrained across groups in \code{con} and are unconstrained in \code{uncon}. Parameter names must match those returned by \code{names(coef(con))}, but omitting any group-specific suffixes (e.g., \code{"f1~1"} rather than \code{"f1~1.g2"}) or user-specified labels (that is, the parameter names must follow the rules of lavaan's \code{\link[lavaan]{model.syntax}}). Alternatively (or additionally), to test all constraints of a certain type (or multiple types) of parameter in \code{con}, \code{param} may take any combination of the following values: \code{"loadings"}, \code{"intercepts"}, \code{"thresholds"}, \code{"residuals"}, \code{"residual.covariances"}, \code{"means"}, \code{"lv.variances"}, and/or \code{"lv.covariances"}. When \code{modelType = "mimic"}, \code{param} must be a vector of individual parameters or a list of character strings to be passed one-at-a-time to \code{\link[lavaan]{lavTestScore}}\code{(object = con, add = param[i])}, indicating which (sets of) regression paths fixed to zero in \code{con} that the user would consider freeing (i.e., exclude anchor items). If \code{modelType = "mimic"} and \code{param} is a list of character strings, the multivariate test statistic will be saved for each list element instead of 1-\emph{df} modification indices for each individual parameter, and \code{names(param)} will name the rows of the \code{MI.obs} slot (see \linkS4class{permuteMeasEq}). Set \code{param = NULL} (default) to avoid collecting modification indices for any follow-up tests.} \item{freeParam}{An optional character vector, silently ignored when \code{modelType = "mimic"}. If \code{param} includes a type of parameter (e.g., \code{"loadings"}), \code{freeParam} indicates exceptions (i.e., anchor items) that the user would \emph{not} intend to free across groups and should therefore be ignored when calculating \emph{p} values adjusted for the number of follow-up tests. Parameter types that are already unconstrained across groups in the fitted \code{con} model (i.e., a \emph{partial} invariance model) will automatically be ignored, so they do not need to be specified in \code{freeParam}. 
Parameter names must match those returned by \code{names(coef(con))}, but omitting any group-specific suffixes (e.g., \code{"f1~1"} rather than \code{"f1~1.g2"}) or user-specified labels (that is, the parameter names must follow the rules of lavaan's \code{\link[lavaan]{model.syntax}}).} \item{covariates}{An optional character vector, only applicable when \code{modelType = "mimic"}. The observed data are partitioned into columns indicated by \code{covariates}, and the rows are permuted simultaneously for the entire set before being merged with the remaining data. Thus, the covariance structure is preserved among the covariates, which is necessary when (e.g.) multiple dummy codes are used to represent a discrete covariate or when covariates interact. If \code{covariates = NULL} when \code{modelType = "mimic"}, the value of \code{covariates} is inferred by searching \code{param} for predictors (i.e., variables appearing after the "\code{~}" operator).} \item{AFIs}{A character vector indicating which alternative fit indices (or chi-squared itself) are to be used to test the multiparameter omnibus null hypothesis that the constraints specified in \code{con} hold in the population. Any fit measures returned by \code{\link[lavaan]{fitMeasures}} may be specified (including constants like \code{"df"}, which would be nonsensical). If both \code{AFIs} and \code{moreAFIs} are \code{NULL}, only \code{"chisq"} will be returned.} \item{moreAFIs}{Optional. A character vector indicating which (if any) alternative fit indices returned by \code{\link[semTools]{moreFitIndices}} are to be used to test the multiparameter omnibus null hypothesis that the constraints specified in \code{con} hold in the population.} \item{maxSparse}{Only applicable when \code{modelType = "mgcfa"} and at least one indicator is \code{ordered}. An integer indicating the maximum number of consecutive times that randomly permuted group assignment can yield a sample in which at least one category (of an \code{ordered} indicator) is unobserved in at least one group, such that the same set of parameters cannot be estimated in each group. If such a sample occurs, group assignment is randomly permuted again, repeatedly until a sample is obtained with all categories observed in all groups. If \code{maxSparse} is exceeded, \code{NA} will be returned for that iteration of the permutation distribution.} \item{maxNonconv}{An integer indicating the maximum number of consecutive times that a random permutation can yield a sample for which the model does not converge on a solution. If such a sample occurs, permutation is attempted repeatedly until a sample is obtained for which the model does converge. If \code{maxNonconv} is exceeded, \code{NA} will be returned for that iteration of the permutation distribution, and a warning will be printed when using \code{show} or \code{summary}.} \item{showProgress}{Logical. Indicates whether to display a progress bar while permuting. Silently set to \code{FALSE} when using parallel options.} \item{warn}{Sets the handling of warning messages when fitting model(s) to permuted data sets. See \code{\link[base]{options}}.} \item{datafun}{An optional function that can be applied to the data (extracted from \code{con}) after each permutation, but before fitting the model(s) to each permutation. The \code{datafun} function must have an argument named \code{data} that accepts a \code{data.frame}, and it must return a \code{data.frame} containing the same column names.
The column order may differ, the values of those columns may differ (so be careful!), and any additional columns will be ignored when fitting the model, but an error will result if any column names required by the model syntax do not appear in the transformed data set. Although available for any \code{modelType}, \code{datafun} may be useful when using the MIMIC method to test for nonuniform DIF (metric/weak invariance) by using product indicators for a latent factor representing the interaction between a factor and one of the \code{covariates}, in which case the product indicators would need to be recalculated after each permutation of the \code{covariates}. To access other R objects used within \code{permuteMeasEq}, the arguments to \code{datafun} may also contain any subset of the following: \code{"con"}, \code{"uncon"}, \code{"null"}, \code{"param"}, \code{"freeParam"}, \code{"covariates"}, \code{"AFIs"}, \code{"moreAFIs"}, \code{"maxSparse"}, \code{"maxNonconv"}, and/or \code{"iseed"}. The values for those arguments will be the same as the values supplied to \code{permuteMeasEq}.} \item{extra}{An optional function that can be applied to any (or all) of the fitted lavaan objects (\code{con}, \code{uncon}, and/or \code{null}). This function will also be applied after fitting the model(s) to each permuted data set. To access the R objects used within \code{permuteMeasEq}, the arguments to \code{extra} must be any subset of the following: \code{"con"}, \code{"uncon"}, \code{"null"}, \code{"param"}, \code{"freeParam"}, \code{"covariates"}, \code{"AFIs"}, \code{"moreAFIs"}, \code{"maxSparse"}, \code{"maxNonconv"}, and/or \code{"iseed"}. The values for those arguments will be the same as the values supplied to \code{permuteMeasEq}. The \code{extra} function must return a named \code{numeric} vector or a named \code{list} of scalars (i.e., a \code{list} of \code{numeric} vectors of \code{length == 1}). Any unnamed elements (e.g., \code{""} or \code{NULL}) of the returned object will result in an error.} \item{parallelType}{The type of parallel operation to be used (if any). The default is \code{"none"}. Forking is not possible on Windows, so if \code{"multicore"} is requested on a Windows machine, the request will be changed to \code{"snow"} with a message.} \item{ncpus}{Integer: number of processes to be used in parallel operation. If \code{NULL} (the default) and \code{parallelType \%in\% c("multicore","snow")}, the default is one less than the maximum number of processors detected by \code{\link[parallel]{detectCores}}. This default is also silently set if the user specifies more than the number of processors detected.} \item{cl}{An optional \pkg{parallel} or \pkg{snow} cluster for use when \code{parallelType = "snow"}. If \code{NULL}, a \code{"PSOCK"} cluster on the local machine is created for the duration of the \code{permuteMeasEq} call. If a valid \code{\link[parallel]{makeCluster}} object is supplied, \code{parallelType} is silently set to \code{"snow"}, and \code{ncpus} is silently set to \code{length(cl)}.} \item{iseed}{Integer: Only used to set the states of the RNG when using parallel options, in which case \code{\link[base]{RNGkind}} is set to \code{"L'Ecuyer-CMRG"} with a message. See \code{\link[parallel]{clusterSetRNGStream}} and Section 6 of \code{vignette("parallel", "parallel")} for more details. If the user supplies an invalid value, \code{iseed} is silently set to the default (12345).
To set the state of the RNG when not using parallel options, call \code{\link[base]{set.seed}} before calling \code{permuteMeasEq}.} } \value{ The \linkS4class{permuteMeasEq} object representing the results of testing measurement equivalence (the multiparameter omnibus test) and DIF (modification indices), as well as diagnostics and any \code{extra} output. } \description{ The function \code{permuteMeasEq} provides tests of hypotheses involving measurement equivalence, in one of two frameworks: multigroup CFA or MIMIC models. } \details{ The function \code{permuteMeasEq} provides tests of hypotheses involving measurement equivalence, in one of two frameworks: \enumerate{ \item For multiple-group CFA models, provide a pair of nested lavaan objects, the less constrained of which (\code{uncon}) freely estimates a set of measurement parameters (e.g., factor loadings, intercepts, or thresholds; specified in \code{param}) in all groups, and the more constrained of which (\code{con}) constrains those measurement parameters to equality across groups. Group assignment is repeatedly permuted and the models are fit to each permutation, in order to produce an empirical distribution under the null hypothesis of no group differences, both for (a) changes in user-specified fit measures (see \code{AFIs} and \code{moreAFIs}) and for (b) the maximum modification index among the user-specified equality constraints. Configural invariance can also be tested by providing that fitted lavaan object to \code{con} and leaving \code{uncon = NULL}, in which case \code{param} must be \code{NULL} as well. \item In MIMIC models, one or a set of continuous and/or discrete \code{covariates} can be permuted, and a constrained model is fit to each permutation in order to provide a distribution of any fit measures (namely, the maximum modification index among fixed parameters in \code{param}) under the null hypothesis of measurement equivalence across levels of those covariates. } In either framework, modification indices for equality constraints or fixed parameters specified in \code{param} are calculated from the constrained model (\code{con}) using the function \code{\link[lavaan]{lavTestScore}}. For multiple-group CFA models, the multiparameter omnibus null hypothesis of measurement equivalence/invariance is that there are no group differences in any measurement parameters (of a particular type). This can be tested using the \code{anova} method on nested \code{lavaan} objects, as seen in the output of \code{\link[semTools]{measurementInvariance}}, or by inspecting the change in alternative fit indices (AFIs) such as the CFI. The permutation randomization method employed by \code{permuteMeasEq} generates an empirical distribution of any \code{AFIs} under the null hypothesis, so the user is not restricted to using fixed cutoffs proposed by Cheung & Rensvold (2002), Chen (2007), or Meade, Johnson, & Braddy (2008). If the multiparameter omnibus null hypothesis is rejected, partial invariance can still be established by freeing invalid equality constraints, as long as equality constraints are valid for at least two indicators per factor. Modification indices can be calculated from the constrained model (\code{con}), but multiple testing leads to inflation of Type I error rates.
The permutation randomization method employed by \code{permuteMeasEq} creates a distribution of the maximum modification index if the null hypothesis is true, which allows the user to control the familywise Type I error rate in a manner similar to Tukey's \emph{q} (studentized range) distribution for the Honestly Significant Difference (HSD) post hoc test. For MIMIC models, DIF can be tested by comparing modification indices of regression paths to the permutation distribution of the maximum modification index, which controls the familywise Type I error rate. The MIMIC approach could also be applied with multiple-group models, but the grouping variable would not be permuted; rather, the covariates would be permuted separately within each group to preserve between-group differences. So whether parameters are constrained or unconstrained across groups, the MIMIC approach is only for testing null hypotheses about the effects of \code{covariates} on indicators, controlling for common factors. In either framework, \code{\link[lavaan]{lavaan}}'s \code{group.label} argument is used to preserve the order of groups seen in \code{con} when permuting the data. } \examples{ \dontrun{ ######################## ## Multiple-Group CFA ## ######################## ## create 3-group data in lavaan example(cfa) data HS <- lavaan::HolzingerSwineford1939 HS$ageGroup <- ifelse(HS$ageyr < 13, "preteen", ifelse(HS$ageyr > 13, "teen", "thirteen")) ## specify and fit an appropriate null model for incremental fit indices mod.null <- c(paste0("x", 1:9, " ~ c(T", 1:9, ", T", 1:9, ", T", 1:9, ")*1"), paste0("x", 1:9, " ~~ c(L", 1:9, ", L", 1:9, ", L", 1:9, ")*x", 1:9)) fit.null <- cfa(mod.null, data = HS, group = "ageGroup") ## fit target model with varying levels of measurement equivalence mod.config <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' miout <- measurementInvariance(mod.config, data = HS, std.lv = TRUE, group = "ageGroup") (fit.config <- miout[["fit.configural"]]) (fit.metric <- miout[["fit.loadings"]]) (fit.scalar <- miout[["fit.intercepts"]]) ####################### Permutation Method ## fit indices of interest for multiparameter omnibus test myAFIs <- c("chisq","cfi","rmsea","mfi","aic") moreAFIs <- c("gammaHat","adjGammaHat") ## Use only 20 permutations for a demo. In practice, ## use > 1000 to reduce sampling variability of estimated p values ## test configural invariance set.seed(12345) out.config <- permuteMeasEq(nPermute = 20, con = fit.config) out.config ## test metric equivalence set.seed(12345) # same permutations out.metric <- permuteMeasEq(nPermute = 20, uncon = fit.config, con = fit.metric, param = "loadings", AFIs = myAFIs, moreAFIs = moreAFIs, null = fit.null) summary(out.metric, nd = 4) ## test scalar equivalence set.seed(12345) # same permutations out.scalar <- permuteMeasEq(nPermute = 20, uncon = fit.metric, con = fit.scalar, param = "intercepts", AFIs = myAFIs, moreAFIs = moreAFIs, null = fit.null) summary(out.scalar) ## Not much to see without significant DIF. ## Try using an absurdly high alpha level for illustration. 
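## (Note: the alpha argument below only changes which DIF tests the summary
## flags as significant; no models are refit, so an inflated alpha value
## is harmless for a demonstration.)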
outsum <- summary(out.scalar, alpha = .50) ## notice that the returned object is the table of DIF tests outsum ## visualize permutation distribution hist(out.config, AFI = "chisq") hist(out.metric, AFI = "chisq", nd = 2, alpha = .01, legendArgs = list(x = "topright")) hist(out.scalar, AFI = "cfi", printLegend = FALSE) ####################### Extra Output ## function to calculate expected change of Group-2 and -3 latent means if ## each intercept constraint were released extra <- function(con) { output <- list() output["x1.vis2"] <- lavTestScore(con, release = 19:20, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[70] output["x1.vis3"] <- lavTestScore(con, release = 19:20, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[106] output["x2.vis2"] <- lavTestScore(con, release = 21:22, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[70] output["x2.vis3"] <- lavTestScore(con, release = 21:22, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[106] output["x3.vis2"] <- lavTestScore(con, release = 23:24, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[70] output["x3.vis3"] <- lavTestScore(con, release = 23:24, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[106] output["x4.txt2"] <- lavTestScore(con, release = 25:26, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[71] output["x4.txt3"] <- lavTestScore(con, release = 25:26, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[107] output["x5.txt2"] <- lavTestScore(con, release = 27:28, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[71] output["x5.txt3"] <- lavTestScore(con, release = 27:28, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[107] output["x6.txt2"] <- lavTestScore(con, release = 29:30, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[71] output["x6.txt3"] <- lavTestScore(con, release = 29:30, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[107] output["x7.spd2"] <- lavTestScore(con, release = 31:32, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[72] output["x7.spd3"] <- lavTestScore(con, release = 31:32, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[108] output["x8.spd2"] <- lavTestScore(con, release = 33:34, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[72] output["x8.spd3"] <- lavTestScore(con, release = 33:34, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[108] output["x9.spd2"] <- lavTestScore(con, release = 35:36, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[72] output["x9.spd3"] <- lavTestScore(con, release = 35:36, univariate = FALSE, epc = TRUE, warn = FALSE)$epc$epc[108] output } ## observed EPC extra(fit.scalar) ## permutation results, including extra output set.seed(12345) # same permutations out.scalar <- permuteMeasEq(nPermute = 20, uncon = fit.metric, con = fit.scalar, param = "intercepts", AFIs = myAFIs, moreAFIs = moreAFIs, null = fit.null, extra = extra) ## summarize extra output summary(out.scalar, extra = TRUE) ########### ## MIMIC ## ########### ## Specify Restricted Factor Analysis (RFA) model, equivalent to MIMIC, but ## the factor covaries with the covariate instead of being regressed on it. ## The covariate defines a single-indicator construct, and the ## double-mean-centered products of the indicators define a latent ## interaction between the factor and the covariate. 
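## (The indProd() call below creates these double-mean-centered product
## indicators from the visual indicators and the ageyr covariate.)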
mod.mimic <- ' visual =~ x1 + x2 + x3 age =~ ageyr age.by.vis =~ x1.ageyr + x2.ageyr + x3.ageyr x1 ~~ x1.ageyr x2 ~~ x2.ageyr x3 ~~ x3.ageyr ' HS.orth <- indProd(var1 = paste0("x", 1:3), var2 = "ageyr", match = FALSE, data = HS[ , c("ageyr", paste0("x", 1:3))] ) fit.mimic <- cfa(mod.mimic, data = HS.orth, meanstructure = TRUE) summary(fit.mimic, stand = TRUE) ## Whereas MIMIC models specify direct effects of the covariate on an indicator, ## DIF can be tested in RFA models by specifying free loadings of an indicator ## on the covariate's construct (uniform DIF, scalar invariance) and the ## interaction construct (nonuniform DIF, metric invariance). param <- as.list(paste0("age + age.by.vis =~ x", 1:3)) names(param) <- paste0("x", 1:3) # param <- as.list(paste0("x", 1:3, " ~ age + age.by.vis")) # equivalent ## test both parameters simultaneously for each indicator do.call(rbind, lapply(param, function(x) lavTestScore(fit.mimic, add = x)$test)) ## or test each parameter individually lavTestScore(fit.mimic, add = as.character(param)) ####################### Permutation Method ## function to recalculate interaction terms after permuting the covariate datafun <- function(data) { d <- data[, !names(data) \%in\% paste0("x", 1:3, ".ageyr")] indProd(var1 = paste0("x", 1:3), var2 = "ageyr", match = FALSE, data = d) } set.seed(12345) perm.mimic <- permuteMeasEq(nPermute = 20, modelType = "mimic", con = fit.mimic, param = param, covariates = "ageyr", datafun = datafun) summary(perm.mimic) } } \references{ \bold{Papers about permutation tests of measurement equivalence:} Jorgensen, T. D., Kite, B. A., Chen, P.-Y., & Short, S. D. (in press). Permutation randomization methods for testing measurement equivalence and detecting differential item functioning in multiple-group confirmatory factor analysis. \emph{Psychological Methods}. doi:10.1037/met0000152 Kite, B. A., Jorgensen, T. D., & Chen, P.-Y. (in press). Random permutation testing applied to measurement invariance testing with ordered-categorical indicators. \emph{Structural Equation Modeling}. doi:10.1080/10705511.2017.1421467 Jorgensen, T. D. (2017). Applying permutation tests and multivariate modification indices to configurally invariant models that need respecification. \emph{Frontiers in Psychology, 8}(1455). doi:10.3389/fpsyg.2017.01455 \bold{Additional reading:} Chen, F. F. (2007). Sensitivity of goodness of fit indexes to lack of measurement invariance. \emph{Structural Equation Modeling, 14}(3), 464--504. doi:10.1080/10705510701301834 Cheung, G. W., & Rensvold, R. B. (2002). Evaluating goodness-of-fit indexes for testing measurement invariance. \emph{Structural Equation Modeling, 9}(2), 233--255. doi:10.1207/S15328007SEM0902_5 Meade, A. W., Johnson, E. C., & Braddy, P. W. (2008). Power and sensitivity of alternative fit indices in tests of measurement invariance. \emph{Journal of Applied Psychology, 93}(3), 568--592. doi:10.1037/0021-9010.93.3.568 Widaman, K. F., & Thompson, J. S. (2003). On specifying the null model for incremental fit indices in structural equation modeling. \emph{Psychological Methods, 8}(1), 16--37. doi:10.1037/1082-989X.8.1.16 } \seealso{ \code{\link[stats]{TukeyHSD}}, \code{\link[lavaan]{lavTestScore}}, \code{\link[semTools]{measurementInvariance}}, \code{\link[semTools]{measurementInvarianceCat}} } \author{ Terrence D.
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/chisqSmallN.Rd0000644000176200001440000000421313531516335015344 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitIndices.R \name{chisqSmallN} \alias{chisqSmallN} \title{\emph{k}-factor correction for \eqn{\chi^2} test statistic} \usage{ chisqSmallN(fit0, fit1 = NULL, ...) } \arguments{ \item{fit0}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.} \item{fit1}{Optional additional \linkS4class{lavaan} model, in which \code{fit0} is nested. If \code{fit0} has fewer \emph{df} than \code{fit1}, the models will be swapped, still on the assumption that they are nested.} \item{\dots}{Additional arguments to the \code{\link[lavaan]{lavTestLRT}} function.} } \value{ A numeric vector including the original (unadjusted) chi-squared statistic, the \emph{k}-factor correction, the corrected test statistic, the \emph{df} for the test, and the \emph{p} value for the test under the null hypothesis that the model fits perfectly (or that the 2 models have equivalent fit). } \description{ Calculate the \emph{k}-factor correction for the \eqn{\chi^2} model-fit test statistic to adjust for small sample size. } \details{ The \emph{k}-factor correction (Nevitt & Hancock, 2004) is a global fit index which can be computed by: \deqn{ kc = 1 - \frac{2 \times P + 4 \times K + 5}{6 \times N}} where \eqn{P} is the number of observed variables, \eqn{K} is the number of common factors, and \eqn{N} is the sample size when using normal likelihood, or \eqn{N - 1} when using \code{likelihood = 'wishart'}. } \examples{ HS.model <- ' visual =~ x1 + b1*x2 + x3 textual =~ x4 + b2*x5 + x6 speed =~ x7 + b3*x8 + x9 ' fit1 <- cfa(HS.model, data = HolzingerSwineford1939) ## test a single model (implicitly compared to a saturated model) chisqSmallN(fit1) ## fit a more constrained model fit0 <- cfa(HS.model, data = HolzingerSwineford1939, orthogonal = TRUE) ## compare 2 models chisqSmallN(fit1, fit0) } \references{ Nevitt, J., & Hancock, G. R. (2004). Evaluating small sample approaches for model test statistics in structural equation modeling. \emph{Multivariate Behavioral Research, 39}(3), 439--478. doi:10.1207/S15327906MBR3903_3 } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/rotate.Rd0000644000176200001440000000524013432435606014426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/efa.R \name{orthRotate} \alias{orthRotate} \alias{oblqRotate} \alias{funRotate} \title{Implement orthogonal or oblique rotation} \usage{ orthRotate(object, method = "varimax", ...) oblqRotate(object, method = "quartimin", ...) funRotate(object, fun, ...) } \arguments{ \item{object}{A \code{lavaan} output} \item{method}{The rotation method, such as \code{"varimax"}, \code{"quartimax"}, \code{"geomin"}, \code{"oblimin"}, or any gradient projection algorithms listed in the \code{\link[GPArotation]{GPA}} function in the \code{GPArotation} package.} \item{\dots}{Additional arguments for the \code{\link[GPArotation]{GPForth}} function (for \code{orthRotate}), the \code{\link[GPArotation]{GPFoblq}} function (for \code{oblqRotate}), or the function that users provide in the \code{fun} argument.} \item{fun}{The name of the function that users wish to use for rotating the standardized solution. The functions must take the first argument as the standardized loading matrix and return the \code{GPArotation} object.
Check this page for available functions: \code{\link[GPArotation]{rotations}}.} } \value{ An \linkS4class{EFA} object that saves the rotated EFA solution } \description{ These functions will implement orthogonal or oblique rotation on standardized factor loadings from a \code{lavaan} output. } \details{ These functions will rotate the unrotated standardized factor loadings by orthogonal rotation using the \code{\link[GPArotation]{GPForth}} function or oblique rotation using the \code{\link[GPArotation]{GPFoblq}} function in the \code{GPArotation} package. The resulting rotation matrix will be used to calculate standard errors of the rotated standardized factor loadings by the delta method, numerically computing the Jacobian matrix with the \code{\link[lavaan]{lav_func_jacobian_simple}} function. } \examples{ \dontrun{ unrotated <- efaUnrotate(HolzingerSwineford1939, nf = 3, varList = paste0("x", 1:9), estimator = "mlr") # Orthogonal varimax out.varimax <- orthRotate(unrotated, method = "varimax") summary(out.varimax, sort = FALSE, suppress = 0.3) # Orthogonal Quartimin orthRotate(unrotated, method = "quartimin") # Oblique Quartimin oblqRotate(unrotated, method = "quartimin") # Geomin oblqRotate(unrotated, method = "geomin") # Target rotation library(GPArotation) target <- matrix(0, 9, 3) target[1:3, 1] <- NA target[4:6, 2] <- NA target[7:9, 3] <- NA colnames(target) <- c("factor1", "factor2", "factor3") ## This function works with GPArotation version 2012.3-1 funRotate(unrotated, fun = "targetQ", Target = target) } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/clipboard.Rd0000644000176200001440000000632513432435606015074 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clipboard.R \name{clipboard} \alias{clipboard} \alias{saveFile} \title{Copy or save the result of \code{lavaan} or \code{FitDiff} objects into a clipboard or a file} \usage{ clipboard(object, what = "summary", ...) saveFile(object, file, what = "summary", tableFormat = FALSE, fit.measures = "default", writeArgs = list(), ...) } \arguments{ \item{object}{The \code{lavaan} or \code{\linkS4class{FitDiff}} object} \item{what}{The attributes of the \code{lavaan} object to be copied to the clipboard. \code{"summary"} is to copy the screen output provided by the \code{summary} function. \code{"mifit"} is to copy the result from the \code{\link{miPowerFit}} function. Other attributes listed in the \code{inspect} method in the \link[lavaan]{lavaan-class} could also be used, such as \code{"coef"}, \code{"se"}, \code{"fit"}, \code{"samp"}, and so on. For the \code{\linkS4class{FitDiff}} object, this argument is not active yet.} \item{\dots}{Additional argument listed in the \code{\link{miPowerFit}} function (for \code{lavaan} object only).} \item{file}{A file name used for saving the result} \item{tableFormat}{If \code{TRUE}, save the result in table format using tabs for separation. Otherwise, save the result as the output screen printed in the R console.} \item{fit.measures}{\code{character} vector specifying names of fit measures returned by \code{\link[lavaan]{fitMeasures}} to be copied/saved. Only relevant if \code{object} is class \code{\linkS4class{FitDiff}}.} \item{writeArgs}{\code{list} of additional arguments to be passed to \code{\link[utils]{write.table}}} } \value{ The resulting output will be saved into a clipboard or a file. If using the \code{clipboard} function, users may paste it into other applications.
} \description{ Copy or save the result of a \code{lavaan} or \code{\linkS4class{FitDiff}} object to the clipboard or a file. From the clipboard, users may paste the result into Microsoft Excel or another spreadsheet application to create a table of the output. } \examples{ \dontrun{ library(lavaan) HW.model <- ' visual =~ x1 + c1*x2 + x3 textual =~ x4 + c1*x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HW.model, data=HolzingerSwineford1939, group="school", meanstructure=TRUE) # Copy the summary of the lavaan object clipboard(fit) # Copy the modification indices and the model fit from the miPowerFit function clipboard(fit, "mifit") # Copy the parameter estimates clipboard(fit, "coef") # Copy the standard errors clipboard(fit, "se") # Copy the sample statistics clipboard(fit, "samp") # Copy the fit measures clipboard(fit, "fit") # Save the summary of the lavaan object saveFile(fit, "out.txt") # Save the modification indices and the model fit from the miPowerFit function saveFile(fit, "out.txt", "mifit") # Save the parameter estimates saveFile(fit, "out.txt", "coef") # Save the standard errors saveFile(fit, "out.txt", "se") # Save the sample statistics saveFile(fit, "out.txt", "samp") # Save the fit measures saveFile(fit, "out.txt", "fit") } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/plotRMSEAdist.Rd0000644000176200001440000000606213432435606015565 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/powerAnalysisRMSEA.R \name{plotRMSEAdist} \alias{plotRMSEAdist} \title{Plot the sampling distributions of RMSEA} \usage{ plotRMSEAdist(rmsea, n, df, ptile = NULL, caption = NULL, rmseaScale = TRUE, group = 1) } \arguments{ \item{rmsea}{The vector of RMSEA values to be plotted} \item{n}{Sample size of a dataset} \item{df}{Model degrees of freedom} \item{ptile}{The percentile rank of the distribution of the first RMSEA value, at which a vertical line will be drawn in the resulting graph} \item{caption}{A vector of names for each element of \code{rmsea}} \item{rmseaScale}{If \code{TRUE}, the RMSEA scale is used in the x-axis. If \code{FALSE}, the chi-square scale is used in the x-axis.} \item{group}{The number of groups used to calculate RMSEA.} } \description{ Plots the sampling distributions of RMSEA based on the noncentral chi-square distributions } \details{ This function creates overlapping plots of the sampling distribution of RMSEA based on the noncentral \eqn{\chi^2} distribution (MacCallum, Browne, & Sugawara, 1996). First, the noncentrality parameter (\eqn{\lambda}) is calculated from RMSEA (Steiger, 1998; Dudgeon, 2004) by \deqn{\lambda = (N - 1)d\varepsilon^2 / K,} where \eqn{N} is the sample size, \eqn{d} is the model degrees of freedom, \eqn{K} is the number of groups, and \eqn{\varepsilon} is the population RMSEA. Next, the noncentral \eqn{\chi^2} distribution with a specified \emph{df} and noncentrality parameter is plotted. Thus, the x-axis represents the sample \eqn{\chi^2} value. The sample \eqn{\chi^2} value can be transformed to the sample RMSEA scale (\eqn{\hat{\varepsilon}}) by \deqn{\hat{\varepsilon} = \sqrt{K}\sqrt{\frac{\chi^2 - d}{(N - 1)d}},} where \eqn{\chi^2} is the \eqn{\chi^2} value obtained from the noncentral \eqn{\chi^2} distribution. } \examples{ plotRMSEAdist(c(.05, .08), n = 200, df = 20, ptile = .95, rmseaScale = TRUE) plotRMSEAdist(c(.05, .01), n = 200, df = 20, ptile = .05, rmseaScale = FALSE) } \references{ Dudgeon, P.
(2004). A note on extending Steiger's (1998) multiple sample RMSEA adjustment to other noncentrality parameter-based statistics. \emph{Structural Equation Modeling, 11}(3), 305--319. doi:10.1207/s15328007sem1103_1 MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis and determination of sample size for covariance structure modeling. \emph{Psychological Methods, 1}(2), 130--149. doi:10.1037/1082-989X.1.2.130 Steiger, J. H. (1998). A note on multiple sample extensions of the RMSEA fit index. \emph{Structural Equation Modeling, 5}(4), 411--419. doi:10.1080/10705519809540115 } \seealso{ \itemize{ \item \code{\link{plotRMSEApower}} to plot the statistical power based on population RMSEA given the sample size \item \code{\link{findRMSEApower}} to find the statistical power based on population RMSEA given a sample size \item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for a given statistical power based on population RMSEA } } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/modindices.mi.Rd0000644000176200001440000001601513532031247015646 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runMI-modification.R \name{modindices.mi} \alias{modindices.mi} \alias{modificationIndices.mi} \alias{modificationindices.mi} \title{Modification Indices for Multiple Imputations} \usage{ modindices.mi(object, test = c("D2", "D1"), omit.imps = c("no.conv", "no.se"), standardized = TRUE, cov.std = TRUE, information = "expected", power = FALSE, delta = 0.1, alpha = 0.05, high.power = 0.75, sort. = FALSE, minimum.value = 0, maximum.number = nrow(LIST), na.remove = TRUE, op = NULL) modificationIndices.mi(object, test = c("D2", "D1"), omit.imps = c("no.conv", "no.se"), standardized = TRUE, cov.std = TRUE, information = "expected", power = FALSE, delta = 0.1, alpha = 0.05, high.power = 0.75, sort. = FALSE, minimum.value = 0, maximum.number = nrow(LIST), na.remove = TRUE, op = NULL) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan.mi}}} \item{test}{\code{character} indicating which pooling method to use. \code{test = "D2"} (default) indicates that modification indices that were calculated within each imputed data set will be pooled across imputations, as described in Li, Meng, Raghunathan, & Rubin (1991) and Enders (2010). \code{"D1"} indicates Li et al.'s (1991) proposed Wald test will be applied to the gradient and information, and those pooled values will be used to calculate modification indices in the usual manner.} \item{omit.imps}{\code{character} vector specifying criteria for omitting imputations from pooled results. Can include any of \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the default setting, which excludes any imputations that did not converge or for which standard errors could not be computed. The last option (\code{"no.npd"}) would exclude any imputations which yielded a nonpositive definite covariance matrix for observed or latent variables, which would include any "improper solutions" such as Heywood cases.} \item{standardized}{\code{logical}. If \code{TRUE}, two extra columns (\code{$sepc.lv} and \code{$sepc.all}) will contain standardized values for the EPCs. In the first column (\code{$sepc.lv}), standardization is based on the variances of the (continuous) latent variables. In the second column (\code{$sepc.all}), standardization is based on the variances of both (continuous) observed and latent variables.
(Residual) covariances are standardized using (residual) variances.} \item{cov.std}{\code{logical}. \code{TRUE} if \code{test == "D2"}. If \code{TRUE} (default), the (residual) observed covariances are scaled by the square-root of the diagonal elements of the \eqn{\Theta} matrix, and the (residual) latent covariances are scaled by the square-root of the diagonal elements of the \eqn{\Psi} matrix. If \code{FALSE}, the (residual) observed covariances are scaled by the square-root of the diagonal elements of the model-implied covariance matrix of observed variables (\eqn{\Sigma}), and the (residual) latent covariances are scaled by the square-root of the diagonal elements of the model-implied covariance matrix of the latent variables.} \item{information}{\code{character} indicating the type of information matrix to use (check \code{\link[lavaan]{lavInspect}} for available options). \code{"expected"} information is the default, which provides better control of Type I errors.} \item{power}{\code{logical}. If \code{TRUE}, the (post-hoc) power is computed for each modification index, using the values of \code{delta} and \code{alpha}.} \item{delta}{The value of the effect size, as used in the post-hoc power computation, currently using the unstandardized metric of the \code{$epc} column.} \item{alpha}{The significance level used for deciding if the modification index is statistically significant or not.} \item{high.power}{If the computed power is higher than this cutoff value, the power is considered 'high'. If not, the power is considered 'low'. This affects the values in the \code{$decision} column in the output.} \item{sort.}{\code{logical}. If \code{TRUE}, sort the output using the values of the modification indices. Higher values appear first.} \item{minimum.value}{\code{numeric}. Filter output and only show rows with a modification index value equal to or higher than this minimum value.} \item{maximum.number}{\code{integer}. Filter output and only show the first \code{maximum.number} rows. Most useful when combined with the \code{sort.} option.} \item{na.remove}{\code{logical}. If \code{TRUE} (default), filter output by removing all rows with \code{NA} values for the modification indices.} \item{op}{\code{character} string. Filter the output by selecting only those rows with operator \code{op}.} } \value{ A \code{data.frame} containing modification indices and (S)EPCs. } \description{ Modification indices (1-\emph{df} Lagrange multiplier tests) from a latent variable model fitted to multiple imputed data sets. Statistics for releasing one or more fixed or constrained parameters in the model can be calculated by pooling the gradient and information matrices across imputed data sets in a method analogous to the Wald test proposed by Li, Meng, Raghunathan, & Rubin (1991), or by pooling the complete-data score-test statistics across imputed data sets (Li et al., 1991). } \note{ When \code{test = "D2"}, each (S)EPC will be pooled by taking its average across imputations. When \code{test = "D1"}, EPCs will be calculated in the standard way using the pooled gradient and information, and SEPCs will be calculated by standardizing the EPCs using model-implied (residual) variances.
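For example (a sketch of the \code{"D2"} pooling rule only, not of the internal code), the pooled EPC for a given parameter is simply \deqn{\overline{EPC} = \frac{1}{M} \sum_{m = 1}^{M} EPC_m ,} where \eqn{M} is the number of retained imputations and \eqn{EPC_m} is the expected parameter change computed in imputation \eqn{m}.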
} \examples{ \dontrun{ ## impose missing data for example HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), "ageyr","agemo","school")] set.seed(12345) HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) age <- HSMiss$ageyr + HSMiss$agemo/12 HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ## impute missing data library(Amelia) set.seed(12345) HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) imps <- HS.amelia$imputations ## specify CFA model from lavaan's ?cfa help page HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' out <- cfa.mi(HS.model, data = imps) modindices.mi(out) # default: Li et al.'s (1991) "D2" method modindices.mi(out, test = "D1") # Li et al.'s (1991) "D1" method } } \references{ Enders, C. K. (2010). \emph{Applied missing data analysis}. New York, NY: Guilford. Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). Significance levels from repeated \emph{p}-values with multiply-imputed data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from https://www.jstor.org/stable/24303994 } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) Adapted from \pkg{lavaan} source code, written by Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) \code{test = "D1"} method proposed by Maxwell Mansolf (University of California, Los Angeles; \email{mamansolf@gmail.com}) } semTools/man/calculate.D2.Rd0000644000176200001440000000577613532031247015337 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runMI.R \name{calculate.D2} \alias{calculate.D2} \title{Calculate the "D2" statistic} \usage{ calculate.D2(w, DF = 0L, asymptotic = FALSE) } \arguments{ \item{w}{\code{numeric} vector of Wald \eqn{\chi^2} statistics. Can also be Wald \emph{z} statistics, which will be internally squared to make \eqn{\chi^2} statistics with one \emph{df} (must set \code{DF = 0L}).} \item{DF}{degrees of freedom (\emph{df}) of the \eqn{\chi^2} statistics. If \code{DF = 0L} (default), \code{w} is assumed to contain \emph{z} statistics, which will be internally squared.} \item{asymptotic}{\code{logical}. If \code{FALSE} (default), the pooled test will be returned as an \emph{F}-distributed statistic with numerator (\code{df1}) and denominator (\code{df2}) degrees of freedom. If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its \code{df1} on the assumption that its \code{df2} is sufficiently large that the statistic will be asymptotically \eqn{\chi^2} distributed with \code{df1}.} } \value{ A \code{numeric} vector containing the test statistic, \emph{df}, its \emph{p} value, and 2 missing-data diagnostics: the relative increase in variance (RIV, or average for multiparameter tests: ARIV) and the fraction of missing information (FMI = ARIV / (1 + ARIV)). } \description{ This is a utility function used to calculate the "D2" statistic for pooling test statistics across multiple imputations. This function is called by several functions used for \code{\linkS4class{lavaan.mi}} objects, such as \code{\link{lavTestLRT.mi}}, \code{\link{lavTestWald.mi}}, and \code{\link{lavTestScore.mi}}. But this function can be used for any general scenario because it only requires a vector of \eqn{\chi^2} statistics (one from each imputation) and the degrees of freedom for the test statistic. See Li, Meng, Raghunathan, & Rubin (1991) and Enders (2010, chapter 8) for details about how it is calculated.
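As a rough sketch of that computation (following the formulas summarized in Enders, 2010, chapter 8; the stand-alone function below is illustrative only, not the package's internal implementation):

\preformatted{
## illustrative D2 pooling of m chi-squared statistics 'w', each with 'DF' df
poolD2 <- function(w, DF) {
  m     <- length(w)
  ariv  <- (1 + 1/m) * var(sqrt(w))  # average relative increase in variance
  Fstat <- (mean(w)/DF - (m + 1)/(m - 1) * ariv) / (1 + ariv)
  df2   <- DF^(-3/m) * (m - 1) * (1 + 1/ariv)^2  # denominator df
  c(F = Fstat, df1 = DF, df2 = df2,
    p = pf(Fstat, DF, df2, lower.tail = FALSE))
}
}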
} \examples{ ## generate a vector of chi-squared values, just for example DF <- 3 # degrees of freedom M <- 20 # number of imputations CHI <- rchisq(M, DF) ## pool the "results" calculate.D2(CHI, DF) # by default, an F statistic is returned calculate.D2(CHI, DF, asymptotic = TRUE) # asymptotically chi-squared ## generate standard-normal values, for an example of Wald z tests Z <- rnorm(M) calculate.D2(Z) # default DF = 0 will square Z to make chisq(DF = 1) ## F test is equivalent to a t test with the denominator DF } \references{ Enders, C. K. (2010). \emph{Applied missing data analysis}. New York, NY: Guilford. Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). Significance levels from repeated \emph{p}-values with multiply-imputed data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from https://www.jstor.org/stable/24303994 } \seealso{ \code{\link{lavTestLRT.mi}}, \code{\link{lavTestWald.mi}}, \code{\link{lavTestScore.mi}} } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/partialInvariance.Rd0000644000176200001440000003117413432435606016571 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/partialInvariance.R \name{partialInvariance} \alias{partialInvariance} \alias{partialInvarianceCat} \title{Partial Measurement Invariance Testing Across Groups} \usage{ partialInvariance(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", fbound = 2, return.fit = FALSE, method = "satorra.bentler.2001") partialInvarianceCat(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", return.fit = FALSE, method = "satorra.bentler.2001") } \arguments{ \item{fit}{A list of models for invariance testing. Each model should be assigned an appropriate name (see details). The result from \code{\link{measurementInvariance}} or \code{\link{measurementInvarianceCat}} could be used in this argument directly.} \item{type}{The type of invariance testing: "metric", "scalar", "strict", or "means"} \item{free}{A vector of variable names that are free across groups in advance. If partial mean invariance is tested, this argument represents a vector of factor names that are free across groups.} \item{fix}{A vector of variable names that are constrained to be equal across groups in advance. If partial mean invariance is tested, this argument represents a vector of factor names that are fixed across groups.} \item{refgroup}{The reference group used to make the effect size comparison with the other groups.} \item{poolvar}{If \code{TRUE}, the variances are pooled across groups for standardization. Otherwise, the variances of the reference group are used for standardization.} \item{p.adjust}{The method used to adjust p values. See \code{\link[stats]{p.adjust}} for the options for adjusting p values. The default is to not use any corrections.} \item{fbound}{The \emph{z}-score bound of the factor used to calculate the effect size of the loading difference proposed by Millsap and Olivera-Aguilar (2012).} \item{return.fit}{\code{logical}. If \code{TRUE}, return the submodels fitted by this function.} \item{method}{The method used to calculate the likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} } \value{ A list of results is provided.
The list will consist of at least two elements: \enumerate{ \item \code{estimates}: The results of parameter estimates including pooled estimates (\code{poolest}), the estimates for each group, standardized estimates for each group (\code{std}), the difference in standardized values, and the effect size statistic (\emph{q} for factor loading difference and \emph{h} for error variance difference). See the details of this effect size statistic by running \code{vignette("partialInvariance")}. In the \code{partialInvariance} function, the additional effect statistics proposed by Millsap and Olivera-Aguilar (2012) are provided. For factor loading, the additional outputs are the observed mean difference (\code{diff_mean}), the mean difference if factor scores are low (\code{low_fscore}), and the mean difference if factor scores are high (\code{high_fscore}). The low factor score is calculated by (a) finding the factor scores whose \emph{z} scores equal -\code{fbound} (the default is \eqn{-2}) from all groups and (b) picking the minimum value among the factor scores. The high factor score is calculated by (a) finding the factor scores whose \emph{z} scores equal \code{fbound} (default = 2) from all groups and (b) picking the maximum value among the factor scores. For measurement intercepts, the additional outputs are the observed mean difference (\code{diff_mean}) and the proportion of the differences in the intercepts over the observed mean differences (\code{propdiff}). For error variances, the additional outputs are the proportion of the difference in error variances over the difference in observed variances (\code{propdiff}). \item \code{results}: Statistical tests as well as the change in CFI are provided. \eqn{\chi^2} and \emph{p} value are provided for all methods. \item \code{models}: The submodels used in the \code{free} and \code{fix} methods, as well as the nested and parent models. The nested and parent models will be changed from the original models if the \code{free} or \code{fix} arguments are specified. } } \description{ This function provides partial invariance testing by (a) freeing parameters one at a time from the nested model and comparing each result with the original nested model, or (b) fixing (or constraining) parameters one at a time in the parent model and comparing each result with the original parent model. This function only works with congeneric models. The \code{partialInvariance} function is used for continuous variables. The \code{partialInvarianceCat} function is used for categorical variables. } \details{ There are four types of partial invariance testing: \itemize{ \item Partial weak invariance. The model named 'fit.configural' from the list of models is compared with the model named 'fit.loadings'. Each loading will be freed or fixed from the metric and configural invariance models respectively. The modified models are compared with the original model. Note that the objects in the list of models must have the names of "fit.configural" and "fit.loadings". Users may use "metric", "weak", "loading", or "loadings" in the \code{type} argument. Note that, for testing invariance on marker variables, other variables will be assigned as marker variables automatically. \item Partial strong invariance. The model named 'fit.loadings' from the list of models is compared with the model named either 'fit.intercepts' or 'fit.thresholds'. Each intercept will be freed or fixed from the scalar and metric invariance models respectively. The modified models are compared with the original model.
Note that the objects in the list of models must have the names of "fit.loadings" and either "fit.intercepts" or "fit.thresholds". Users may use "scalar", "strong", "intercept", "intercepts", "threshold", or "thresholds" in the \code{type} argument. Note that, for testing invariance on marker variables, other variables will be assigned as marker variables automatically. Note that if all variables are dichotomous, scalar invariance testing is not available. \item Partial strict invariance. The model named either 'fit.intercepts' or 'fit.thresholds' (or 'fit.loadings') from the list of models is compared with the model named 'fit.residuals'. Each residual variance will be freed or fixed from the strict and scalar (or metric) invariance models respectively. The modified models are compared with the original model. Note that the objects in the list of models must have the names of "fit.residuals" and either "fit.intercepts", "fit.thresholds", or "fit.loadings". Users may use "strict", "residual", "residuals", "error", or "errors" in the \code{type} argument. \item Partial mean invariance. The model named either 'fit.intercepts' or 'fit.thresholds' (or 'fit.residuals' or 'fit.loadings') from the list of models is compared with the model named 'fit.means'. Each factor mean will be freed or fixed from the means and scalar (or strict or metric) invariance models respectively. The modified models are compared with the original model. Note that the objects in the list of models must have the names of "fit.means" and either "fit.residuals", "fit.intercepts", "fit.thresholds", or "fit.loadings". Users may use "means" or "mean" in the \code{type} argument. } Three types of comparisons are used in this function: \enumerate{ \item \code{free}: The nested model is used as a template. Then, one parameter indicating the differences between two models is freed. The new model is compared with the nested model. This process is repeated for all differences between two models. The likelihood-ratio test and the difference in CFI are provided. \item \code{fix}: The parent model is used as a template. Then, one parameter indicating the differences between two models is fixed or constrained to be equal to other parameters. The new model is then compared with the parent model. This process is repeated for all differences between two models. The likelihood-ratio test and the difference in CFI are provided. \item \code{wald}: This method is similar to the \code{fix} method. However, instead of building a new model and comparing it with a likelihood-ratio test, a multivariate Wald test is used to compare equality between parameter estimates. See \code{\link[lavaan]{lavTestWald}} for further details. Note that if any row of the contrast matrix does not sum to 0, the Wald test is not provided, such as comparing two means where one of the means is fixed as 0. This test statistic is not as accurate as the likelihood-ratio test provided in \code{fix}. I provide it here in case the likelihood-ratio test fails to converge. } Note that this function does not adjust for the inflated Type I error rate from multiple tests. The degrees of freedom of all tests equal the number of groups minus 1. The details of standardized estimates and the effect size used for each parameter are provided in the vignettes by running \code{vignette("partialInvariance")}.
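As a sketch of what a single comparison in the \code{free} method amounts to, the following illustrative \pkg{lavaan} code (not the internal implementation) frees one loading from a metric-invariance model and tests the difference with a 1-\emph{df} likelihood-ratio test:

\preformatted{
library(lavaan)
HW.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '
fit.loadings <- cfa(HW.model, data = HolzingerSwineford1939,
                    group = "school", group.equal = "loadings")
fit.free.x5  <- cfa(HW.model, data = HolzingerSwineford1939,
                    group = "school", group.equal = "loadings",
                    group.partial = "textual =~ x5")
lavTestLRT(fit.free.x5, fit.loadings)  # 1-df test for the x5 loading
}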
} \examples{ ## Conduct weak invariance testing manually by using fixed-factor ## method of scale identification library(lavaan) conf <- " f1 =~ NA*x1 + x2 + x3 f2 =~ NA*x4 + x5 + x6 f1 ~~ c(1, 1)*f1 f2 ~~ c(1, 1)*f2 " weak <- " f1 =~ NA*x1 + x2 + x3 f2 =~ NA*x4 + x5 + x6 f1 ~~ c(1, NA)*f1 f2 ~~ c(1, NA)*f2 " configural <- cfa(conf, data = HolzingerSwineford1939, std.lv = TRUE, group="school") weak <- cfa(weak, data = HolzingerSwineford1939, group="school", group.equal="loadings") models <- list(fit.configural = configural, fit.loadings = weak) partialInvariance(models, "metric") \dontrun{ partialInvariance(models, "metric", free = "x5") # "x5" is free across groups in advance partialInvariance(models, "metric", fix = "x4") # "x4" is fixed across groups in advance ## Use the result from the measurementInvariance function HW.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' models2 <- measurementInvariance(model = HW.model, data=HolzingerSwineford1939, group="school") partialInvariance(models2, "scalar") ## Conduct weak invariance testing manually by using fixed-factor ## method of scale identification for dichotomous variables f <- rnorm(1000, 0, 1) u1 <- 0.9*f + rnorm(1000, 1, sqrt(0.19)) u2 <- 0.8*f + rnorm(1000, 1, sqrt(0.36)) u3 <- 0.6*f + rnorm(1000, 1, sqrt(0.64)) u4 <- 0.7*f + rnorm(1000, 1, sqrt(0.51)) u1 <- as.numeric(cut(u1, breaks = c(-Inf, 0, Inf))) u2 <- as.numeric(cut(u2, breaks = c(-Inf, 0.5, Inf))) u3 <- as.numeric(cut(u3, breaks = c(-Inf, 0, Inf))) u4 <- as.numeric(cut(u4, breaks = c(-Inf, -0.5, Inf))) g <- rep(c(1, 2), 500) dat2 <- data.frame(u1, u2, u3, u4, g) configural2 <- " f1 =~ NA*u1 + u2 + u3 + u4 u1 | c(t11, t11)*t1 u2 | c(t21, t21)*t1 u3 | c(t31, t31)*t1 u4 | c(t41, t41)*t1 f1 ~~ c(1, 1)*f1 f1 ~ c(0, NA)*1 u1 ~~ c(1, 1)*u1 u2 ~~ c(1, NA)*u2 u3 ~~ c(1, NA)*u3 u4 ~~ c(1, NA)*u4 " outConfigural2 <- cfa(configural2, data = dat2, group = "g", parameterization = "theta", estimator = "wlsmv", ordered = c("u1", "u2", "u3", "u4")) weak2 <- " f1 =~ NA*u1 + c(f11, f11)*u1 + c(f21, f21)*u2 + c(f31, f31)*u3 + c(f41, f41)*u4 u1 | c(t11, t11)*t1 u2 | c(t21, t21)*t1 u3 | c(t31, t31)*t1 u4 | c(t41, t41)*t1 f1 ~~ c(1, NA)*f1 f1 ~ c(0, NA)*1 u1 ~~ c(1, 1)*u1 u2 ~~ c(1, NA)*u2 u3 ~~ c(1, NA)*u3 u4 ~~ c(1, NA)*u4 " outWeak2 <- cfa(weak2, data = dat2, group = "g", parameterization = "theta", estimator = "wlsmv", ordered = c("u1", "u2", "u3", "u4")) modelsCat <- list(fit.configural = outConfigural2, fit.loadings = outWeak2) partialInvarianceCat(modelsCat, type = "metric") partialInvarianceCat(modelsCat, type = "metric", free = "u2") partialInvarianceCat(modelsCat, type = "metric", fix = "u3") ## Use the result from the measurementInvarianceCat function model <- ' f1 =~ u1 + u2 + u3 + u4 f2 =~ u5 + u6 + u7 + u8' modelsCat2 <- measurementInvarianceCat(model = model, data = datCat, group = "g", parameterization = "theta", estimator = "wlsmv", strict = TRUE) partialInvarianceCat(modelsCat2, type = "scalar") } } \references{ Millsap, R. E., & Olivera-Aguilar, M. (2012). Investigating measurement invariance using confirmatory factor analysis. In R. H. Hoyle (Ed.), \emph{Handbook of structural equation modeling} (pp. 380--392). New York, NY: Guilford. 
} \seealso{ \code{\link{measurementInvariance}} for measurement invariance for continuous variables; \code{\link{measurementInvarianceCat}} for measurement invariance for categorical variables; \code{\link[lavaan]{lavTestWald}} for the multivariate Wald test } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/lavTestWald.mi.Rd0000644000176200001440000001446213532031247015766 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runMI-Wald.R \name{lavTestWald.mi} \alias{lavTestWald.mi} \title{Wald Test for Multiple Imputations} \usage{ lavTestWald.mi(object, constraints = NULL, test = c("D1", "D2"), asymptotic = FALSE, scale.W = !asymptotic, omit.imps = c("no.conv", "no.se"), verbose = FALSE, warn = TRUE) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan.mi}}.} \item{constraints}{A \code{character} string (typically between single quotes) containing one or more equality constraints. See examples for more details.} \item{test}{\code{character} indicating which pooling method to use. \code{"D1"} or \code{"Rubin"} (default) indicates Rubin's (1987) rules will be applied to the point estimates and the asymptotic covariance matrix of model parameters, and those pooled values will be used to calculate the Wald test in the usual manner. \code{"D2"}, \code{"LMRR"}, or \code{"Li.et.al"} indicate that the complete-data Wald test statistic should be calculated using each imputed data set, which will then be pooled across imputations, as described in Li, Meng, Raghunathan, & Rubin (1991) and Enders (2010, chapter 8).} \item{asymptotic}{\code{logical}. If \code{FALSE} (default), the pooled test will be returned as an \emph{F}-distributed statistic with numerator (\code{df1}) and denominator (\code{df2}) degrees of freedom. If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its \code{df1} on the assumption that its \code{df2} is sufficiently large that the statistic will be asymptotically \eqn{\chi^2} distributed with \code{df1}.} \item{scale.W}{\code{logical}. If \code{FALSE}, the pooled asymptotic covariance matrix of model parameters is calculated as the weighted sum of the within-imputation and between-imputation components. Otherwise, the pooled asymptotic covariance matrix of model parameters is calculated by scaling the within-imputation component by the average relative increase in variance (ARIV; see Enders, 2010, p. 235), which is \emph{only} consistent when requesting the \emph{F} test (i.e., \code{asymptotic = FALSE}). Ignored (irrelevant) if \code{test = "D2"}.} \item{omit.imps}{\code{character} vector specifying criteria for omitting imputations from pooled results. Can include any of \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the default setting, which excludes any imputations that did not converge or for which standard errors could not be computed. The last option (\code{"no.npd"}) would exclude any imputations which yielded a nonpositive definite covariance matrix for observed or latent variables, which would include any "improper solutions" such as Heywood cases.} \item{verbose}{\code{logical}. If \code{TRUE}, print the restriction matrix and the estimated restricted values.} \item{warn}{\code{logical}.
If \code{TRUE}, print warnings if they occur.} } \value{ A vector containing the Wald test statistic (either an \code{F} or \eqn{\chi^2} statistic, depending on the \code{asymptotic} argument), the degrees of freedom (numerator and denominator, if \code{asymptotic = FALSE}), and a \emph{p} value. If \code{asymptotic = FALSE}, the relative increase in variance (RIV, or average for multiparameter tests: ARIV) used to calculate the denominator \emph{df} is also returned as a missing-data diagnostic, along with the fraction of missing information (FMI = ARIV / (1 + ARIV)). } \description{ Wald test for testing a linear hypothesis about the parameters of lavaan models fitted to multiple imputed data sets. Statistics for constraining one or more free parameters in a model can be calculated from the pooled point estimates and asymptotic covariance matrix of model parameters using Rubin's (1987) rules, or by pooling the Wald test statistics across imputed data sets (Li, Meng, Raghunathan, & Rubin, 1991). } \details{ The constraints are specified using the \code{"=="} operator. Both the left-hand side and the right-hand side of the equality can contain a linear combination of model parameters, or a constant (like zero). The model parameters must be specified by their user-specified labels from the \code{\link[lavaan]{model.syntax}}. Names of defined parameters (using the ":=" operator) can be included too. } \examples{ \dontrun{ ## impose missing data for example HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), "ageyr","agemo","school")] set.seed(12345) HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) age <- HSMiss$ageyr + HSMiss$agemo/12 HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ## impute missing data library(Amelia) set.seed(12345) HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) imps <- HS.amelia$imputations ## specify CFA model from lavaan's ?cfa help page HS.model <- ' visual =~ x1 + b1*x2 + x3 textual =~ x4 + b2*x5 + x6 speed =~ x7 + b3*x8 + x9 ' fit <- cfa.mi(HS.model, data = imps) ## Testing whether a single parameter equals zero yields the 'chi-square' ## version of the Wald z statistic from the summary() output, or the ## 'F' version of the t statistic from the summary() output, depending ## on whether asymptotic = TRUE or FALSE lavTestWald.mi(fit, constraints = "b1 == 0") # default D1 statistic lavTestWald.mi(fit, constraints = "b1 == 0", test = "D2") # D2 statistic ## The real advantage is simultaneously testing several equality ## constraints, or testing more complex constraints: con <- ' 2*b1 == b3 b2 - b3 == 0 ' lavTestWald.mi(fit, constraints = con) # default F statistic lavTestWald.mi(fit, constraints = con, asymptotic = TRUE) # chi-squared } } \references{ Enders, C. K. (2010). \emph{Applied missing data analysis}. New York, NY: Guilford. Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). Significance levels from repeated \emph{p}-values with multiply-imputed data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from https://www.jstor.org/stable/24303994 Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. New York, NY: Wiley. } \seealso{ \code{\link[lavaan]{lavTestWald}} } \author{ Terrence D.
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) Adapted from \pkg{lavaan} source code, written by Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) } semTools/man/tukeySEM.Rd0000644000176200001440000000423713432435606014633 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tukeySEM.R \name{tukeySEM} \alias{tukeySEM} \title{Tukey's WSD post-hoc test of means for unequal variance and sample size} \usage{ tukeySEM(m1, m2, var1, var2, n1, n2, ng) } \arguments{ \item{m1}{Mean of group 1.} \item{m2}{Mean of group 2.} \item{var1}{Variance of group 1.} \item{var2}{Variance of group 2.} \item{n1}{Sample size of group 1.} \item{n2}{Sample size of group 2.} \item{ng}{Total number of groups to be compared (i.e., the number of groups compared in the omnibus test).} } \value{ A vector with three elements: \enumerate{ \item \code{q}: The \emph{q} statistic \item \code{df}: The degrees of freedom for the \emph{q} statistic \item \code{p}: A \emph{p} value based on the \emph{q} statistic, \emph{df}, and the total number of groups to be compared } } \description{ This function computes Tukey's WSD post hoc test of means when variances and sample sizes are not equal across groups. It can be used as a post hoc test when comparing latent means in multiple group SEM. } \details{ After conducting an omnibus test of means across three or more groups, researchers often wish to know which sets of means differ at a particular Type I error rate. Tukey's WSD test holds the error rate stable across multiple comparisons of means. This function implements an adaptation of Tukey's WSD test from Maxwell & Delaney (2004) that allows variances and sample sizes to differ across groups. } \examples{ ## For a case where three groups have been compared: ## Group 1: mean = 3.91, var = 0.46, n = 246 ## Group 2: mean = 3.96, var = 0.62, n = 465 ## Group 3: mean = 2.94, var = 1.07, n = 64 ## compare group 1 and group 2 tukeySEM(3.91, 3.96, 0.46, 0.62, 246, 465, 3) ## compare group 1 and group 3 tukeySEM(3.91, 2.94, 0.46, 1.07, 246, 64, 3) ## compare group 2 and group 3 tukeySEM(3.96, 2.94, 0.62, 1.07, 465, 64, 3) } \references{ Maxwell, S. E., & Delaney, H. D. (2004). \emph{Designing experiments and analyzing data: A model comparison perspective} (2nd ed.). Mahwah, NJ: Lawrence Erlbaum Associates. } \author{ Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) } semTools/man/EFA-class.Rd0000644000176200001440000000350213432435606014625 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/efa.R \docType{class} \name{EFA-class} \alias{EFA-class} \alias{show,EFA-method} \alias{summary,EFA-method} \title{Class For Rotated Results from EFA} \usage{ \S4method{show}{EFA}(object) \S4method{summary}{EFA}(object, suppress = 0.1, sort = TRUE) } \arguments{ \item{object}{object of class \code{EFA}} \item{suppress}{any standardized loadings less than the specified value will not be printed to the screen} \item{sort}{\code{logical}.
If \code{TRUE} (default), factor loadings will be sorted by their size in the console output} \description{ This class contains the results of rotated exploratory factor analysis } \section{Slots}{ \describe{ \item{\code{loading}}{Rotated standardized factor loading matrix} \item{\code{rotate}}{Rotation matrix} \item{\code{gradRotate}}{Gradient of the objective function at the rotated loadings} \item{\code{convergence}}{Convergence status} \item{\code{phi}}{Factor correlation matrix. Will be an identity matrix if orthogonal rotation is used.} \item{\code{se}}{Standard errors of the rotated standardized factor loading matrix} \item{\code{method}}{Method of rotation} \item{\code{call}}{The command used to generate this object} }} \section{Objects from the Class}{ Objects can be created via the \code{\link{orthRotate}} or \code{\link{oblqRotate}} function. } \examples{ unrotated <- efaUnrotate(HolzingerSwineford1939, nf = 3, varList = paste0("x", 1:9), estimator = "mlr") summary(unrotated, std = TRUE) lavInspect(unrotated, "std") # Rotated by Quartimin rotated <- oblqRotate(unrotated, method = "quartimin") summary(rotated) } \seealso{ \code{\link{efaUnrotate}}; \code{\link{orthRotate}}; \code{\link{oblqRotate}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/datCat.Rd0000644000176200001440000000132713432435606014332 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{datCat} \alias{datCat} \title{Simulated Data set to Demonstrate Categorical Measurement Invariance} \format{A \code{data.frame} with 200 observations of 9 variables. \describe{ \item{g}{Sex of respondents} \item{u1}{Indicator 1} \item{u2}{Indicator 2} \item{u3}{Indicator 3} \item{u4}{Indicator 4} \item{u5}{Indicator 5} \item{u6}{Indicator 6} \item{u7}{Indicator 7} \item{u8}{Indicator 8} }} \source{ Data were generated using the \code{lavaan} package. } \usage{ datCat } \description{ A simulated data set with 2 factors, each with 4 indicators, separated into two groups } \examples{ head(datCat) } \keyword{datasets} semTools/man/imposeStart.Rd0000644000176200001440000000645413432435606015450 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/imposeStart.R \name{imposeStart} \alias{imposeStart} \title{Specify starting values from a lavaan output} \usage{ imposeStart(out, expr, silent = TRUE) } \arguments{ \item{out}{The \code{lavaan} output whose parameter estimates users wish to use as starting values for an analysis model} \item{expr}{The original code that users use to run a lavaan model} \item{silent}{\code{logical}. If \code{FALSE}, print the parameter table with the new starting values} } \value{ A fitted lavaan model } \description{ This function will save the parameter estimates of a lavaan output and impose those parameter estimates as starting values for another analysis model. The free parameters with the same names or the same labels across two models will be assigned the new starting values. This function may help to increase the chance of convergence in a complex model (e.g., multitrait-multimethod model or complex longitudinal invariance model). } \examples{ ## The following example shows that the longitudinal weak invariance model ## using effect coding did not converge with three time points but converged ## with two time points. Thus, the parameter estimates from the model with ## two time points are used as starting values for the three time points.
## The model with the new starting values converges properly. weak2time <- ' # Loadings f1t1 =~ LOAD1*y1t1 + LOAD2*y2t1 + LOAD3*y3t1 f1t2 =~ LOAD1*y1t2 + LOAD2*y2t2 + LOAD3*y3t2 # Factor Variances f1t1 ~~ f1t1 f1t2 ~~ f1t2 # Factor Covariances f1t1 ~~ f1t2 # Error Variances y1t1 ~~ y1t1 y2t1 ~~ y2t1 y3t1 ~~ y3t1 y1t2 ~~ y1t2 y2t2 ~~ y2t2 y3t2 ~~ y3t2 # Error Covariances y1t1 ~~ y1t2 y2t1 ~~ y2t2 y3t1 ~~ y3t2 # Factor Means f1t1 ~ NA*1 f1t2 ~ NA*1 # Measurement Intercepts y1t1 ~ INT1*1 y2t1 ~ INT2*1 y3t1 ~ INT3*1 y1t2 ~ INT4*1 y2t2 ~ INT5*1 y3t2 ~ INT6*1 # Constraints for Effect-coding Identification LOAD1 == 3 - LOAD2 - LOAD3 INT1 == 0 - INT2 - INT3 INT4 == 0 - INT5 - INT6 ' model2time <- lavaan(weak2time, data = exLong) weak3time <- ' # Loadings f1t1 =~ LOAD1*y1t1 + LOAD2*y2t1 + LOAD3*y3t1 f1t2 =~ LOAD1*y1t2 + LOAD2*y2t2 + LOAD3*y3t2 f1t3 =~ LOAD1*y1t3 + LOAD2*y2t3 + LOAD3*y3t3 # Factor Variances f1t1 ~~ f1t1 f1t2 ~~ f1t2 f1t3 ~~ f1t3 # Factor Covariances f1t1 ~~ f1t2 + f1t3 f1t2 ~~ f1t3 # Error Variances y1t1 ~~ y1t1 y2t1 ~~ y2t1 y3t1 ~~ y3t1 y1t2 ~~ y1t2 y2t2 ~~ y2t2 y3t2 ~~ y3t2 y1t3 ~~ y1t3 y2t3 ~~ y2t3 y3t3 ~~ y3t3 # Error Covariances y1t1 ~~ y1t2 y2t1 ~~ y2t2 y3t1 ~~ y3t2 y1t1 ~~ y1t3 y2t1 ~~ y2t3 y3t1 ~~ y3t3 y1t2 ~~ y1t3 y2t2 ~~ y2t3 y3t2 ~~ y3t3 # Factor Means f1t1 ~ NA*1 f1t2 ~ NA*1 f1t3 ~ NA*1 # Measurement Intercepts y1t1 ~ INT1*1 y2t1 ~ INT2*1 y3t1 ~ INT3*1 y1t2 ~ INT4*1 y2t2 ~ INT5*1 y3t2 ~ INT6*1 y1t3 ~ INT7*1 y2t3 ~ INT8*1 y3t3 ~ INT9*1 # Constraints for Effect-coding Identification LOAD1 == 3 - LOAD2 - LOAD3 INT1 == 0 - INT2 - INT3 INT4 == 0 - INT5 - INT6 INT7 == 0 - INT8 - INT9 ' ### The following command does not provide a convergent result # model3time <- lavaan(weak3time, data = exLong) ### Use starting values from the model with two time points model3time <- imposeStart(model2time, lavaan(weak3time, data = exLong)) summary(model3time) } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/monteCarloMed.Rd0000644000176200001440000001524113475314630015663 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/monteCarloMed.R \name{monteCarloMed} \alias{monteCarloMed} \title{Monte Carlo Confidence Intervals to Test Complex Indirect Effects} \usage{ monteCarloMed(expression, ..., ACM = NULL, object = NULL, rep = 20000, CI = 95, plot = FALSE, outputValues = FALSE) } \arguments{ \item{expression}{A character scalar representing the computation of an indirect effect. Different parameters in the expression should have different alphanumeric values. Expressions can use either addition (+) or multiplication (*) operators.} \item{\dots}{Parameter estimates for all parameters named in \code{expression}. The order of parameters should follow from \code{expression} (the first parameter named in \code{expression} should be the first parameter listed in \dots{}). Alternatively \dots can be a vector of parameter estimates.} \item{ACM}{A matrix representing the asymptotic covariance matrix of the parameters described in \code{expression}. This matrix should be a symmetric matrix with dimensions equal to the number of parameter names in \code{expression}. Information on finding the ACM in popular SEM software is described below.} \item{object}{A lavaan model object fitted after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions. The model must have parameters labelled with the same labels used in \code{expression}.
When using this option, do not specify values for \dots or \code{ACM}.} \item{rep}{The number of replications to compute. Many thousands are recommended.} \item{CI}{Width of the confidence interval computed.} \item{plot}{Should the function output a plot of simulated values of the indirect effect?} \item{outputValues}{Should the function output all simulated values of the indirect effect?} } \value{ A list with two elements. The first element is the point estimate for the indirect effect. The second element is a matrix with values for the upper and lower limits of the confidence interval generated from the Monte Carlo test of mediation. If \code{outputValues = TRUE}, output will be a list whose first element is a list with the point estimate and the upper and lower confidence limits, and whose second element is a vector of simulated values of the indirect effect. } \description{ This function takes an expression for an indirect effect, the parameters and standard errors associated with the expression, and returns a confidence interval based on a Monte Carlo test of mediation (MacKinnon, Lockwood, & Williams, 2004). } \details{ This function implements the Monte Carlo test of mediation first described in MacKinnon, Lockwood, & Williams (2004) and extends it to complex cases where the indirect effect is more than a function of two parameters. The function takes an expression for the indirect effect, randomly simulates values of the indirect effect based on the values of the parameters (and the associated standard errors) comprising the indirect effect, and outputs a confidence interval of the indirect effect based on the simulated values. For further information on the Monte Carlo test of mediation see MacKinnon, Lockwood, & Williams (2004) and Preacher & Selig (2012). The asymptotic covariance matrix can be easily found in many popular SEM software applications. \itemize{ \item LISREL: Including the EC option on the OU line will print the ACM to a separate file. The file contains the lower triangular elements of the ACM in free format and scientific notation. \item Mplus: Include the command TECH3; in the OUTPUT section. The ACM will be printed in the output. \item lavaan: Use the command \code{vcov} on the fitted lavaan object to print the ACM to the screen } } \examples{ ## Simple two path mediation ## Write expression of indirect effect med <- 'a*b' ## Parameter values from analyses aparam <- 1 bparam <- 2 ## Asymptotic covariance matrix from analyses AC <- matrix(c(.01,.00002, .00002,.02), nrow=2, byrow=TRUE) ## Compute CI, include a plot monteCarloMed(med, coef1 = aparam, coef2 = bparam, outputValues = FALSE, plot = TRUE, ACM = AC) ## Use a vector of parameter estimates as input aparam <- c(1,2) monteCarloMed(med, coef1 = aparam, outputValues = FALSE, plot = TRUE, ACM = AC) ## Complex mediation with two paths for the indirect effect ## Write expression of indirect effect med <- 'a1*b1 + a1*b2' ## Parameter values and standard errors from analyses aparam <- 1 b1param <- 2 b2param <- 1 ## Asymptotic covariance matrix from analyses AC <- matrix(c(1, .00002, .00003, .00002, 1, .00002, .00003, .00002, 1), nrow = 3, byrow = TRUE) ## Compute CI, do not include a plot monteCarloMed(med, coef1 = aparam, coef2 = b1param, coef3 = b2param, ACM = AC) ## WORKING WITH lavaan MODELS.
From the mediation tutorial: ## http://lavaan.ugent.be/tutorial/mediation.html set.seed(1234) X <- rnorm(100) M <- 0.5*X + rnorm(100) Y <- 0.7*M + rnorm(100) Data <- data.frame(X = X, Y = Y, M = M) model <- ' # direct effect Y ~ c*X # mediator M ~ a*X Y ~ b*M # indirect effect (a*b) ab := a*b # total effect total := c + (a*b) ' fit <- sem(model, data = Data) med <- 'a*b' ## Automatically extract information from lavaan object monteCarloMed(med, object = fit) ## or (unnecessarily) manually extract the information first myParams <- c("a","b") myCoefs <- coef(fit)[myParams] myACM <- vcov(fit)[myParams, myParams] monteCarloMed(med, myCoefs, ACM = myACM) } \references{ MacKinnon, D. P., Lockwood, C. M., & Williams, J. (2004). Confidence limits for the indirect effect: Distribution of the product and resampling methods. \emph{Multivariate Behavioral Research, 39}(1), 99--128. doi:10.1207/s15327906mbr3901_4 Preacher, K. J., & Selig, J. P. (2010, July). Monte Carlo method for assessing multilevel mediation: An interactive tool for creating confidence intervals for indirect effects in 1-1-1 multilevel models [Computer software]. Available from \url{http://quantpsy.org/}. Preacher, K. J., & Selig, J. P. (2012). Advantages of Monte Carlo confidence intervals for indirect effects. \emph{Communication Methods and Measures, 6}(2), 77--98. doi:10.1080/19312458.2012.679848 Selig, J. P., & Preacher, K. J. (2008, June). Monte Carlo method for assessing mediation: An interactive tool for creating confidence intervals for indirect effects [Computer software]. Available from \url{http://quantpsy.org/}. } \author{ Corbin Quick (University of Michigan; \email{corbinq@umich.edu}) Alexander M. Schoemann (East Carolina University; \email{schoemanna@ecu.edu}) James P. Selig (University of New Mexico; \email{selig@unm.edu}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/efaUnrotate.Rd0000644000176200001440000000457113432435606015403 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/efa.R \name{efaUnrotate} \alias{efaUnrotate} \title{Analyze Unrotated Exploratory Factor Analysis Model} \usage{ efaUnrotate(data, nf, varList = NULL, start = TRUE, aux = NULL, ...) } \arguments{ \item{data}{A target \code{data.frame}} \item{nf}{The desired number of factors} \item{varList}{Target observed variables. If not specified, all variables in \code{data} will be used.} \item{start}{Use starting values in the analysis from the \code{\link{factanal}} function. If \code{FALSE}, the starting values from the \code{lavaan} package will be used. \code{TRUE} is ignored with a warning if the \code{aux} argument is used.} \item{aux}{The list of auxiliary variables. These variables will be included in the model by the saturated-correlates approach to account for missing information.} \item{\dots}{Other arguments in the \code{\link[lavaan]{cfa}} function in the \code{lavaan} package, such as \code{ordered}, \code{se}, or \code{estimator}} } \value{ A \code{lavaan} output of an unrotated exploratory factor analysis solution. } \description{ This function will analyze an unrotated exploratory factor analysis model. The unrotated solution can be rotated by the \code{\link{orthRotate}} and \code{\link{oblqRotate}} functions.
} \details{ This function will generate a lavaan script for an unrotated exploratory factor analysis model such that (1) all factor loadings are estimated, (2) factor variances are fixed to 1, (3) factor covariances are fixed to 0, and (4) the dot products of any pairs of columns in the factor loading matrix are fixed to zero (Johnson & Wichern, 2002). The reason for creating this function in addition to the \code{\link{factanal}} function is that users can enjoy some advanced features from the \code{lavaan} package such as scaled \eqn{\chi^2}, diagonally weighted least squares for ordinal indicators, or full-information maximum likelihood (FIML). } \examples{ unrotated <- efaUnrotate(HolzingerSwineford1939, nf = 3, varList=paste0("x", 1:9), estimator = "mlr") summary(unrotated, std = TRUE) inspect(unrotated, "std") dat <- data.frame(HolzingerSwineford1939, z = rnorm(nrow(HolzingerSwineford1939), 0, 1)) unrotated2 <- efaUnrotate(dat, nf = 2, varList = paste0("x", 1:9), aux = "z") } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) } semTools/man/PAVranking.Rd0000644000176200001440000002364613433602261015134 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PAVranking.R \name{PAVranking} \alias{PAVranking} \title{Parcel-Allocation Variability in Model Ranking} \usage{ PAVranking(model0, model1, data, parcel.names, item.syntax, nAlloc = 100, fun = "sem", alpha = 0.05, bic.crit = 10, fit.measures = c("chisq", "df", "cfi", "tli", "rmsea", "srmr", "logl", "aic", "bic", "bic2"), ..., show.progress = FALSE, iseed = 12345, warn = FALSE) } \arguments{ \item{model0, model1}{\code{\link[lavaan]{lavaan}} model syntax specifying nested models (\code{model0} within \code{model1}) to be fitted to the same parceled data. Note that there can be a mixture of items and parcels (even within the same factor), in case certain items should never be parceled. Can be a character string or parameter table. Also see \code{\link[lavaan]{lavaanify}} for more details.} \item{data}{A \code{data.frame} containing all observed variables appearing in the \code{model}, as well as those in the \code{item.syntax} used to create parcels. If the data have missing values, multiple imputation before parceling is recommended: submit a stacked data set (with a variable for the imputation number, so they can be separated later) and set \code{do.fit = FALSE} to return the list of \code{data.frame}s (one per allocation), each of which is a stacked, imputed data set with parcels.} \item{parcel.names}{\code{character} vector containing names of all parcels appearing as indicators in \code{model}.} \item{item.syntax}{\link[lavaan]{lavaan} model syntax specifying the model that would be fit to all of the unparceled items, including items that should be randomly allocated to parcels appearing in \code{model}.} \item{nAlloc}{The number of random items-to-parcels allocations to generate.} \item{fun}{\code{character} string indicating the name of the \code{\link[lavaan]{lavaan}} function used to fit \code{model} to \code{data}. Can only take the values \code{"lavaan"}, \code{"sem"}, \code{"cfa"}, or \code{"growth"}.} \item{alpha}{Alpha level used as criterion for significance.} \item{bic.crit}{Criterion for assessing evidence in favor of one model over another.
See Raftery (1995) for guidelines (default is "very strong evidence" in favor of the model with lower BIC).} \item{fit.measures}{\code{character} vector containing names of fit measures to request from each fitted \code{\link[lavaan]{lavaan}} model. See the output of \code{\link[lavaan]{fitMeasures}} for a list of available measures.} \item{\dots}{Additional arguments to be passed to \code{\link[lavaan]{lavaanList}}. See also \code{\link[lavaan]{lavOptions}}} \item{show.progress}{If \code{TRUE}, show a \code{\link[utils]{txtProgressBar}} indicating how fast each model-fitting iterates over allocations.} \item{iseed}{(Optional) Random seed used for parceling items. When the same random seed is specified and the program is re-run, the same allocations will be generated. The \code{iseed} argument can be used to assess parcel-allocation variability in model ranking when considering more than two models. For each pair of models under comparison, the program should be rerun using the same random seed. Doing so ensures that multiple model comparisons will employ the same set of parcel datasets. \emph{Note}: When using \pkg{parallel} options, you must first type \code{RNGkind("L'Ecuyer-CMRG")} into the R Console, so that the seed will be controlled across cores.} \item{warn}{Whether to print warnings when fitting models to each allocation} } \value{ \item{model0.results}{Results returned by \code{\link{parcelAllocation}} for \code{model0} (see the \bold{Value} section).} \item{model1.results}{Results returned by \code{\link{parcelAllocation}} for \code{model1} (see the \bold{Value} section).} \item{model0.v.model1}{A \code{list} of model-comparison results, including the following: \itemize{ \item{\code{LRT_Summary:}}{ The average likelihood ratio test across allocations, as well as the \emph{SD}, minimum, maximum, range, and the proportion of allocations for which the test was significant.} \item{\code{Fit_Index_Differences:}}{ Differences in fit indices, organized by what proportion favored each model and among those, what the average difference was.} \item{\code{Favored_by_BIC:}}{ The proportion of allocations in which each model met the criterion (\code{bic.crit}) for a substantial difference in fit.} \item{\code{Convergence_Summary:}}{ The proportion of allocations in which each model (and both models) converged on a solution.} } Histograms are also printed to the current plot-output device.} } \description{ This function quantifies and assesses the consequences of parcel-allocation variability for model ranking of structural equation models (SEMs) that differ in their structural specification but share the same parcel-level measurement specification (see Sterba & Rights, 2016). This function calls \code{\link{parcelAllocation}}---which can be used with only one SEM in isolation---to fit two (assumed) nested models to each of a specified number of random item-to-parcel allocations. Output includes summary information about the distribution of model selection results (including plots) and the distribution of results for each model individually, across allocations within-sample. Note that this function can be used when selecting among more than two competing structural models as well (see instructions below involving the \code{iseed} argument). } \details{ This is based on a SAS macro \code{ParcelAlloc} (Sterba & MacCallum, 2010).
The \code{PAVranking} function produces results discussed in Sterba and Rights (2016) relevant to the assessment of parcel-allocation variability in model selection and model ranking. Specifically, the \code{PAVranking} function first calls \code{\link{parcelAllocation}} to generate a given number (\code{nAlloc}) of item-to-parcel allocations, fitting both specified models to each allocation, and providing summaries of PAV for each model. Additionally, \code{PAVranking} provides the following new summaries: \itemize{ \item{PAV in model selection index values and model ranking between Models \code{model0} and \code{model1}.} \item{The proportion of allocations that converged and the proportion of proper solutions (results are summarized only for allocations that both converged and yielded proper solutions).} } For further details on the benefits of the random allocation of items to parcels, see Sterba (2011) and Sterba and MacCallum (2010). To test whether nested models have equivalent fit, results can be pooled across allocations using the same methods available for pooling results across multiple imputations of missing data (see \bold{Examples}). \emph{Note}: This function requires the \code{lavaan} package. Missing data must be coded as \code{NA}. If the function returns \code{"Error in plot.new() : figure margins too large"}, the user may need to increase the size of the plot window (e.g., in RStudio) and rerun the function. } \examples{ ## Specify the item-level model (if NO parcels were created) ## This must apply to BOTH competing models item.syntax <- c(paste0("f1 =~ f1item", 1:9), paste0("f2 =~ f2item", 1:9)) cat(item.syntax, sep = "\\n") ## Below, we reduce the size of this same model by ## applying different parceling schemes ## Specify a 2-factor CFA with correlated factors, using 3-indicator parcels mod1 <- ' f1 =~ par1 + par2 + par3 f2 =~ par4 + par5 + par6 ' ## Specify a more restricted model with orthogonal factors mod0 <- ' f1 =~ par1 + par2 + par3 f2 =~ par4 + par5 + par6 f1 ~~ 0*f2 ' ## names of parcels (must apply to BOTH models) (parcel.names <- paste0("par", 1:6)) \dontrun{ ## override default random-number generator to use parallel options RNGkind("L'Ecuyer-CMRG") PAVranking(model0 = mod0, model1 = mod1, data = simParcel, nAlloc = 100, parcel.names = parcel.names, item.syntax = item.syntax, std.lv = TRUE, # any additional lavaan arguments parallel = "snow") # parallel options ## POOL RESULTS by treating parcel allocations as multiple imputations. ## Details provided in Sterba & Rights (2016); see ?poolMAlloc. ## save list of data sets instead of fitting model yet dataList <- parcelAllocation(mod1, data = simParcel, nAlloc = 100, parcel.names = parcel.names, item.syntax = item.syntax, do.fit = FALSE) ## now fit each model to each data set fit0 <- cfa.mi(mod0, data = dataList, std.lv = TRUE) fit1 <- cfa.mi(mod1, data = dataList, std.lv = TRUE) anova(fit0, fit1) # pooled test statistic comparing models class?lavaan.mi # find more methods for pooling results } } \references{ Raftery, A. E. (1995). Bayesian model selection in social research. \emph{Sociological Methodology, 25}, 111--163. doi:10.2307/271063 Sterba, S. K. (2011). Implications of parcel-allocation variability for comparing fit of item-solutions and parcel-solutions. \emph{Structural Equation Modeling: A Multidisciplinary Journal, 18}(4), 554--577. doi:10.1080/10705511.2011.607073 Sterba, S. K., & MacCallum, R. C. (2010).
Variability in parameter estimates and model fit across repeated allocations of items to parcels. \emph{Multivariate Behavioral Research, 45}(2), 322--358. doi:10.1080/00273171003680302 Sterba, S. K., & Rights, J. D. (2016). Accounting for parcel-allocation variability in practice: Combining sources of uncertainty and choosing the number of allocations. \emph{Multivariate Behavioral Research, 51}(2--3), 296--313. doi:10.1080/00273171.2016.1144502 Sterba, S. K., & Rights, J. D. (2017). Effects of parceling on model selection: Parcel-allocation variability in model ranking. \emph{Psychological Methods, 22}(1), 47--68. doi:10.1037/met0000067 } \seealso{ \code{\link{parcelAllocation}} for fitting a single model, \code{\link{poolMAlloc}} for choosing the number of allocations } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/auxiliary.Rd0000644000176200001440000001055113432435606015140 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auxiliary.R \name{auxiliary} \alias{auxiliary} \alias{lavaan.auxiliary} \alias{cfa.auxiliary} \alias{sem.auxiliary} \alias{growth.auxiliary} \title{Implement Saturated Correlates with FIML} \usage{ auxiliary(model, data, aux, fun, ...) lavaan.auxiliary(model, data, aux, ...) cfa.auxiliary(model, data, aux, ...) sem.auxiliary(model, data, aux, ...) growth.auxiliary(model, data, aux, ...) } \arguments{ \item{model}{The analysis model can be specified with 1 of 2 objects: \enumerate{ \item lavaan \code{\link[lavaan]{model.syntax}} specifying a hypothesized model \emph{without} mention of auxiliary variables in \code{aux} \item a parameter table, as returned by \code{\link[lavaan]{parTable}}, specifying the target model \emph{without} auxiliary variables. This option requires these columns (and silently ignores all others): \code{c("lhs","op","rhs","user","group","free","label","plabel","start")} }} \item{data}{\code{data.frame} that includes auxiliary variables as well as any observed variables in the \code{model}} \item{aux}{\code{character}. Names of auxiliary variables to add to \code{model}} \item{fun}{\code{character}. Name of a specific lavaan function used to fit \code{model} to \code{data} (i.e., \code{"lavaan"}, \code{"cfa"}, \code{"sem"}, or \code{"growth"}). Only required for \code{auxiliary}.} \item{...}{additional arguments to pass to \code{\link[lavaan]{lavaan}}.} } \value{ a fitted \code{\linkS4class{lavaan}} object. Additional information is stored as a \code{list} in the \code{@external} slot: \itemize{ \item \code{baseline.model}. a fitted \code{\linkS4class{lavaan}} object. Results of fitting an appropriate independence model for the calculation of incremental fit indices (e.g., CFI, TLI) in which the auxiliary variables remain saturated, so only the target variables are constrained to be orthogonal. See Examples for how to send this baseline model to \code{\link[lavaan]{fitMeasures}}. \item \code{aux}. The character vector of auxiliary variable names. \item \code{baseline.syntax}. A character vector generated within the \code{auxiliary} function, specifying the \code{baseline.model} syntax. } } \description{ Automatically add auxiliary variables to a lavaan model when using full information maximum likelihood (FIML) to handle missing data } \details{ These functions are wrappers around the corresponding lavaan functions. 
You can use them the same way you use \code{\link[lavaan]{lavaan}}, but you \emph{must} pass your full \code{data.frame} to the \code{data} argument. Because the saturated-correlates approach (Enders, 2008) treats exogenous variables as random, \code{fixed.x} must be set to \code{FALSE}. Because FIML requires continuous data (although nonnormality corrections can still be requested), no variables in the model nor auxiliary variables specified in \code{aux} can be declared as \code{ordered}. } \examples{ dat1 <- lavaan::HolzingerSwineford1939 set.seed(12345) dat1$z <- rnorm(nrow(dat1)) dat1$x5 <- ifelse(dat1$z < quantile(dat1$z, .3), NA, dat1$x5) dat1$x9 <- ifelse(dat1$z > quantile(dat1$z, .8), NA, dat1$x9) targetModel <- " visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 " ## works just like cfa(), but with an extra "aux" argument fitaux1 <- cfa.auxiliary(targetModel, data = dat1, aux = "z", missing = "fiml", estimator = "mlr") ## with multiple auxiliary variables and multiple groups fitaux2 <- cfa.auxiliary(targetModel, data = dat1, aux = c("z","ageyr","grade"), group = "school", group.equal = "loadings") ## calculate correct incremental fit indices (e.g., CFI, TLI) fitMeasures(fitaux2, fit.measures = c("cfi","tli")) ## NOTE: lavaan will use the internally stored baseline model, which ## is the independence model plus saturated auxiliary parameters lavInspect(fitaux2@external$baseline.model, "free") } \references{ Enders, C. K. (2008). A note on the use of missing auxiliary variables in full information maximum likelihood-based structural equation models. \emph{Structural Equation Modeling, 15}(3), 434--448. doi:10.1080/10705510802154307 } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/net.Rd0000644000176200001440000000455013432435606013721 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NET.R \name{net} \alias{net} \title{Nesting and Equivalence Testing} \usage{ net(..., crit = 1e-04) } \arguments{ \item{\dots}{The \code{lavaan} objects used for testing nesting and equivalence} \item{crit}{The upper-bound criterion for testing the equivalence of models. Models are considered nested (or equivalent) if the difference between their \eqn{\chi^2} fit statistics is less than this criterion.} } \value{ The \linkS4class{Net} object representing the outputs of nesting and equivalence testing, including a logical matrix of test results and a vector of degrees of freedom for each model. } \description{ This test examines whether models are nested or equivalent based on Bentler and Satorra's (2010) procedure. } \details{ The concept of nesting/equivalence should be the same regardless of estimation method. However, the particular method of testing nesting/equivalence (as described in Bentler & Satorra, 2010) employed by the net function analyzes summary statistics (model-implied means and covariance matrices, not raw data). In the case of robust methods like MLR, the raw data is only utilized for the robust adjustment to SE and chi-sq, and the net function only checks the unadjusted chi-sq for the purposes of testing nesting/equivalence. This method does not apply to models that estimate thresholds for categorical data, so an error message will be issued if such a model is provided.
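For intuition, the core check for a single ordered pair of models resembles the following illustrative sketch (reusing objects from the Examples below; this is not the internal implementation): fit the less restricted model to the more restricted model's implied moments, and treat a \eqn{\chi^2} near zero (below \code{crit}) as evidence of nesting.

\preformatted{
## is m3 (more restricted) nested within m1 (less restricted)?
Sigma <- lavInspect(fit3, "cov.ov")  # model-implied covariances under m3
refit <- cfa(m1, sample.cov = Sigma,
             sample.nobs = lavInspect(fit3, "nobs"))
fitMeasures(refit, "chisq") < 1e-4   # TRUE suggests m3 is nested in m1
}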
} \examples{ \dontrun{ m1 <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' m2 <- ' f1 =~ x1 + x2 + x3 + x4 f2 =~ x5 + x6 + x7 + x8 + x9 ' m3 <- ' visual =~ x1 + x2 + x3 textual =~ eq*x4 + eq*x5 + eq*x6 speed =~ x7 + x8 + x9 ' fit1 <- cfa(m1, data = HolzingerSwineford1939) fit1a <- cfa(m1, data = HolzingerSwineford1939, std.lv = TRUE) # Equivalent to fit1 fit2 <- cfa(m2, data = HolzingerSwineford1939) # Not equivalent to or nested in fit1 fit3 <- cfa(m3, data = HolzingerSwineford1939) # Nested in fit1 and fit1a tests <- net(fit1, fit1a, fit2, fit3) tests summary(tests) } } \references{ Bentler, P. M., & Satorra, A. (2010). Testing model nesting and equivalence. \emph{Psychological Methods, 15}(2), 111--123. doi:10.1037/a0019625 } \author{ Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/parcelAllocation.Rd0000644000176200001440000002174113433602261016402 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parcelAllocation.R \name{parcelAllocation} \alias{parcelAllocation} \title{Random Allocation of Items to Parcels in a Structural Equation Model} \usage{ parcelAllocation(model, data, parcel.names, item.syntax, nAlloc = 100, fun = "sem", alpha = 0.05, fit.measures = c("chisq", "df", "cfi", "tli", "rmsea", "srmr"), ..., show.progress = FALSE, iseed = 12345, do.fit = TRUE, return.fit = FALSE, warn = FALSE) } \arguments{ \item{model}{\code{\link[lavaan]{lavaan}} model syntax specifying the model fit to (at least some) parceled data. Note that there can be a mixture of items and parcels (even within the same factor), in case certain items should never be parceled. Can be a character string or parameter table. Also see \code{\link[lavaan]{lavaanify}} for more details.} \item{data}{A \code{data.frame} containing all observed variables appearing in the \code{model}, as well as those in the \code{item.syntax} used to create parcels. If the data have missing values, multiple imputation before parceling is recommended: submit a stacked data set (with a variable for the imputation number, so they can be separated later) and set \code{do.fit = FALSE} to return the list of \code{data.frame}s (one per allocation), each of which is a stacked, imputed data set with parcels.} \item{parcel.names}{\code{character} vector containing names of all parcels appearing as indicators in \code{model}.} \item{item.syntax}{\link[lavaan]{lavaan} model syntax specifying the model that would be fit to all of the unparceled items, including items that should be randomly allocated to parcels appearing in \code{model}.} \item{nAlloc}{The number of random items-to-parcels allocations to generate.} \item{fun}{\code{character} string indicating the name of the \code{\link[lavaan]{lavaan}} function used to fit \code{model} to \code{data}. Can only take the values \code{"lavaan"}, \code{"sem"}, \code{"cfa"}, or \code{"growth"}.} \item{alpha}{Alpha level used as criterion for significance.} \item{fit.measures}{\code{character} vector containing names of fit measures to request from each fitted \code{\link[lavaan]{lavaan}} model. See the output of \code{\link[lavaan]{fitMeasures}} for a list of available measures.} \item{\dots}{Additional arguments to be passed to \code{\link[lavaan]{lavaanList}}.
See also \code{\link[lavaan]{lavOptions}}} \item{show.progress}{If \code{TRUE}, show a \code{\link[utils]{txtProgressBar}} indicating how fast the model-fitting iterates over allocations.} \item{iseed}{(Optional) Random seed used for parceling items. When the same random seed is specified and the program is re-run, the same allocations will be generated. Using the same \code{iseed} argument will ensure that any model is fit to the same parcel allocations. \emph{Note}: When using \pkg{parallel} options, you must first type \code{RNGkind("L'Ecuyer-CMRG")} into the R Console, so that the seed will be controlled across cores.} \item{do.fit}{If \code{TRUE} (default), the \code{model} is fitted to each parceled data set, and the summary of results is returned (see the Value section below). If \code{FALSE}, the items are randomly parceled, but the model is not fit; instead, the \code{list} of \code{data.frame}s is returned (so assign it to an object).} \item{return.fit}{If \code{TRUE}, a \code{\link[lavaan]{lavaanList}} object is returned with the \code{list} of results across allocations.} \item{warn}{Whether to print warnings when fitting \code{model} to each allocation.} } \value{ \item{Estimates}{A \code{data.frame} containing results related to parameter estimates with columns corresponding to their names; average and standard deviation across allocations; minimum, maximum, and range across allocations; and the proportion of allocations in which each parameter estimate was significant.} \item{SE}{A \code{data.frame} containing results similar to \code{Estimates}, but related to the standard errors of parameter estimates.} \item{Fit}{A \code{data.frame} containing results related to model fit, with columns corresponding to fit index names; their average and standard deviation across allocations; the minimum, maximum, and range across allocations; and (if the test statistic or RMSEA is included in \code{fit.measures}) the proportion of allocations in which each test of (exact or close) fit was significant.} \item{Model}{A \code{\link[lavaan]{lavaanList}} object containing results of the \code{model} fitted to each parcel allocation. Only returned if \code{return.fit = TRUE}.} } \description{ This function generates a given number of random item-to-parcel allocations, fits a model to each allocation, and provides averaged results over all allocations. } \details{ This function implements the random item-to-parcel allocation procedure described in Sterba (2011) and Sterba and MacCallum (2010). The function takes a single data set with item-level data, randomly assigns items to parcels, fits a structural equation model to the parceled data (using \link[lavaan]{lavaanList}), and repeats this process for a user-specified number of random allocations. Results from all fitted models are summarized in the output. For further details on the benefits of randomly allocating items to parcels, see Sterba (2011) and Sterba and MacCallum (2010). } \examples{ ## Fit 2-factor CFA to simulated data. Each factor has 9 indicators.
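## (Optional) peek at the item-level data before parceling. The simParcel
## data set ships with semTools; the item names used in item.syntax below
## are assumed to be among its columns.
# head(simParcel[, paste0("f1item", 1:9)])
# head(simParcel[, paste0("f2item", 1:9)])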
## Specify the item-level model (if NO parcels were created) item.syntax <- c(paste0("f1 =~ f1item", 1:9), paste0("f2 =~ f2item", 1:9)) cat(item.syntax, sep = "\\n") ## Below, we reduce the size of this same model by ## applying different parceling schemes ## 3-indicator parcels mod.parcels <- ' f1 =~ par1 + par2 + par3 f2 =~ par4 + par5 + par6 ' ## names of parcels (parcel.names <- paste0("par", 1:6)) \dontrun{ ## override default random-number generator to use parallel options RNGkind("L'Ecuyer-CMRG") parcelAllocation(mod.parcels, data = simParcel, nAlloc = 100, parcel.names = parcel.names, item.syntax = item.syntax, std.lv = TRUE, # any additional lavaan arguments parallel = "snow") # parallel options ## POOL RESULTS by treating parcel allocations as multiple imputations ## Details provided in Sterba & Rights (2016); see ?poolMAlloc. ## save list of data sets instead of fitting model yet dataList <- parcelAllocation(mod.parcels, data = simParcel, nAlloc = 100, parcel.names = parcel.names, item.syntax = item.syntax, do.fit = FALSE) ## now fit the model to each data set fit.parcels <- cfa.mi(mod.parcels, data = dataList, std.lv = TRUE) summary(fit.parcels) # uses Rubin's rules anova(fit.parcels) # pooled test statistic class?lavaan.mi # find more methods for pooling results } ## multigroup example simParcel$group <- 0:1 # arbitrary groups for example mod.mg <- ' f1 =~ par1 + c(L2, L2)*par2 + par3 f2 =~ par4 + par5 + par6 ' ## names of parcels (parcel.names <- paste0("par", 1:6)) parcelAllocation(mod.mg, data = simParcel, parcel.names, item.syntax, std.lv = TRUE, group = "group", group.equal = "loadings", nAlloc = 20, show.progress = TRUE) ## parcels for first factor, items for second factor mod.items <- ' f1 =~ par1 + par2 + par3 f2 =~ f2item2 + f2item7 + f2item8 ' ## names of parcels (parcel.names <- paste0("par", 1:3)) parcelAllocation(mod.items, data = simParcel, parcel.names, item.syntax, nAlloc = 20, std.lv = TRUE) ## mixture of 1- and 3-indicator parcels for second factor mod.mix <- ' f1 =~ par1 + par2 + par3 f2 =~ f2item2 + f2item7 + f2item8 + par4 + par5 + par6 ' ## names of parcels (parcel.names <- paste0("par", 1:6)) parcelAllocation(mod.mix, data = simParcel, parcel.names, item.syntax, nAlloc = 20, std.lv = TRUE) } \references{ Sterba, S. K. (2011). Implications of parcel-allocation variability for comparing fit of item-solutions and parcel-solutions. \emph{Structural Equation Modeling, 18}(4), 554--577. doi:10.1080/10705511.2011.607073 Sterba, S. K. & MacCallum, R. C. (2010). Variability in parameter estimates and model fit across repeated allocations of items to parcels. \emph{Multivariate Behavioral Research, 45}(2), 322--358. doi:10.1080/00273171003680302 Sterba, S. K., & Rights, J. D. (2016). Accounting for parcel-allocation variability in practice: Combining sources of uncertainty and choosing the number of allocations. \emph{Multivariate Behavioral Research, 51}(2--3), 296--313. doi:10.1080/00273171.2016.1144502 Sterba, S. K., & Rights, J. D. (2017). Effects of parceling on model selection: Parcel-allocation variability in model ranking. \emph{Psychological Methods, 22}(1), 47--68. doi:10.1037/met0000067 } \seealso{ \code{\link{PAVranking}} for comparing 2 models, \code{\link{poolMAlloc}} for choosing the number of allocations } \author{ Terrence D.
Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/longInvariance-deprecated.Rd0000644000176200001440000001410713432435606020167 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/longInvariance.R \name{longInvariance-deprecated} \alias{longInvariance-deprecated} \title{Measurement Invariance Tests Within Person} \usage{ longInvariance(model, varList, auto = "all", constrainAuto = FALSE, fixed.x = TRUE, std.lv = FALSE, group = NULL, group.equal = "", group.partial = "", strict = FALSE, warn = TRUE, debug = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "satorra.bentler.2001", ...) } \arguments{ \item{model}{lavaan syntax or parameter table} \item{varList}{A list containing indicator names of factors used in the invariance testing, such that the first element is the vector of indicator names in the first timepoint and the second element is the vector of indicator names in the second timepoint. The order of indicator names should be the same (but measured in different times or different units).} \item{auto}{The order of autocorrelation on the measurement errors of similar items across factors (e.g., Item 1 in Time 1 and Time 2). If 0 is specified, no autocorrelation will be imposed. If 1 is specified, autocorrelation will be imposed for adjacent factors listed in \code{varList}. The maximum order that can be specified is the number of factors minus 1. If \code{"all"} is specified, the maximum order will be used.} \item{constrainAuto}{If \code{TRUE}, the function will equate the auto-\emph{covariance} to be equal within the same item across factors. For example, the covariance of item 1 in time 1 and time 2 is equal to the covariance of item 1 in time 2 and time 3.} \item{fixed.x}{See \code{\link[lavaan]{lavaan}.}} \item{std.lv}{See \code{\link[lavaan]{lavaan}.}} \item{group}{See \code{\link[lavaan]{lavaan}.}} \item{group.equal}{See \code{\link[lavaan]{lavaan}.}} \item{group.partial}{See \code{\link[lavaan]{lavaan}.}} \item{strict}{If \code{TRUE}, the sequence requires strict invariance. See details for more information.} \item{warn}{See \code{\link[lavaan]{lavaan}.}} \item{debug}{See \code{\link[lavaan]{lavaan}.}} \item{quiet}{If \code{FALSE} (default), a summary is printed out containing an overview of the different models that are fitted, together with some model comparison tests. If \code{TRUE}, no summary is printed.} \item{fit.measures}{Fit measures used to calculate the differences between nested models.} \item{baseline.model}{custom baseline model passed to \code{\link[lavaan]{fitMeasures}}} \item{method}{The method used to calculate the likelihood ratio test. See \code{\link[lavaan]{lavTestLRT}} for available options} \item{...}{Additional arguments in the \code{\link[lavaan]{lavaan}} function. See also \code{\link[lavaan]{lavOptions}}} } \value{ Invisibly, all model fits in the sequence are returned as a list. } \description{ Testing measurement invariance across timepoints (longitudinal) or any context involving the use of the same scale in one case (e.g., a dyad case with husband and wife answering the same scale). Measurement invariance is evaluated using a typical sequence of model comparison tests. This function currently works with only one scale, and only with continuous indicators. } \details{ If \code{strict = FALSE}, the following four models are tested in order: \enumerate{ \item Model 1: configural invariance.
The same factor structure is imposed on all units. \item Model 2: weak invariance. The factor loadings are constrained to be equal across units. \item Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across units. \item Model 4: The factor loadings, intercepts and means are constrained to be equal across units. } Each time a more restricted model is fitted, a \eqn{\Delta\chi^2} test is reported, comparing the current model with the previous one, and comparing the current model to the baseline model (Model 1). In addition, the difference in CFI is also reported (\eqn{\Delta}CFI). If \code{strict = TRUE}, the following five models are tested in order: \enumerate{ \item Model 1: configural invariance. The same factor structure is imposed on all units. \item Model 2: weak invariance. The factor loadings are constrained to be equal across units. \item Model 3: strong invariance. The factor loadings and intercepts are constrained to be equal across units. \item Model 4: strict invariance. The factor loadings, intercepts and residual variances are constrained to be equal across units. \item Model 5: The factor loadings, intercepts, residual variances and means are constrained to be equal across units. } Note that if the \eqn{\chi^2} test statistic is scaled (e.g., a Satorra-Bentler or Yuan-Bentler test statistic), a special version of the \eqn{\Delta\chi^2} test is used as described in \url{http://www.statmodel.com/chidiff.shtml} } \examples{ model <- ' f1t1 =~ y1t1 + y2t1 + y3t1 f1t2 =~ y1t2 + y2t2 + y3t2 f1t3 =~ y1t3 + y2t3 + y3t3 ' ## Create list of variables var1 <- c("y1t1", "y2t1", "y3t1") var2 <- c("y1t2", "y2t2", "y3t2") var3 <- c("y1t3", "y2t3", "y3t3") constrainedVar <- list(var1, var2, var3) ## Invariance of the same factor across timepoints longInvariance(model, auto = 1, constrainAuto = TRUE, varList = constrainedVar, data = exLong) ## Invariance of the same factor across timepoints and groups longInvariance(model, auto = 1, constrainAuto = TRUE, varList = constrainedVar, data = exLong, group = "sex", group.equal = c("loadings", "intercepts")) } \references{ Vandenberg, R. J., and Lance, C. E. (2000). A review and synthesis of the measurement invariance literature: Suggestions, practices, and recommendations for organizational research. \emph{Organizational Research Methods, 3}(1), 4--70. doi:10.1177/109442810031002 } \seealso{ \code{\link{semTools-deprecated}} } \author{ Sunthud Pornprasertmanit (\email{psunthud@gmail.com}) Yves Rosseel (Ghent University; \email{Yves.Rosseel@UGent.be}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } \keyword{internal} semTools/man/nullRMSEA.Rd0000644000176200001440000000451713432435606014700 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitIndices.R \name{nullRMSEA} \alias{nullRMSEA} \title{Calculate the RMSEA of the null model} \usage{ nullRMSEA(object, scaled = FALSE, silent = FALSE) } \arguments{ \item{object}{The lavaan model object provided after running the \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions.} \item{scaled}{If \code{TRUE}, the scaled (or robust, if available) RMSEA is returned. Ignored if a robust test statistic was not requested.} \item{silent}{If \code{TRUE}, do not print anything on the screen.} } \value{ The RMSEA of the null model (a \code{numeric} vector), returned invisibly.
} \description{ Calculate the RMSEA of the null (baseline) model } \details{ RMSEA of the null model is calculated similar to the formula provided in the \code{lavaan} package. The standard formula of RMSEA is \deqn{ RMSEA = \sqrt{\frac{\chi^2}{N \times df} - \frac{1}{N}} \times \sqrt{G} } where \eqn{\chi^2} is the chi-square test statistic value of the target model, \eqn{N} is the total sample size, \eqn{df} is the degrees of freedom of the hypothesized model, and \eqn{G} is the number of groups. Kenny proposed on his website that "A reasonable rule of thumb is to examine the RMSEA for the null model and make sure that it is no smaller than 0.158. An RMSEA for the model of 0.05 and a TLI of .90, implies that the RMSEA of the null model is 0.158. If the RMSEA for the null model is less than 0.158, an incremental measure of fit may not be that informative." See also \url{http://davidakenny.net/cm/fit.htm} } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) nullRMSEA(fit) } \references{ Kenny, D. A., Kaniskan, B., & McCoach, D. B. (2015). The performance of RMSEA in models with small degrees of freedom. \emph{Sociological Methods Research, 44}(3), 486--507. doi:10.1177/0049124114543236 } \seealso{ \itemize{ \item \code{\link{miPowerFit}} For the modification indices and their power approach for model fit evaluation \item \code{\link{moreFitIndices}} For other fit indices } } \author{ Ruben Arslan (Humboldt-University of Berlin, \email{rubenarslan@gmail.com}) Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) } semTools/man/quark.Rd0000644000176200001440000001167613432435606014253 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/quark.R \name{quark} \alias{quark} \title{Quark} \usage{ quark(data, id, order = 1, silent = FALSE, ...) } \arguments{ \item{data}{The data frame is a required component for \code{quark}. In order for \code{quark} to process a data frame, it must not contain any factors or text-based variables. All variables must be in numeric format. Identifiers and dates can be left in the data; however, they will need to be identified under the \code{id} argument.} \item{id}{Identifiers and dates within the dataset will need to be acknowledged as \code{quark} cannot process these. By acknowledging the identifiers and dates as a vector of column numbers or variable names, \code{quark} will remove them from the data temporarily to complete its main processes. Among many potential issues of not acknowledging identifiers and dates are issues involved with imputation, product and polynomial effects, and principal component analysis.} \item{order}{Order is an optional argument provided by quark that can be used when the imputation procedures in mice fail. Under some circumstances, mice cannot calculate missing values due to issues with extreme missingness. Should an error present itself stating a failure due to not having any columns selected, set the argument \code{order = 2} in order to reorder the imputation method procedure. Otherwise, use the default \code{order = 1}.} \item{silent}{If \code{FALSE}, the details of the \code{quark} process are printed.} \item{\dots}{additional arguments to pass to \code{\link[mice]{mice}}.} } \value{ The \code{quark} function returns a list with seven components:
\item{ID Columns}{A vector of the identifier columns entered when running quark.} \item{ID Variables}{A subset of the dataset that contains the identifiers as acknowledged when running quark.} \item{Used Data}{A matrix / dataframe of the data provided by the user as the basis for quark to process.} \item{Imputed Data}{A matrix / dataframe of the data after the multiple method imputation process.} \item{Big Matrix}{The expanded product and polynomial matrix.} \item{Principal Components}{The entire dataframe of principal components for the dataset. This dataset will have the same number of rows as the big matrix, but will have one fewer column (as is the case with principal component analyses).} \item{Percent Variance Explained}{A vector of the percent variance explained by each column of principal components.} } \description{ The \code{quark} function provides researchers with the ability to calculate and include component scores that take into account the variance in the original dataset and all of the interaction and polynomial effects of the data in the dataset. } \details{ The \code{quark} function calculates these component scores by first filling in the data via multiple imputation methods and then expanding the dataset by aggregating the non-overlapping interaction effects between variables by calculating the mean of the interactions and polynomial effects. The multiple imputation methods include iterative sampling with group mean substitution, as well as multiple imputation using a polytomous regression algorithm (\code{mice}). During the expansion process, the dataset is expanded to three times its normal size (in width). The first third of the dataset contains all of the original data post imputation, the second third contains the means of the polynomial effects (squares and cubes), and the final third contains the means of the non-overlapping interaction effects. A full principal component analysis is conducted and the individual components are retained. The subsequent \code{\link{combinequark}} function gives researchers control over how many components to extract and retain. The function returns the dataset as submitted (with missing values) and the component scores as requested for a more accurate multiple imputation in subsequent steps. } \examples{ set.seed(123321) dat <- HolzingerSwineford1939[,7:15] misspat <- matrix(runif(nrow(dat) * 9) < 0.3, nrow(dat)) dat[misspat] <- NA dat <- cbind(HolzingerSwineford1939[,1:3], dat) \dontrun{ quark.list <- quark(data = dat, id = c(1, 2)) final.data <- combinequark(quark = quark.list, percent = 80) ## Example to rerun quark after imputation failure: quark.list <- quark(data = dat, id = c(1, 2), order = 2) } } \references{ Howard, W. J., Rhemtulla, M., & Little, T. D. (2015). Using Principal Components as Auxiliary Variables in Missing Data Estimation. \emph{Multivariate Behavioral Research, 50}(3), 285--299. doi:10.1080/00273171.2014.999267 } \seealso{ \code{\link{combinequark}} } \author{ Steven R. Chesnut (University of Southern Mississippi; \email{Steven.Chesnut@usm.edu}) Danny Squire (Texas Tech University) Terrence D. Jorgensen (University of Amsterdam) The PCA code is copied and modified from the \code{FactoMineR} package. } semTools/DESCRIPTION0000644000176200001440000000723213532170512013570 0ustar liggesusersPackage: semTools Version: 0.5-2 Title: Useful Tools for Structural Equation Modeling Description: Provides useful tools for structural equation modeling.
Authors@R: c(person(given = c("Terrence","D."), family = "Jorgensen", role = c("aut","cre"), email="TJorgensen314@gmail.com", comment = c(ORCID = "0000-0001-5111-6773")), person(given = "Sunthud", family = "Pornprasertmanit", role = "aut", email = "psunthud@gmail.com"), person(given = c("Alexander","M."), family = "Schoemann", role = "aut", email="schoemanna@ecu.edu"), person(given = "Yves", family = "Rosseel", role = "aut", email="Yves.Rosseel@UGent.be"), person(given = "Patrick", family = "Miller", role = "ctb", email="pmille13@nd.edu"), person(given = "Corbin", family = "Quick", role = "ctb", email="corbinq@umich.edu"), person(given = "Mauricio", family = "Garnier-Villarreal", role = "ctb", email="mauricio.garniervillarreal@marquette.edu", comment = c(ORCID = "0000-0002-2951-6647")), person(given = "James", family = "Selig", role = "ctb", email="selig@unm.edu"), person(given = "Aaron", family = "Boulton", role = "ctb", email="aboulton@email.unc.edu"), person(given = "Kristopher", family = "Preacher", role = "ctb", email="kris.preacher@vanderbilt.edu"), person(given = "Donna", family = "Coffman", role = "ctb", email="dlc30@psu.edu"), person(given = "Mijke", family = "Rhemtulla", role = "ctb", email="mrhemtulla@ucdavis.edu"), person(given = "Alexander", family = "Robitzsch", role = "ctb", email="a.robitzsch@bifie.at"), person(given = "Craig", family = "Enders", role = "ctb", email="Craig.Enders@asu.edu"), person(given = "Ruben", family = "Arslan", role = "ctb", email="rubenarslan@gmail.com"), person(given = "Bell", family = "Clinton", role = "ctb", email="clintonbell@ku.edu"), person(given = "Pavel", family = "Panko", role = "ctb", email="pavel.panko@ttu.edu"), person(given = "Edgar", family = "Merkle", role = "ctb", email="merklee@missouri.edu", comment = c(ORCID = "0000-0001-7158-0653")), person(given = "Steven", family = "Chesnut", role = "ctb", email="Steven.Chesnut@usm.edu"), person(given = "Jarrett", family = "Byrnes", role = "ctb", email="Jarrett.Byrnes@umb.edu"), person(given = c("Jason","D."), family = "Rights", role = "ctb", email="jason.d.rights@vanderbilt.edu"), person(given = "Ylenio", family = "Longo", role = "ctb", email="yleniolongo@gmail.com"), person(given = "Maxwell", family = "Mansolf", role = "ctb", email="mamansolf@gmail.com") ) Depends: R(>= 3.4), utils, stats, graphics, lavaan(>= 0.6-5) Imports: methods Suggests: MASS, foreign, parallel, boot, Amelia, mice, GPArotation, mnormt, blavaan License: GPL (>= 2) LazyData: yes LazyLoad: yes URL: https://github.com/simsem/semTools/wiki BugReports: https://github.com/simsem/semTools/issues Date/Publication: 2019-08-30 09:50:02 UTC RoxygenNote: 6.1.1 NeedsCompilation: no Packaged: 2019-08-30 09:12:31 UTC; terrence Author: Terrence D. Jorgensen [aut, cre] (<https://orcid.org/0000-0001-5111-6773>), Sunthud Pornprasertmanit [aut], Alexander M. Schoemann [aut], Yves Rosseel [aut], Patrick Miller [ctb], Corbin Quick [ctb], Mauricio Garnier-Villarreal [ctb] (<https://orcid.org/0000-0002-2951-6647>), James Selig [ctb], Aaron Boulton [ctb], Kristopher Preacher [ctb], Donna Coffman [ctb], Mijke Rhemtulla [ctb], Alexander Robitzsch [ctb], Craig Enders [ctb], Ruben Arslan [ctb], Bell Clinton [ctb], Pavel Panko [ctb], Edgar Merkle [ctb] (<https://orcid.org/0000-0001-7158-0653>), Steven Chesnut [ctb], Jarrett Byrnes [ctb], Jason D. Rights [ctb], Ylenio Longo [ctb], Maxwell Mansolf [ctb] Maintainer: Terrence D.
Jorgensen Repository: CRAN semTools/build/0000755000176200001440000000000013532164177013167 5ustar liggesuserssemTools/build/vignette.rds0000644000176200001440000000032313532164177015524 0ustar liggesusersO 0R"')DnDvanI]Omι8X8ӱӁ_CKdWvW"k e@t?4mYY #'|F&)گ8i똶f`G^f&Ԍup'S2[B/E]A?h%HTI}ot?ZZsemTools/vignettes/0000755000176200001440000000000013532164177014100 5ustar liggesuserssemTools/vignettes/partialInvariance.Rnw0000644000176200001440000002437213432435606020231 0ustar liggesusers\documentclass[12pt]{article} %%\VignetteIndexEntry{Partial Invariance} %%\VignetteDepends{semTools} \usepackage[utf8]{inputenc} \usepackage{amsfonts} \usepackage{amstext} \usepackage{amsmath} \usepackage{natbib} \title{A Note on Effect Size for Measurement Invariance} \author{Sunthud Pornprasertmanit} \begin{document} \maketitle This article aims to show the mathematical reasoning behind all effect sizes used in the \texttt{partialInvariance} and \texttt{partialInvarianceCat} functions in \texttt{semTools} package. In the functions, the following statistics are compared across groups: factor loadings, item intercepts (for continuous items), item thresholds (for categorical items), measurement error variances, and factor means. The comparison can be compared between two groups (e.g., Cohen's \emph{d}) or multiple groups (e.g., $R^2$). This note provides the details of the effect sizes in comparing two groups only. The comparison between multiple groups can be done by picking the reference group and compare the other groups with the reference group in the similar fashion to dummy variables. For example, the comparison between four groups would create three effect size values (i.e., Group 1 vs. Reference, Group 2 vs. Reference, and Group 3 vs. Reference). Alternatively, for the measurement invariance, the change in comparative fit index (CFI) can be used as the measure of effect size. In the measurement invariance literature \citep{cheung2002, meade2008}, the change in CFI is used to test the equality constraints for multiple items simultaneously. The functions in \texttt{semTools} will show the change in CFI for each individual item. That is, if an item were to allow to have different statistics (e.g., loading), how large the CFI would drop from the original model. Please note that more research is needed in finding the appropriate cutoffs for the change in CFI for individual items. Are the cutoffs of .002 or .01 appropriate for this context? In creating effect size, a target statistic needs to be standardized. Sample variances are used in the standardization formula. If researchers can assume that target variances across groups are equal in population, then pooled variances can be used in the standardization. The pooled variance $s^2_P$ can be computed as follows: $$s^2_P = \frac{\sum^G_{g=1}(n_g - 1)s^2_g}{\sum^G_{g=1}(n_g - 1)},$$ \noindent where $g$ represents the index of groups, $G$ is the number of groups, $s^2_g$ represents the variance of Group $g$, and $n_g$ is the Group $g$ size. If the variances are not assumed to be equal across groups, I recommend to pick a reference (baseline) group for the standardization. In the following sections, I will show how effect sizes are defined in each type of partial invariance testing. \section{Factor Loading} Let $\lambda_{ijg}$ be the unstandardized factor loading of Item $i$ from Factor $j$ in Group $g$. 
A standardized factor loading $\lambda^*_{ijg}$ can be computed \citep{muthen1998}: $$\lambda^*_{ijg} = \lambda_{ijg}\cdot\frac{\psi_{jg}}{\sigma_{ig}},$$ \noindent where $\psi_{jg}$ is the standard deviation of Factor $j$ from Group $g$ and $\sigma_{ig}$ is the total standard deviation of Item $i$ from Group $g$. To quantify the difference in factor loadings between groups on a standardized scale, the standard deviation in the standardization formula needs to be the same across groups. If Group A and Group B are compared, the standardized difference in factor loading is defined: $$\Delta\lambda^*_{ij} = (\lambda_{ijA} - \lambda_{ijB})\cdot\frac{\psi_{jP}}{\sigma_{iP}},$$ \noindent where $\psi_{jP}$ is the pooled standard deviation of Factor $j$ and $\sigma_{iP}$ is the pooled total standard deviation of Item $i$. If Group A is the reference group, $\psi_{jA}$ and $\sigma_{iA}$ can substitute $\psi_{jP}$ and $\sigma_{iP}$. Assuming that the standardized factor loadings come from a congeneric measurement model, they represent the correlations between items and factors. \cite{cohen1992} provides a guideline for interpreting the magnitude of the difference in correlations for independent groups. The correlations are transformed with Fisher's $z$ transformation: $$q = \operatorname{arctanh}\left(\lambda_{ijA}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right) - \operatorname{arctanh}\left(\lambda_{ijB}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right)$$ Then, the $q$ values of .1, .3, and .5 are interpreted as small, medium, and large effect sizes. For continuous outcomes, the amount of mean difference implied by the factor loading difference given a factor score can be used as an effect size \citep{millsap2012}. Let $X_{ijg}$ be the observed score of Item $i$ loaded on Factor $j$ from Group $g$, and let $W_{j}$ represent the score of Factor $j$. The expected value of the observed score difference between Group A and Group B is calculated as follows: $$E\left(X_{iA} - X_{iB} \mid W_j \right) = \left( \nu_{iA} - \nu_{iB} \right) + \left( \lambda_{ijA} - \lambda_{ijB} \right) \times W_{j}, $$ \noindent where $\nu_{ig}$ represents the intercept of Item $i$ in Group $g$. Let the values between $W_{jl}$ and $W_{jh}$ be the values of interest. We can find the expected difference in the observed scores under this range of the factor scores. \cite{millsap2012} proposed that, if the size of the expected difference exceeds the value of a meaningful difference, the loading difference is not negligible. See their article for the discussion of the meaningful difference. Note that, in the \texttt{partialInvariance} function, $W_{jl}$ is calculated by (a) finding the factor scores representing a low \emph{z}-score (e.g., -2) from all groups and (b) selecting the lowest factor score across all groups. $W_{jh}$ is calculated by (a) finding the factor scores representing a high \emph{z}-score (e.g., 2) from all groups and (b) selecting the highest factor score across all groups. \section{Item Intercepts} Let $\nu_{ig}$ be the intercept of Item $i$ in Group $g$. A standardized intercept $\nu^*_{ig}$ is defined as follows \citep{muthen1998}: $$\nu^*_{ig} = \nu_{ig} / \sigma_{ig}.$$ Thus, the standardized difference between Groups A and B in item intercepts is defined: $$\Delta\nu^*_{i} = (\nu_{iA} - \nu_{iB}) / \sigma_{iP}.$$ Note that $\sigma_{iA}$ can substitute $\sigma_{iP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}.
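For a concrete illustration with hypothetical numbers, suppose $\nu_{iA} = 3.2$, $\nu_{iB} = 2.8$, and $\sigma_{iP} = 1.6$. Then $$\Delta\nu^*_{i} = (3.2 - 2.8) / 1.6 = .25,$$ \noindent which falls between the small (.2) and medium (.5) benchmarks and would therefore be interpreted as a small effect.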
The proportion of the intercept difference over the observed score difference may be used as an effect size \citep{millsap2012}: $$(\nu_{iA} - \nu_{iB}) / (M_{iA} - M_{iB}), $$ \noindent where $M_{ig}$ represents the observed mean of Item $i$ in Group $g$. \cite{millsap2012} noted that a relatively small proportion (e.g., less than 20\%) is ignorable. If the sign is negative or the value is over 1, the interpretation is doubtful. \section{Item Thresholds} Let $\tau_{cig}$ be the threshold separating categories $c$ and $c + 1$ for Item $i$ in Group $g$. Note that the maximum value of $c$ is the number of categories minus 1. Because thresholds are the locations of the distribution underlying ordered categorical items (usually a normal distribution), the location statistic can be standardized by dividing it by the standard deviation of the underlying distribution. The standardized threshold $\tau^*_{cig}$ is defined as follows: $$\tau^*_{cig} = \tau_{cig} / \sigma^u_{ig},$$ \noindent where $\sigma^u_{ig}$ is the standard deviation of the distribution underlying the categorical data for Item $i$ in Group $g$. In the theta parameterization of categorical confirmatory factor analysis, $\sigma^u_{ig}$ may not be equal across groups. The standardized difference in thresholds between Groups A and B requires the pooled standard deviation. The standardized difference in thresholds is defined: $$\Delta\tau^*_{ci} = (\tau_{ciA} - \tau_{ciB}) / \sigma^u_{iP}.$$ Note that $\sigma^u_{iA}$ can substitute $\sigma^u_{iP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. \section{Measurement Error Variances} Let $\theta_{ig}$ be the measurement error variance of Item $i$ in Group $g$. A standardized measurement error variance $\theta^*_{ig}$ is defined as follows \citep{muthen1998}: $$\theta^*_{ig} = \theta_{ig} / \sigma_{ig}.$$ Thus, the standardized difference between Groups A and B in measurement error variances could be defined: $$\Delta\theta^*_{i} = (\theta_{iA} - \theta_{iB}) / \sigma_{iP}.$$ Note that $\sigma_{iA}$ can substitute $\sigma_{iP}$ if Group A is the reference group. However, \cite{cohen1992} provides no direct guideline for interpreting the magnitude of the difference in measurement error variances, so a new standardized difference in measurement error variances is needed. Assuming that $\sigma_{iP}$ is always greater than $\theta_{iA}$ and $\theta_{iB}$, which is usually the case, $\theta_{iA} / \sigma_{iP}$ and $\theta_{iB} / \sigma_{iP}$ range between 0 and 1, similar to proportion statistics. \cite{cohen1992} provided a guideline for interpreting the magnitude of the difference in proportions using the arcsine transformation. The new index ($h$) is defined as follows: $$h = \sin^{-1}\sqrt{\frac{\theta_{iA}}{\sigma_{iP}}} - \sin^{-1}\sqrt{\frac{\theta_{iB}}{\sigma_{iP}}}.$$ Then, the $h$ values of .2, .5, and .8 are interpreted as small, medium, and large effect sizes. If items are continuous, the proportion of the error variance difference over the observed variance difference may be used as an effect size \citep{millsap2012}: $$(\theta_{iA} - \theta_{iB}) / (\sigma_{iA} - \sigma_{iB}). $$ \noindent If the sign is negative or the value is over 1, the interpretation is doubtful. \section{Factor Means} Let $\alpha_{jg}$ be the mean of Factor $j$ in Group $g$.
A standardized factor mean $\alpha^*_{jg}$ is defined as follows \citep{muthen1998}: $$\alpha^*_{jg} = \alpha_{jg} / \psi_{jg}$$ Thus, the standardized difference between Groups A and B in factor means is defined: $$\Delta\alpha^*_{j} = (\alpha_{jA} - \alpha_{jB}) / \psi_{jP}.$$ Note that $\psi_{jA}$ can substitute $\psi_{jP}$ if Group A is the reference group. By using this scale, .2, .5, and .8 can be interpreted as small, medium, and large effect sizes according to \cite{cohen1992}. \bibliographystyle{plainnat} \bibliography{partialInvariance} \end{document}semTools/vignettes/partialInvariance.bib0000644000176200001440000000270413432435606020212 0ustar liggesusers@preamble{ " \newcommand{\noop}[1]{} " } @article{cheung2002, title={Evaluating goodness-of-fit indexes for testing measurement invariance}, author={Cheung, Gordon W and Rensvold, Roger B}, journal={Structural equation modeling}, volume={9}, number={2}, pages={233--255}, year={2002}, publisher={Taylor \& Francis} } @article{meade2008, title={Power and sensitivity of alternative fit indices in tests of measurement invariance.}, author={Meade, Adam W and Johnson, Emily C and Braddy, Phillip W}, journal={Journal of Applied Psychology}, volume={93}, number={3}, pages={568}, year={2008}, publisher={American Psychological Association} } @book{muthen1998, title={Mplus technical appendices}, author={Muth{\'e}n, Bengt O}, publisher={Muth{\'e}n \& Muth{\'e}n}, address={Los Angeles, CA}, year={\noop{1998}1998--2004} } @article{cohen1992, title={A power primer.}, author={Cohen, Jacob}, journal={Psychological bulletin}, volume={112}, number={1}, pages={155--159}, year={1992}, publisher={American Psychological Association} } @incollection{millsap2012, author = {Millsap, Roger E and Olivera-Aguilar, Margarita}, title = {Investigating measurement invariance using confirmatory factor analysis}, editor = {Hoyle, Rick H}, booktitle = {Handbook of structural equation modeling}, pages = {380--392}, publisher = {Guilford}, address = {New York}, year = {2012} }semTools/R/0000755000176200001440000000000013526762413012271 5ustar liggesuserssemTools/R/splitSample.R0000644000176200001440000001332313432435606014710 0ustar liggesusers### Corbin Quick ### Last updated: 4 April 2017 #' Randomly Split a Data Set into Halves #' #' This function randomly splits a data set into two halves, and saves the #' resulting data sets to the same folder as the original. #' #' This function randomly orders the rows of a data set, divides the data set #' into two halves, and saves the halves to the same folder as the original #' data set, preserving the original formatting. Data set type (*.csv or *.dat) #' and formatting (headers) are automatically detected, and output data sets #' will preserve input type and formatting unless specified otherwise. Input #' can be in the form of a file path (*.dat or *.csv), or an R object (matrix or #' dataframe). If input is an R object and path is default, output data sets #' will be returned as a list object. #' #' #' @importFrom stats runif #' #' @param dataset The original data set to be divided. Can be a file path to a #' *.csv or *.dat file (headers will automatically be detected) or an R object #' (matrix or dataframe). (Windows users: file path must be specified using #' FORWARD SLASHES (\code{/}) ONLY.) #' @param path File path to folder for output data sets. NOT REQUIRED if #' dataset is a filename. Specify ONLY if dataset is an R object, or desired #' output folder is not that of original data set. 
If path is specified as #' "object", output data sets will be returned as a list, and not saved to hard #' drive. #' @param div Number of output data sets. NOT REQUIRED if default, 2 halves. #' @param type Output file format ("dat" or "csv"). NOT REQUIRED unless desired #' output formatting differs from that of input, or dataset is an R object and #' csv formatting is desired. #' @param name Output file name. NOT REQUIRED unless desired output name #' differs from that of input, or input dataset is an R object. (If input is an #' R object and name is not specified, name will be "splitSample".) #' @return If \code{path = "object"}, \code{list} of output data sets. #' Otherwise, output will saved to hard drive in the same format as input. #' @author Corbin Quick (University of Michigan; \email{corbinq@@umich.edu}) #' @examples #' #' #### Input is .dat file #' #splitSample("C:/Users/Default/Desktop/MYDATA.dat") #' #### Output saved to "C:/Users/Default/Desktop/" in .dat format #' #### Names are "MYDATA_s1.dat" and "MYDATA_s2.dat" #' #' #### Input is R object #' ## Split C02 dataset from the datasets package #' library(datasets) #' splitMyData <- splitSample(CO2, path = "object") #' summary(splitMyData[[1]]) #' summary(splitMyData[[2]]) #' #### Output object splitMyData becomes list of output data sets #' #' #### Input is .dat file in "C:/" folder #' #splitSample("C:/testdata.dat", path = "C:/Users/Default/Desktop/", type = "csv") #' #### Output saved to "C:/Users/Default/Desktop/" in *.csv format #' #### Names are "testdata_s1.csv" and "testdata_s2.csv" #' #' #### Input is R object #' #splitSample(myData, path = "C:/Users/Default/Desktop/", name = "splitdata") #' #### Output saved to "C:/Users/Default/Desktop/" in *.dat format #' #### Names are "splitdata_s1.dat" and "splitdata_s2.dat" #' #' @export splitSample <- function(dataset, path = "default", div = 2, type = "default", name = "splitSample") { type1 <- type hea = FALSE file <- dataset if (is.character(file)) { temp <- strsplit(file, "/", fixed = TRUE) if (path == "default") { path <- paste(temp[[1]][1:(length(temp[[1]]) - 1)], "/", sep = "", collapse = "") } fileN <- temp[[1]][length(temp[[1]])] temp <- strsplit(fileN, ".", fixed = TRUE) type <- temp[[1]][2] name <- temp[[1]][1] if (type == "dat") { if (is.numeric(as.matrix(utils::read.table(file, nrows = 1))) == FALSE) { data <- as.matrix(utils::read.table(file, header = TRUE)) hea = TRUE } else { data <- as.matrix(utils::read.table(file)) } } if (type == "csv") { if (is.numeric(as.matrix(utils::read.table(file, nrows = 1))) == FALSE) { data <- as.matrix(utils::read.csv(file, header = TRUE)) hea = TRUE } else { data <- as.matrix(utils::read.csv(file)) } } } else { if (is.matrix(file) | is.data.frame(file)) { data <- as.matrix(file) } else { stop("Provide data in *.dat or *.csv format") } } if (type1 != "default") { type <- type1 } if (is.character(colnames(data))) { hea = TRUE } random <- runif(nrow(data), 1, nrow(data)) data <- cbind(random, data) data <- data[order(random), ] data <- data[, 2:ncol(data)] size <- split((1:nrow(data)), cut((1:nrow(data)), div, labels = FALSE)) size <- as.matrix(as.data.frame(lapply(size, length))) dataL <- list() dataL[[1]] <- data[1:size[1, 1], ] for (i in 2:div) { size[1, i] <- size[1, (i - 1)] + size[1, i] dataL[[i]] <- data[(size[1, (i - 1)] + 1):size[1, i], ] } if (path == "default") { return(dataL) } else { if (path == "object") { return(dataL) } else { for (i in 1:div) { if (type == "dat") { utils::write.table(dataL[[i]], paste(path, name, "_s", 
i, ".dat", sep = ""), sep = " ", row.names = FALSE, col.names = hea) } if (type == "csv") { utils::write.table(dataL[[i]], paste(path, name, "_s", i, ".csv", sep = ""), sep = ",", row.names = FALSE, col.names = hea) } if (type == "default") { utils::write.table(dataL[[i]], paste(path, name, "_s", i, ".dat", sep = ""), sep = " ", row.names = FALSE, col.names = hea) } } } } } semTools/R/permuteMeasEq.R0000644000176200001440000021671413432435606015201 0ustar liggesusers### Terrence D. Jorgensen ### Last updated: 25 June 2018 ### permutation randomization test for measurement equivalence and DIF ## ----------------- ## Class and Methods ## ----------------- ##' Class for the Results of Permutation Randomization Tests of Measurement ##' Equivalence and DIF ##' ##' This class contains the results of tests of Measurement Equivalence and ##' Differential Item Functioning (DIF). ##' ##' ##' @name permuteMeasEq-class ##' @aliases permuteMeasEq-class show,permuteMeasEq-method ##' summary,permuteMeasEq-method hist,permuteMeasEq-method ##' @docType class ##' ##' @slot PT A \code{data.frame} returned by a call to ##' \code{\link[lavaan]{parTable}} on the constrained model ##' @slot modelType A character indicating the specified \code{modelType} in the ##' call to \code{permuteMeasEq} ##' @slot ANOVA A \code{numeric} vector indicating the results of the observed ##' (\eqn{\Delta})\eqn{\chi^2} test, based on the central \eqn{\chi^2} ##' distribution ##' @slot AFI.obs A vector of observed (changes in) user-selected fit measures ##' @slot AFI.dist The permutation distribution(s) of user-selected fit measures. ##' A \code{data.frame} with \code{n.Permutations} rows and one column for each ##' \code{AFI.obs}. ##' @slot AFI.pval A vector of \emph{p} values (one for each element in slot ##' \code{AFI.obs}) calculated using slot \code{AFI.dist}, indicating the ##' probability of observing a change at least as extreme as \code{AFI.obs} ##' if the null hypothesis were true ##' @slot MI.obs A \code{data.frame} of observed Lagrange Multipliers ##' (modification indices) associated with the equality constraints or fixed ##' parameters specified in the \code{param} argument. This is a subset of the ##' output returned by a call to \code{\link[lavaan]{lavTestScore}} on the ##' constrained model. ##' @slot MI.dist The permutation distribution of the maximum modification index ##' (among those seen in slot \code{MI.obs$X2}) at each permutation of group ##' assignment or of \code{covariates} ##' @slot extra.obs If \code{permuteMeasEq} was called with an \code{extra} ##' function, the output when applied to the original data is concatenated ##' into this vector ##' @slot extra.dist A \code{data.frame}, each column of which contains the ##' permutation distribution of the corresponding statistic in slot ##' \code{extra.obs} ##' @slot n.Permutations An \code{integer} indicating the number of permutations ##' requested by the user ##' @slot n.Converged An \code{integer} indicating the number of permuation ##' iterations which yielded a converged solution ##' @slot n.nonConverged An \code{integer} vector of length ##' \code{n.Permutations} indicating how many times group assignment was ##' randomly permuted (at each iteration) before converging on a solution ##' @slot n.Sparse Only relevant with \code{ordered} indicators when ##' \code{modelType == "mgcfa"}. 
An \code{integer} vector of length ##' \code{n.Permutations} indicating how many times group assignment was ##' randomly permuted (at each iteration) before obtaining a sample with all ##' categories observed in all groups. ##' @slot oldSeed An \code{integer} vector storing the value of ##' \code{.Random.seed} before running \code{permuteMeasEq}. Only relevant ##' when using a parallel/multicore option and the original ##' \code{RNGkind() != "L'Ecuyer-CMRG"}. This enables users to restore their ##' previous \code{.Random.seed} state, if desired, by running: ##' \code{.Random.seed[-1] <- permutedResults@oldSeed[-1]} ##' @section Objects from the Class: Objects can be created via the ##' \code{\link[semTools]{permuteMeasEq}} function. ##' ##' @return ##' \itemize{ ##' \item The \code{show} method prints a summary of the multiparameter ##' omnibus test results, using the user-specified AFIs. The parametric ##' (\eqn{\Delta})\eqn{\chi^2} test is also displayed. ##' \item The \code{summary} method prints the same information from the ##' \code{show} method, but when \code{extra = FALSE} (the default) it also ##' provides a table summarizing any requested follow-up tests of DIF using ##' modification indices in slot \code{MI.obs}. The user can also specify an ##' \code{alpha} level for flagging modification indices as significant, as ##' well as \code{nd} (the number of digits displayed). For each modification ##' index, the \emph{p} value is displayed using a central \eqn{\chi^2} ##' distribution with the \emph{df} shown in that column. Additionally, a ##' \emph{p} value is displayed using the permutation distribution of the ##' maximum index, which controls the familywise Type I error rate in a manner ##' similar to Tukey's studentized range test. If any indices are flagged as ##' significant using the \code{tukey.p.value}, then a message is displayed for ##' each flagged index. The invisibly returned \code{data.frame} is the ##' displayed table of modification indices, unless ##' \code{\link[semTools]{permuteMeasEq}} was called with \code{param = NULL}, ##' in which case the invisibly returned object is \code{object}. If ##' \code{extra = TRUE}, the permutation-based \emph{p} values for each ##' statistic returned by the \code{extra} function are displayed and returned ##' in a \code{data.frame} instead of the modification indices requested in the ##' \code{param} argument. ##' \item The \code{hist} method returns a list of \code{length == 2}, ##' containing the arguments for the call to \code{hist} and the arguments ##' to the call for \code{legend}, respectively. This list may facilitate ##' creating a customized histogram of \code{AFI.dist}, \code{MI.dist}, or ##' \code{extra.dist} ##' } ##' ##' @author Terrence D. 
Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\link[semTools]{permuteMeasEq}} ##' ##' @examples ##' ##' # See the example from the permuteMeasEq function ##' setClass("permuteMeasEq", slots = c(PT = "data.frame", modelType = "character", ANOVA = "vector", AFI.obs = "vector", AFI.dist = "data.frame", AFI.pval = "vector", MI.obs = "data.frame", MI.dist = "vector", extra.obs = "vector", extra.dist = "data.frame", n.Permutations = "integer", n.Converged = "integer", n.nonConverged = "vector", n.Sparse = "vector", oldSeed = "integer")) ##' @rdname permuteMeasEq-class ##' @aliases show,permuteMeasEq-method ##' @export setMethod("show", "permuteMeasEq", function(object) { ## print warning if there are nonConverged permutations if (object@n.Permutations != object@n.Converged) { warning(paste("Only", object@n.Converged, "out of", object@n.Permutations, "models converged within", max(object@n.nonConverged), "attempts per permutation.\n\n")) } ## print ANOVA cat("Omnibus p value based on parametric chi-squared difference test:\n\n") print(round(object@ANOVA, digits = 3)) ## print permutation results cat("\n\nOmnibus p values based on nonparametric permutation method: \n\n") AFI <- data.frame(AFI.Difference = object@AFI.obs, p.value = object@AFI.pval) class(AFI) <- c("lavaan.data.frame","data.frame") print(AFI, nd = 3) invisible(object) }) ##' @rdname permuteMeasEq-class ##' @aliases summary,permuteMeasEq-method ##' @export setMethod("summary", "permuteMeasEq", function(object, alpha = .05, nd = 3, extra = FALSE) { ## print warning if there are nonConverged permutations if (object@n.Permutations != object@n.Converged) { warning(paste("Only", object@n.Converged, "out of", object@n.Permutations, "models converged within", max(object@n.nonConverged), "attempts per permutation.\n\n")) } ## print ANOVA cat("Omnibus p value based on parametric chi-squared difference test:\n\n") print(round(object@ANOVA, digits = nd)) ## print permutation results cat("\n\nOmnibus p values based on nonparametric permutation method: \n\n") AFI <- data.frame(AFI.Difference = object@AFI.obs, p.value = object@AFI.pval) class(AFI) <- c("lavaan.data.frame","data.frame") print(AFI, nd = nd) ## print extras or DIF test results, if any were requested if (extra && length(object@extra.obs)) { cat("\n\nUnadjusted p values of extra statistics,\n", "based on permutation distribution of each statistic: \n\n") MI <- data.frame(Statistic = object@extra.obs) class(MI) <- c("lavaan.data.frame","data.frame") MI$p.value <- sapply(names(object@extra.dist), function(nn) { mean(abs(object@extra.dist[,nn]) >= abs(object@extra.obs[nn]), na.rm = TRUE) }) MI$flag <- ifelse(MI$p.value < alpha, "* ", "") print(MI, nd = nd) } else if (length(object@MI.dist)) { cat("\n\n Modification indices for equality constrained parameter estimates,\n", "with unadjusted 'p.value' based on chi-squared distribution and\n", "adjusted 'tukey.p.value' based on permutation distribution of the\n", "maximum modification index per iteration: \n\n") MI <- do.call(paste("summ", object@modelType, sep = "."), args = list(object = object, alpha = alpha)) print(MI, nd = nd) ## print messages about potential DIF if (all(MI$tukey.p.value > alpha)) { cat("\n\n No equality constraints were flagged as significant.\n\n") return(invisible(MI)) } if (object@modelType == "mgcfa") { cat("\n\nThe following equality constraints were flagged as significant:\n\n") for (i in which(MI$tukey.p.value < alpha)) { cat("Parameter '", MI$parameter[i], "' 
may differ between Groups '", MI$group.lhs[i], "' and '", MI$group.rhs[i], "'.\n", sep = "") } cat("\nUse lavTestScore(..., epc = TRUE) on your constrained model to", "display expected parameter changes for these equality constraints\n\n") } } else return(invisible(object)) invisible(MI) }) summ.mgcfa <- function(object, alpha) { MI <- object@MI.obs class(MI) <- c("lavaan.data.frame","data.frame") PT <- object@PT eqPar <- rbind(PT[PT$plabel %in% MI$lhs, ], PT[PT$plabel %in% MI$rhs, ]) MI$flag <- "" MI$parameter <- "" MI$group.lhs <- "" MI$group.rhs <- "" for (i in 1:nrow(MI)) { par1 <- eqPar$par[ eqPar$plabel == MI$lhs[i] ] par2 <- eqPar$par[ eqPar$plabel == MI$rhs[i] ] MI$parameter[i] <- par1 MI$group.lhs[i] <- eqPar$group.label[ eqPar$plabel == MI$lhs[i] ] MI$group.rhs[i] <- eqPar$group.label[ eqPar$plabel == MI$rhs[i] ] if (par1 != par2) { myMessage <- paste0("Constraint '", MI$lhs[i], "==", MI$rhs[i], "' refers to different parameters: \n'", MI$lhs[i], "' is '", par1, "' in group '", MI$group.lhs[i], "'\n'", MI$rhs[i], "' is '", par2, "' in group '", MI$group.rhs[i], "'\n") warning(myMessage) } if (MI$tukey.p.value[i] < alpha) MI$flag[i] <- "* -->" } MI } summ.mimic <- function(object, alpha) { MI <- object@MI.obs class(MI) <- c("lavaan.data.frame","data.frame") MI$flag <- ifelse(MI$tukey.p.value < alpha, "* ", "") MI } ##' @rdname permuteMeasEq-class ##' @aliases hist,permuteMeasEq-method ##' @importFrom stats qchisq dchisq quantile ##' @param object,x object of class \code{permuteMeasEq} ##' @param ... Additional arguments to pass to \code{\link[graphics]{hist}} ##' @param AFI \code{character} indicating the fit measure whose permutation ##' distribution should be plotted ##' @param alpha alpha level used to draw confidence limits in \code{hist} and ##' flag significant statistics in \code{summary} output ##' @param nd number of digits to display ##' @param extra \code{logical} indicating whether the \code{summary} output ##' should return permutation-based \emph{p} values for each statistic returned ##' by the \code{extra} function. If \code{FALSE} (default), \code{summary} ##' will return permutation-based \emph{p} values for each modification index. ##' @param printLegend \code{logical}. If \code{TRUE} (default), a legend will ##' be printed with the histogram ##' @param legendArgs \code{list} of arguments passed to the ##' \code{\link[graphics]{legend}} function. The default argument is a list ##' placing the legend at the top-left of the figure. ##' @export setMethod("hist", "permuteMeasEq", function(x, ..., AFI, alpha = .05, nd = 3, printLegend = TRUE, legendArgs = list(x = "topleft")) { histArgs <- list(...) 
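  ## What follows assembles arguments for hist() and (optionally) legend():
  ## extract the permutation distribution of the requested AFI, set display
  ## defaults, then add either chi-squared-specific annotations (theoretical
  ## density and critical value) or generic AFI annotations (permuted
  ## critical value, observed value, and permutation p value).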
histArgs$x <- x@AFI.dist[[AFI]] if (is.null(histArgs$col)) histArgs$col <- "grey69" histArgs$freq <- !grepl("chi", AFI) histArgs$ylab <- if (histArgs$freq) "Frequency" else "Probability Density" if (printLegend) { if (is.null(legendArgs$box.lty)) legendArgs$box.lty <- 0 if (nd < length(strsplit(as.character(1 / alpha), "")[[1]]) - 1) { warning(paste0("The number of digits argument (nd = ", nd , ") is too low to display your p value at the", " same precision as your requested alpha level (alpha = ", alpha, ")")) } if (x@AFI.pval[[AFI]] < (1 / 10^nd)) { pVal <- paste(c("< .", rep(0, nd - 1),"1"), collapse = "") } else { pVal <- paste("=", round(x@AFI.pval[[AFI]], nd)) } } delta <- length(x@MI.dist) > 0L && x@modelType == "mgcfa" if (grepl("chi", AFI)) { ####################################### Chi-squared ChiSq <- x@AFI.obs[AFI] DF <- x@ANOVA[2] histArgs$xlim <- range(c(ChiSq, x@AFI.dist[[AFI]], qchisq(c(.01, .99), DF))) xVals <- seq(histArgs$xlim[1], histArgs$xlim[2], by = .1) theoDist <- dchisq(xVals, df = DF) TheoCrit <- round(qchisq(p = alpha, df = DF, lower.tail = FALSE), 2) Crit <- quantile(histArgs$x, probs = 1 - alpha) if (ChiSq > histArgs$xlim[2]) histArgs$xlim[2] <- ChiSq if (delta) { histArgs$main <- expression(Permutation~Distribution~of~Delta*chi^2) histArgs$xlab <- expression(Delta*chi^2) if (printLegend) { legendArgs$legend <- c(bquote(Theoretical~Delta*chi[Delta*.(paste("df =", DF))]^2 ~ Distribution), bquote(Critical~chi[alpha~.(paste(" =", alpha))]^2 == .(round(TheoCrit, nd))), bquote(.(paste("Permuted Critical Value =", round(Crit, nd)))), bquote(Observed~Delta*chi^2 == .(round(ChiSq, nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } else { histArgs$main <- expression(Permutation~Distribution~of~chi^2) histArgs$xlab <- expression(chi^2) if (printLegend) { legendArgs$legend <- c(bquote(Theoretical~chi[.(paste("df =", DF))]^2 ~ Distribution), bquote(Critical~chi[alpha~.(paste(" =", alpha))]^2 == .(round(TheoCrit, nd))), bquote(.(paste("Permuted Critical Value =", round(Crit, nd)))), bquote(Observed~chi^2 == .(round(ChiSq, nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } H <- do.call(hist, c(histArgs["x"], plot = FALSE)) histArgs$ylim <- c(0, max(H$density, theoDist)) if (printLegend) { legendArgs <- c(legendArgs, list(lty = c(2, 2, 1, 1, 0, 0), lwd = c(2, 2, 2, 3, 0, 0), col = c("black","black","black","red","",""))) } } else { ################################################### other AFIs badness <- grepl(pattern = "fmin|aic|bic|rmr|rmsea|cn|sic|hqc", x = AFI, ignore.case = TRUE) if (badness) { Crit <- quantile(histArgs$x, probs = 1 - alpha) } else { Crit <- quantile(histArgs$x, probs = alpha) } histArgs$xlim <- range(histArgs$x, x@AFI.obs[AFI]) if (delta) { histArgs$main <- bquote(~Permutation~Distribution~of~Delta*.(toupper(AFI))) histArgs$xlab <- bquote(~Delta*.(toupper(AFI))) if (printLegend) { legendArgs$legend <- c(bquote(Critical~Delta*.(toupper(AFI))[alpha~.(paste(" =", alpha))] == .(round(Crit, nd))), bquote(Observed~Delta*.(toupper(AFI)) == .(round(x@AFI.obs[AFI], nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } else { histArgs$main <- paste("Permutation Distribution of", toupper(AFI)) histArgs$xlab <- toupper(AFI) if (printLegend) { legendArgs$legend <- c(bquote(Critical~.(toupper(AFI))[alpha~.(paste(" =", alpha))] == .(round(Crit, nd))), bquote(Observed~.(toupper(AFI)) == .(round(x@AFI.obs[AFI], nd))), expression(paste("")), bquote(Permuted~italic(p)~.(pVal))) } } if (printLegend) { legendArgs <- 
c(legendArgs, list(lty = c(1, 1, 0, 0), lwd = c(2, 3, 0, 0), col = c("black","red","",""))) } } ## print histogram (and optionally, print legend) suppressWarnings({ do.call(hist, histArgs) if (grepl("chi", AFI)) { lines(x = xVals, y = theoDist, lwd = 2, lty = 2) abline(v = TheoCrit, col = "black", lwd = 2, lty = 2) } abline(v = Crit, col = "black", lwd = 2) abline(v = x@AFI.obs[AFI], col = "red", lwd = 3) if (printLegend) do.call(legend, legendArgs) }) ## return arguments to create histogram (and optionally, legend) invisible(list(hist = histArgs, legend = legendArgs)) }) ## -------------------- ## Constructor Function ## -------------------- ##' Permutation Randomization Tests of Measurement Equivalence and Differential ##' Item Functioning (DIF) ##' ##' The function \code{permuteMeasEq} provides tests of hypotheses involving ##' measurement equivalence, in one of two frameworks: multigroup CFA or MIMIC ##' models. ##' ##' ##' The function \code{permuteMeasEq} provides tests of hypotheses involving ##' measurement equivalence, in one of two frameworks: ##' \enumerate{ ##' \item{1} For multiple-group CFA models, provide a pair of nested lavaan objects, ##' the less constrained of which (\code{uncon}) freely estimates a set of ##' measurement parameters (e.g., factor loadings, intercepts, or thresholds; ##' specified in \code{param}) in all groups, and the more constrained of which ##' (\code{con}) constrains those measurement parameters to equality across ##' groups. Group assignment is repeatedly permuted and the models are fit to ##' each permutation, in order to produce an empirical distribution under the ##' null hypothesis of no group differences, both for (a) changes in ##' user-specified fit measures (see \code{AFIs} and \code{moreAFIs}) and for ##' (b) the maximum modification index among the user-specified equality ##' constraints. Configural invariance can also be tested by providing that ##' fitted lavaan object to \code{con} and leaving \code{uncon = NULL}, in which ##' case \code{param} must be \code{NULL} as well. ##' ##' \item{2} In MIMIC models, one or a set of continuous and/or discrete ##' \code{covariates} can be permuted, and a constrained model is fit to each ##' permutation in order to provide a distribution of any fit measures (namely, ##' the maximum modification index among fixed parameters in \code{param}) under ##' the null hypothesis of measurement equivalence across levels of those ##' covariates. ##' } ##' ##' In either framework, modification indices for equality constraints or fixed ##' parameters specified in \code{param} are calculated from the constrained ##' model (\code{con}) using the function \code{\link[lavaan]{lavTestScore}}. ##' ##' For multiple-group CFA models, the multiparameter omnibus null hypothesis of ##' measurement equivalence/invariance is that there are no group differences in ##' any measurement parameters (of a particular type). This can be tested using ##' the \code{anova} method on nested \code{lavaan} objects, as seen in the ##' output of \code{\link[semTools]{measurementInvariance}}, or by inspecting ##' the change in alternative fit indices (AFIs) such as the CFI. The ##' permutation randomization method employed by \code{permuteMeasEq} generates ##' an empirical distribution of any \code{AFIs} under the null hypothesis, so ##' the user is not restricted to using fixed cutoffs proposed by Cheung & ##' Rensvold (2002), Chen (2007), or Meade, Johnson, & Braddy (2008). 
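##'
##' To make the direction of each test concrete: the one-tailed permutation
##' \emph{p} value is simply the proportion of the permutation distribution at
##' least as extreme as the observed (change in) fit measure. A minimal sketch
##' of that calculation, using hypothetical vectors rather than this package's
##' internal code:
##' \preformatted{
##' ## badness-of-fit measure (e.g., chisq, RMSEA): larger values are worse
##' p <- mean(AFI.dist >= AFI.obs, na.rm = TRUE)
##' ## goodness-of-fit measure (e.g., CFI): smaller values are worse
##' p <- mean(AFI.dist <= AFI.obs, na.rm = TRUE)
##' }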
##'
##' If the multiparameter omnibus null hypothesis is rejected, partial
##' invariance can still be established by freeing invalid equality constraints,
##' as long as equality constraints are valid for at least two indicators per
##' factor. Modification indices can be calculated from the constrained model
##' (\code{con}), but multiple testing leads to inflation of Type I error rates.
##' The permutation randomization method employed by \code{permuteMeasEq}
##' creates a distribution of the maximum modification index if the null
##' hypothesis is true, which allows the user to control the familywise Type I
##' error rate in a manner similar to Tukey's \emph{q} (studentized range)
##' distribution for the Honestly Significant Difference (HSD) post hoc test.
##'
##' For MIMIC models, DIF can be tested by comparing modification indices of
##' regression paths to the permutation distribution of the maximum modification
##' index, which controls the familywise Type I error rate. The MIMIC approach
##' could also be applied with multiple-group models, but the grouping variable
##' would not be permuted; rather, the covariates would be permuted separately
##' within each group to preserve between-group differences. So whether
##' parameters are constrained or unconstrained across groups, the MIMIC
##' approach is only for testing null hypotheses about the effects of
##' \code{covariates} on indicators, controlling for common factors.
##'
##' In either framework, \code{\link[lavaan]{lavaan}}'s \code{group.label}
##' argument is used to preserve the order of groups seen in \code{con} when
##' permuting the data.
##'
##'
##' @importFrom lavaan lavInspect parTable
##'
##' @param nPermute An integer indicating the number of random permutations used
##' to form empirical distributions under the null hypothesis.
##' @param modelType A character string indicating type of model employed:
##' multiple-group CFA (\code{"mgcfa"}) or MIMIC (\code{"mimic"}).
##' @param con The constrained \code{lavaan} object, in which the parameters
##' specified in \code{param} are constrained to equality across all groups when
##' \code{modelType = "mgcfa"}, or in which regression paths are fixed to zero
##' when \code{modelType = "mimic"}. In the case of testing \emph{configural}
##' invariance when \code{modelType = "mgcfa"}, \code{con} is the configural
##' model (implicitly, the unconstrained model is the saturated model, so use
##' the defaults \code{uncon = NULL} and \code{param = NULL}). When
##' \code{modelType = "mimic"}, \code{con} is the MIMIC model in which the
##' covariate predicts the latent construct(s) but no indicators (unless they
##' have already been identified as DIF items).
##' @param uncon Optional. The unconstrained \code{lavaan} object, in which the
##' parameters specified in \code{param} are freely estimated in all groups.
##' When \code{modelType = "mgcfa"}, only in the case of testing
##' \emph{configural} invariance should \code{uncon = NULL}. When
##' \code{modelType = "mimic"}, any non-\code{NULL uncon} is silently set to
##' \code{NULL}.
##' @param null Optional. A \code{lavaan} object, in which an alternative null
##' model is fit (besides the default independence model specified by
##' \code{lavaan}) for the calculation of incremental fit indices. See Widaman &
##' Thompson (2003) for details. If \code{NULL}, \code{lavaan}'s default
##' independence model is used.
##' @param param An optional character vector or list of character vectors ##' indicating which parameters the user would test for DIF following a ##' rejection of the omnibus null hypothesis tested using ##' (\code{more})\code{AFIs}. Note that \code{param} does not guarantee certain ##' parameters \emph{are} constrained in \code{con}; that is for the user to ##' specify when fitting the model. If users have any "anchor items" that they ##' would never intend to free across groups (or levels of a covariate), these ##' should be excluded from \code{param}; exceptions to a type of parameter can ##' be specified in \code{freeParam}. When \code{modelType = "mgcfa"}, ##' \code{param} indicates which parameters of interest are constrained across ##' groups in \code{con} and are unconstrained in \code{uncon}. Parameter names ##' must match those returned by \code{names(coef(con))}, but omitting any ##' group-specific suffixes (e.g., \code{"f1~1"} rather than \code{"f1~1.g2"}) ##' or user-specified labels (that is, the parameter names must follow the rules ##' of lavaan's \code{\link[lavaan]{model.syntax}}). Alternatively (or ##' additionally), to test all constraints of a certain type (or multiple types) ##' of parameter in \code{con}, \code{param} may take any combination of the ##' following values: \code{"loadings"}, \code{"intercepts"}, ##' \code{"thresholds"}, \code{"residuals"}, \code{"residual.covariances"}, ##' \code{"means"}, \code{"lv.variances"}, and/or \code{"lv.covariances"}. When ##' \code{modelType = "mimic"}, \code{param} must be a vector of individual ##' parameters or a list of character strings to be passed one-at-a-time to ##' \code{\link[lavaan]{lavTestScore}}\code{(object = con, add = param[i])}, ##' indicating which (sets of) regression paths fixed to zero in \code{con} that ##' the user would consider freeing (i.e., exclude anchor items). If ##' \code{modelType = "mimic"} and \code{param} is a list of character strings, ##' the multivariate test statistic will be saved for each list element instead ##' of 1-\emph{df} modification indices for each individual parameter, and ##' \code{names(param)} will name the rows of the \code{MI.obs} slot (see ##' \linkS4class{permuteMeasEq}). Set \code{param = NULL} (default) to avoid ##' collecting modification indices for any follow-up tests. ##' @param freeParam An optional character vector, silently ignored when ##' \code{modelType = "mimic"}. If \code{param} includes a type of parameter ##' (e.g., \code{"loadings"}), \code{freeParam} indicates exceptions (i.e., ##' anchor items) that the user would \emph{not} intend to free across groups ##' and should therefore be ignored when calculating \emph{p} values adjusted ##' for the number of follow-up tests. Parameter types that are already ##' unconstrained across groups in the fitted \code{con} model (i.e., a ##' \emph{partial} invariance model) will automatically be ignored, so they do ##' not need to be specified in \code{freeParam}. Parameter names must match ##' those returned by \code{names(coef(con))}, but omitting any group-specific ##' suffixes (e.g., \code{"f1~1"} rather than \code{"f1~1.g2"}) or ##' user-specified labels (that is, the parameter names must follow the rules of ##' lavaan \code{\link[lavaan]{model.syntax}}). ##' @param covariates An optional character vector, only applicable when ##' \code{modelType = "mimic"}. 
The observed data are partitioned into columns ##' indicated by \code{covariates}, and the rows are permuted simultaneously for ##' the entire set before being merged with the remaining data. Thus, the ##' covariance structure is preserved among the covariates, which is necessary ##' when (e.g.) multiple dummy codes are used to represent a discrete covariate ##' or when covariates interact. If \code{covariates = NULL} when ##' \code{modelType = "mimic"}, the value of \code{covariates} is inferred by ##' searching \code{param} for predictors (i.e., variables appearing after the ##' "\code{~}" operator). ##' @param AFIs A character vector indicating which alternative fit indices (or ##' chi-squared itself) are to be used to test the multiparameter omnibus null ##' hypothesis that the constraints specified in \code{con} hold in the ##' population. Any fit measures returned by \code{\link[lavaan]{fitMeasures}} ##' may be specified (including constants like \code{"df"}, which would be ##' nonsensical). If both \code{AFIs} and \code{moreAFIs} are \code{NULL}, only ##' \code{"chisq"} will be returned. ##' @param moreAFIs Optional. A character vector indicating which (if any) ##' alternative fit indices returned by \code{\link[semTools]{moreFitIndices}} ##' are to be used to test the multiparameter omnibus null hypothesis that the ##' constraints specified in \code{con} hold in the population. ##' @param maxSparse Only applicable when \code{modelType = "mgcfa"} and at ##' least one indicator is \code{ordered}. An integer indicating the maximum ##' number of consecutive times that randomly permuted group assignment can ##' yield a sample in which at least one category (of an \code{ordered} ##' indicator) is unobserved in at least one group, such that the same set of ##' parameters cannot be estimated in each group. If such a sample occurs, group ##' assignment is randomly permuted again, repeatedly until a sample is obtained ##' with all categories observed in all groups. If \code{maxSparse} is exceeded, ##' \code{NA} will be returned for that iteration of the permutation ##' distribution. ##' @param maxNonconv An integer indicating the maximum number of consecutive ##' times that a random permutation can yield a sample for which the model does ##' not converge on a solution. If such a sample occurs, permutation is ##' attempted repeatedly until a sample is obtained for which the model does ##' converge. If \code{maxNonconv} is exceeded, \code{NA} will be returned for ##' that iteration of the permutation distribution, and a warning will be ##' printed when using \code{show} or \code{summary}. ##' @param showProgress Logical. Indicating whether to display a progress bar ##' while permuting. Silently set to \code{FALSE} when using parallel options. ##' @param warn Sets the handling of warning messages when fitting model(s) to ##' permuted data sets. See \code{\link[base]{options}}. ##' @param datafun An optional function that can be applied to the data ##' (extracted from \code{con}) after each permutation, but before fitting the ##' model(s) to each permutation. The \code{datafun} function must have an ##' argument named \code{data} that accepts a \code{data.frame}, and it must ##' return a \code{data.frame} containing the same column names. 
The column ##' order may differ, the values of those columns may differ (so be careful!), ##' and any additional columns will be ignored when fitting the model, but an ##' error will result if any column names required by the model syntax do not ##' appear in the transformed data set. Although available for any ##' \code{modelType}, \code{datafun} may be useful when using the MIMIC method ##' to test for nonuniform DIF (metric/weak invariance) by using product ##' indicators for a latent factor representing the interaction between a factor ##' and one of the \code{covariates}, in which case the product indicators would ##' need to be recalculated after each permutation of the \code{covariates}. To ##' access other R objects used within \code{permuteMeasEq}, the arguments to ##' \code{datafun} may also contain any subset of the following: \code{"con"}, ##' \code{"uncon"}, \code{"null"}, \code{"param"}, \code{"freeParam"}, ##' \code{"covariates"}, \code{"AFIs"}, \code{"moreAFIs"}, \code{"maxSparse"}, ##' \code{"maxNonconv"}, and/or \code{"iseed"}. The values for those arguments ##' will be the same as the values supplied to \code{permuteMeasEq}. ##' @param extra An optional function that can be applied to any (or all) of the ##' fitted lavaan objects (\code{con}, \code{uncon}, and/or \code{null}). This ##' function will also be applied after fitting the model(s) to each permuted ##' data set. To access the R objects used within \code{permuteMeasEq}, the ##' arguments to \code{extra} must be any subset of the following: \code{"con"}, ##' \code{"uncon"}, \code{"null"}, \code{"param"}, \code{"freeParam"}, ##' \code{"covariates"}, \code{"AFIs"}, \code{"moreAFIs"}, \code{"maxSparse"}, ##' \code{"maxNonconv"}, and/or \code{"iseed"}. The values for those arguments ##' will be the same as the values supplied to \code{permuteMeasEq}. The ##' \code{extra} function must return a named \code{numeric} vector or a named ##' \code{list} of scalars (i.e., a \code{list} of \code{numeric} vectors of ##' \code{length == 1}). Any unnamed elements (e.g., \code{""} or \code{NULL}) ##' of the returned object will result in an error. ##' @param parallelType The type of parallel operation to be used (if any). The ##' default is \code{"none"}. Forking is not possible on Windows, so if ##' \code{"multicore"} is requested on a Windows machine, the request will be ##' changed to \code{"snow"} with a message. ##' @param ncpus Integer: number of processes to be used in parallel operation. ##' If \code{NULL} (the default) and \code{parallelType %in% ##' c("multicore","snow")}, the default is one less than the maximum number of ##' processors detected by \code{\link[parallel]{detectCores}}. This default is ##' also silently set if the user specifies more than the number of processors ##' detected. ##' @param cl An optional \pkg{parallel} or \pkg{snow} cluster for use when ##' \code{parallelType = "snow"}. If \code{NULL}, a \code{"PSOCK"} cluster on ##' the local machine is created for the duration of the \code{permuteMeasEq} ##' call. If a valid \code{\link[parallel]{makeCluster}} object is supplied, ##' \code{parallelType} is silently set to \code{"snow"}, and \code{ncpus} is ##' silently set to \code{length(cl)}. ##' @param iseed Integer: Only used to set the states of the RNG when using ##' parallel options, in which case \code{\link[base]{RNGkind}} is set to ##' \code{"L'Ecuyer-CMRG"} with a message. 
See
##' \code{\link[parallel]{clusterSetRNGStream}} and Section 6 of
##' \code{vignette("parallel", "parallel")} for more details. If the user
##' supplies an invalid value, \code{iseed} is silently set to the default
##' (12345). To set the state of the RNG when not using parallel options, call
##' \code{\link[base]{set.seed}} before calling \code{permuteMeasEq}.
##'
##' @return The \linkS4class{permuteMeasEq} object representing the results of
##' testing measurement equivalence (the multiparameter omnibus test) and DIF
##' (modification indices), as well as diagnostics and any \code{extra} output.
##'
##' @author Terrence D. Jorgensen (University of Amsterdam;
##' \email{TJorgensen314@@gmail.com})
##'
##' @seealso \code{\link[stats]{TukeyHSD}}, \code{\link[lavaan]{lavTestScore}},
##' \code{\link[semTools]{measurementInvariance}},
##' \code{\link[semTools]{measurementInvarianceCat}}
##'
##' @references
##'
##' \bold{Papers about permutation tests of measurement equivalence:}
##'
##' Jorgensen, T. D., Kite, B. A., Chen, P.-Y., & Short, S. D. (in press).
##' Permutation randomization methods for testing measurement equivalence and
##' detecting differential item functioning in multiple-group confirmatory
##' factor analysis. \emph{Psychological Methods}. doi:10.1037/met0000152
##'
##' Kite, B. A., Jorgensen, T. D., & Chen, P.-Y. (in press). Random permutation
##' testing applied to measurement invariance testing with ordered-categorical
##' indicators. \emph{Structural Equation Modeling}.
##' doi:10.1080/10705511.2017.1421467
##'
##' Jorgensen, T. D. (2017). Applying permutation tests and multivariate
##' modification indices to configurally invariant models that need
##' respecification. \emph{Frontiers in Psychology, 8}(1455).
##' doi:10.3389/fpsyg.2017.01455
##'
##' \bold{Additional reading:}
##'
##' Chen, F. F. (2007). Sensitivity of goodness of fit indexes to
##' lack of measurement invariance. \emph{Structural Equation Modeling, 14}(3),
##' 464--504. doi:10.1080/10705510701301834
##'
##' Cheung, G. W., & Rensvold, R. B. (2002). Evaluating goodness-of-fit indexes
##' for testing measurement invariance. \emph{Structural Equation Modeling,
##' 9}(2), 233--255. doi:10.1207/S15328007SEM0902_5
##'
##' Meade, A. W., Johnson, E. C., & Braddy, P. W. (2008). Power and sensitivity
##' of alternative fit indices in tests of measurement invariance. \emph{Journal
##' of Applied Psychology, 93}(3), 568--592. doi:10.1037/0021-9010.93.3.568
##'
##' Widaman, K. F., & Thompson, J. S. (2003). On specifying the null model for
##' incremental fit indices in structural equation modeling. \emph{Psychological
##' Methods, 8}(1), 16--37.
doi:10.1037/1082-989X.8.1.16 ##' @examples ##' ##' \dontrun{ ##' ##' ######################## ##' ## Multiple-Group CFA ## ##' ######################## ##' ##' ## create 3-group data in lavaan example(cfa) data ##' HS <- lavaan::HolzingerSwineford1939 ##' HS$ageGroup <- ifelse(HS$ageyr < 13, "preteen", ##' ifelse(HS$ageyr > 13, "teen", "thirteen")) ##' ##' ## specify and fit an appropriate null model for incremental fit indices ##' mod.null <- c(paste0("x", 1:9, " ~ c(T", 1:9, ", T", 1:9, ", T", 1:9, ")*1"), ##' paste0("x", 1:9, " ~~ c(L", 1:9, ", L", 1:9, ", L", 1:9, ")*x", 1:9)) ##' fit.null <- cfa(mod.null, data = HS, group = "ageGroup") ##' ##' ## fit target model with varying levels of measurement equivalence ##' mod.config <- ' ##' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ##' ' ##' miout <- measurementInvariance(mod.config, data = HS, std.lv = TRUE, ##' group = "ageGroup") ##' ##' (fit.config <- miout[["fit.configural"]]) ##' (fit.metric <- miout[["fit.loadings"]]) ##' (fit.scalar <- miout[["fit.intercepts"]]) ##' ##' ##' ####################### Permutation Method ##' ##' ## fit indices of interest for multiparameter omnibus test ##' myAFIs <- c("chisq","cfi","rmsea","mfi","aic") ##' moreAFIs <- c("gammaHat","adjGammaHat") ##' ##' ## Use only 20 permutations for a demo. In practice, ##' ## use > 1000 to reduce sampling variability of estimated p values ##' ##' ## test configural invariance ##' set.seed(12345) ##' out.config <- permuteMeasEq(nPermute = 20, con = fit.config) ##' out.config ##' ##' ## test metric equivalence ##' set.seed(12345) # same permutations ##' out.metric <- permuteMeasEq(nPermute = 20, uncon = fit.config, con = fit.metric, ##' param = "loadings", AFIs = myAFIs, ##' moreAFIs = moreAFIs, null = fit.null) ##' summary(out.metric, nd = 4) ##' ##' ## test scalar equivalence ##' set.seed(12345) # same permutations ##' out.scalar <- permuteMeasEq(nPermute = 20, uncon = fit.metric, con = fit.scalar, ##' param = "intercepts", AFIs = myAFIs, ##' moreAFIs = moreAFIs, null = fit.null) ##' summary(out.scalar) ##' ##' ## Not much to see without significant DIF. ##' ## Try using an absurdly high alpha level for illustration. 
##' outsum <- summary(out.scalar, alpha = .50) ##' ##' ## notice that the returned object is the table of DIF tests ##' outsum ##' ##' ## visualize permutation distribution ##' hist(out.config, AFI = "chisq") ##' hist(out.metric, AFI = "chisq", nd = 2, alpha = .01, ##' legendArgs = list(x = "topright")) ##' hist(out.scalar, AFI = "cfi", printLegend = FALSE) ##' ##' ##' ####################### Extra Output ##' ##' ## function to calculate expected change of Group-2 and -3 latent means if ##' ## each intercept constraint were released ##' extra <- function(con) { ##' output <- list() ##' output["x1.vis2"] <- lavTestScore(con, release = 19:20, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[70] ##' output["x1.vis3"] <- lavTestScore(con, release = 19:20, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[106] ##' output["x2.vis2"] <- lavTestScore(con, release = 21:22, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[70] ##' output["x2.vis3"] <- lavTestScore(con, release = 21:22, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[106] ##' output["x3.vis2"] <- lavTestScore(con, release = 23:24, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[70] ##' output["x3.vis3"] <- lavTestScore(con, release = 23:24, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[106] ##' output["x4.txt2"] <- lavTestScore(con, release = 25:26, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[71] ##' output["x4.txt3"] <- lavTestScore(con, release = 25:26, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[107] ##' output["x5.txt2"] <- lavTestScore(con, release = 27:28, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[71] ##' output["x5.txt3"] <- lavTestScore(con, release = 27:28, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[107] ##' output["x6.txt2"] <- lavTestScore(con, release = 29:30, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[71] ##' output["x6.txt3"] <- lavTestScore(con, release = 29:30, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[107] ##' output["x7.spd2"] <- lavTestScore(con, release = 31:32, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[72] ##' output["x7.spd3"] <- lavTestScore(con, release = 31:32, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[108] ##' output["x8.spd2"] <- lavTestScore(con, release = 33:34, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[72] ##' output["x8.spd3"] <- lavTestScore(con, release = 33:34, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[108] ##' output["x9.spd2"] <- lavTestScore(con, release = 35:36, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[72] ##' output["x9.spd3"] <- lavTestScore(con, release = 35:36, univariate = FALSE, ##' epc = TRUE, warn = FALSE)$epc$epc[108] ##' output ##' } ##' ##' ## observed EPC ##' extra(fit.scalar) ##' ##' ## permutation results, including extra output ##' set.seed(12345) # same permutations ##' out.scalar <- permuteMeasEq(nPermute = 20, uncon = fit.metric, con = fit.scalar, ##' param = "intercepts", AFIs = myAFIs, ##' moreAFIs = moreAFIs, null = fit.null, extra = extra) ##' ## summarize extra output ##' summary(out.scalar, extra = TRUE) ##' ##' ##' ########### ##' ## MIMIC ## ##' ########### ##' ##' ## Specify Restricted Factor Analysis (RFA) model, equivalent to MIMIC, but ##' ## the factor covaries with the covariate instead of being regressed on it. 
##' ## The covariate defines a single-indicator construct, and the ##' ## double-mean-centered products of the indicators define a latent ##' ## interaction between the factor and the covariate. ##' mod.mimic <- ' ##' visual =~ x1 + x2 + x3 ##' age =~ ageyr ##' age.by.vis =~ x1.ageyr + x2.ageyr + x3.ageyr ##' ##' x1 ~~ x1.ageyr ##' x2 ~~ x2.ageyr ##' x3 ~~ x3.ageyr ##' ' ##' ##' HS.orth <- indProd(var1 = paste0("x", 1:3), var2 = "ageyr", match = FALSE, ##' data = HS[ , c("ageyr", paste0("x", 1:3))] ) ##' fit.mimic <- cfa(mod.mimic, data = HS.orth, meanstructure = TRUE) ##' summary(fit.mimic, stand = TRUE) ##' ##' ## Whereas MIMIC models specify direct effects of the covariate on an indicator, ##' ## DIF can be tested in RFA models by specifying free loadings of an indicator ##' ## on the covariate's construct (uniform DIF, scalar invariance) and the ##' ## interaction construct (nonuniform DIF, metric invariance). ##' param <- as.list(paste0("age + age.by.vis =~ x", 1:3)) ##' names(param) <- paste0("x", 1:3) ##' # param <- as.list(paste0("x", 1:3, " ~ age + age.by.vis")) # equivalent ##' ##' ## test both parameters simultaneously for each indicator ##' do.call(rbind, lapply(param, function(x) lavTestScore(fit.mimic, add = x)$test)) ##' ## or test each parameter individually ##' lavTestScore(fit.mimic, add = as.character(param)) ##' ##' ##' ####################### Permutation Method ##' ##' ## function to recalculate interaction terms after permuting the covariate ##' datafun <- function(data) { ##' d <- data[, !names(data) %in% paste0("x", 1:3, ".ageyr")] ##' indProd(var1 = paste0("x", 1:3), var2 = "ageyr", match = FALSE, data = d) ##' } ##' ##' set.seed(12345) ##' perm.mimic <- permuteMeasEq(nPermute = 20, modelType = "mimic", ##' con = fit.mimic, param = param, ##' covariates = "ageyr", datafun = datafun) ##' summary(perm.mimic) ##' ##' } ##' ##' @export permuteMeasEq <- function(nPermute, modelType = c("mgcfa","mimic"), con, uncon = NULL, null = NULL, param = NULL, freeParam = NULL, covariates = NULL, AFIs = NULL, moreAFIs = NULL, maxSparse = 10, maxNonconv = 10, showProgress = TRUE, warn = -1, datafun, extra, parallelType = c("none","multicore","snow"), ncpus = NULL, cl = NULL, iseed = 12345) { ## save arguments from call availableArgs <- as.list(formals(permuteMeasEq)) argNames <- names(availableArgs) if (missing(datafun)) argNames <- setdiff(argNames, "datafun") if (missing(extra)) argNames <- setdiff(argNames, "extra") for (aa in argNames) { if (!is.null(eval(as.name(aa)))) suppressWarnings(availableArgs[[aa]] <- eval(as.name(aa))) } ## check and return them fullCall <- do.call(checkPermArgs, availableArgs) ## assign them to workspace (also adds old_RNG & oldSeed to workspace) for (aa in names(fullCall)) assign(aa, fullCall[[aa]]) ###################### SAVE OBSERVED RESULTS ########################## AFI.obs <- do.call(getAFIs, fullCall) ## save modification indices if !is.null(param) if (is.null(param)) { MI.obs <- data.frame(NULL) } else MI.obs <- do.call(getMIs, fullCall) ## anything extra? 
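  ## If an extra() function was supplied, evaluate it on the observed
  ## (unpermuted) model(s), passing only the arguments its formals() request,
  ## then check that it returned a named vector/list of scalars.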
if (!missing(extra)) { extraArgs <- formals(extra) neededArgs <- intersect(names(extraArgs), names(fullCall)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) fullCall[nn])) extraOut <- do.call(extra, extraArgs) ## check that extra() returns a named list of scalars if (!is.list(extraOut)) extraOut <- as.list(extraOut) wrongFormat <- paste('Function "extra" must return a numeric vector or a', 'list of scalars, with each element named.') if (!all(sapply(extraOut, is.numeric))) stop(wrongFormat) if (!all(sapply(extraOut, length) == 1L)) stop(wrongFormat) if (is.null(names(extraOut)) | any(names(extraOut) == "")) stop(wrongFormat) extra.obs <- do.call(c, extraOut) } else extra.obs <- numeric(length = 0L) ######################### PREP DATA ############################## argList <- fullCall[c("con","uncon","null","param","freeParam","covariates", "AFIs","moreAFIs","maxSparse","maxNonconv","warn","iseed")] argList$G <- lavInspect(con, "group") ## check for categorical variables # catVars <- lavaan::lavNames(con, type = "ov.ord") # numVars <- lavaan::lavNames(con, type = "ov.num") # latentVars <- lavaan::lavNames(con, type = "lv.regular") ## assemble data to which the models were fit if (length(argList$G)) { dataList <- mapply(FUN = function(x, g, n) { y <- data.frame(as.data.frame(x), g, stringsAsFactors = FALSE) names(y) <- c(n, argList$G) y }, SIMPLIFY = FALSE, x = lavInspect(con, "data"), g = lavInspect(con, "group.label"), n = lavaan::lavNames(con, type = "ov", group = seq_along(lavInspect(con, "group.label")))) argList$d <- do.call(rbind, dataList) } else { argList$d <- as.data.frame(lavInspect(con, "data")) names(argList$d) <- lavaan::lavNames(con, type = "ov") } ## check that covariates are actual variables if (modelType == "mimic") { if (length(covariates) && !all(covariates %in% names(argList$d))) stop('These specified covariates are not columns in the data.frame:\n', paste(setdiff(covariates, names(argList$d)), collapse = ", ")) } ## anything extra? if (!missing(extra)) argList$extra <- extra if (!missing(datafun)) argList$datafun <- datafun ###################### PERMUTED RESULTS ########################### ## permute and return distributions of (delta)AFIs, largest MI, and extras if (showProgress) { mypb <- utils::txtProgressBar(min = 1, max = nPermute, initial = 1, char = "=", width = 50, style = 3, file = "") permuDist <- list() for (j in 1:nPermute) { permuDist[[j]] <- do.call(paste("permuteOnce", modelType, sep = "."), args = c(argList, i = j)) utils::setTxtProgressBar(mypb, j) } close(mypb) } else if (parallelType == "multicore") { if (length(iseed)) set.seed(iseed) argList$FUN <- paste("permuteOnce", modelType, sep = ".") argList$X <- 1:nPermute argList$mc.cores <- ncpus argList$mc.set.seed <- TRUE pmcl <- function(...) { parallel::mclapply(...) } permuDist <- do.call(pmcl, args = argList) ## restore old RNG type if (fullCall$old_RNG[1] != "L'Ecuyer-CMRG") RNGkind(fullCall$old_RNG[1]) } else if (parallelType == "snow") { stopTheCluster <- FALSE if (is.null(cl)) { stopTheCluster <- TRUE cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) } parallel::clusterSetRNGStream(cl, iseed = iseed) argList$cl <- cl argList$X <- 1:nPermute argList$fun <- paste("permuteOnce", modelType, sep = ".") parallel::clusterExport(cl, varlist = c(argList$fun, "getAFIs","getMIs")) #FIXME: need update? tempppl <- function(...) { parallel::parLapply(...) 
} permuDist <- do.call(tempppl, args = argList) if (stopTheCluster) parallel::stopCluster(cl) ## restore old RNG type if (fullCall$old_RNG[1] != "L'Ecuyer-CMRG") RNGkind(fullCall$old_RNG[1]) } else { argList$X <- 1:nPermute argList$FUN <- paste("permuteOnce", modelType, sep = ".") permuDist <- do.call(lapply, args = argList) } ## extract AFI distribution if (length(AFI.obs) > 1) { AFI.dist <- as.data.frame(t(sapply(permuDist, function(x) x$AFI))) } if (length(AFI.obs) == 1L) { AFI.dist <- data.frame(sapply(permuDist, function(x) x$AFI)) colnames(AFI.dist) <- names(AFI.obs) } ## identify badness-of-fit measures badness <- grepl(pattern = "fmin|chi|aic|bic|rmr|rmsea|cn|sic|hqc", x = names(AFI.obs), ignore.case = TRUE) ## calculate all one-directional p-values AFI.pval <- mapply(FUN = function(x, y, b) { if (b) return(mean(x >= y, na.rm = TRUE)) mean(x <= y, na.rm = TRUE) }, x = unclass(AFI.dist), y = AFI.obs, b = badness) ## extract distribution of maximum modification indices MI.dist <- as.numeric(unlist(lapply(permuDist, function(x) x$MI))) ## calculate Tukey-adjusted p values for modification indices if (!is.null(param)) { MI.obs$tukey.p.value <- sapply(MI.obs$X2, function(i) mean(i <= MI.dist, na.rm = TRUE)) MI.obs <- as.data.frame(unclass(MI.obs)) rownames(MI.obs) <- names(param) } ## anything extra? if (!missing(extra)) { extra.dist <- do.call(rbind, lapply(permuDist, function(x) x$extra)) } else extra.dist <- data.frame(NULL) ## save parameter table for show/summary methods PT <- as.data.frame(parTable(con)) PT$par <- paste0(PT$lhs, PT$op, PT$rhs) if (length(lavInspect(con, "group"))) PT$group.label[PT$group > 0] <- lavInspect(con, "group.label")[PT$group[PT$group > 0] ] ## return observed results, permutation p values, and ANOVA results if (is.null(uncon)) { delta <- lavaan::anova(con) } else { delta <- lavaan::anova(uncon, con) } ANOVA <- sapply(delta[,c("Chisq diff","Df diff","Pr(>Chisq)")], function(x) x[2]) out <- new("permuteMeasEq", PT = PT, modelType = modelType, ANOVA = ANOVA, AFI.obs = AFI.obs, AFI.dist = AFI.dist, AFI.pval = AFI.pval, MI.obs = MI.obs, MI.dist = MI.dist, extra.obs = extra.obs, extra.dist = extra.dist, n.Permutations = nPermute, n.Converged = sum(!is.na(AFI.dist[,1])), n.nonConverged = sapply(permuDist, function(x) x$n.nonConverged), n.Sparse = sapply(permuDist, function(x) x$n.Sparse), oldSeed = fullCall$oldSeed) out } ## ---------------- ## Hidden Functions ## ---------------- ## function to check validity of arguments to permuteMeasEq() #' @importFrom lavaan lavInspect parTable checkPermArgs <- function(nPermute, modelType, con, uncon, null, param, freeParam, covariates, AFIs, moreAFIs, maxSparse, maxNonconv, showProgress, warn, datafun, extra, parallelType, ncpus, cl, iseed) { fixedCall <- as.list(match.call())[-1] fixedCall$nPermute <- as.integer(nPermute[1]) fixedCall$modelType <- modelType[1] if (!fixedCall$modelType %in% c("mgcfa","mimic","long")) stop('modelType must be one of c("mgcfa","mimic","long")') if (fixedCall$modelType == "long") stop('modelType "long" is not yet available.') if (fixedCall$modelType == "mgcfa" && lavInspect(con, "ngroups") == 1L) stop('modelType = "mgcfa" applies only to multigroup models.') if (fixedCall$modelType == "mimic") { uncon <- NULL fixedCall$uncon <- NULL fixedCall <- c(fixedCall, list(uncon = NULL)) } ## strip white space if (is.list(param)) { fixedCall$param <- lapply(param, function(cc) gsub("[[:space:]]+", "", cc)) } else if (!is.null(param)) fixedCall$param <- gsub("[[:space:]]+", "", param) if 
(!is.null(freeParam)) fixedCall$freeParam <- gsub("[[:space:]]+", "", freeParam) if (fixedCall$modelType == "mimic") { # PT <- lavaan::lavaanify(fixedCall$param) # checkCovs <- unique(PT$rhs[PT$op == "~"]) # if (is.null(covariates)) covariates <- checkCovs # if (length(setdiff(covariates, checkCovs))) # warning('Argument "covariates" includes predictors not in argument "param"') ##### ordVars <- lavaan::lavNames(con, type = "ov.ord") fixedCall$covariates <- as.character(covariates) } fixedCall$maxSparse <- as.integer(maxSparse[1]) fixedCall$maxNonconv <- as.integer(maxNonconv[1]) fixedCall$showProgress <- as.logical(showProgress[1]) fixedCall$warn <- as.integer(warn[1]) fixedCall$oldSeed <- as.integer(NULL) parallelType <- as.character(parallelType[1]) if (!parallelType %in% c("none","multicore","snow")) parallelType <- "none" if (!is.null(cl)) { if (!is(cl, "cluster")) stop("Invalid cluster object. Check class(cl)") parallelType <- "snow" ncpus <- length(cl) } if (parallelType == "multicore" && .Platform$OS.type == "windows") { parallelType <- "snow" message("'multicore' option unavailable on Windows. Using 'snow' instead.") } ## parallel settings, adapted from boot::boot() if (parallelType != "none") { if (is.null(ncpus) || ncpus > parallel::detectCores()) { ncpus <- parallel::detectCores() - 1 } if (ncpus <= 1L) { parallelType <- "none" } else { fixedCall$showProgress <- FALSE fixedCall$old_RNG <- RNGkind() fixedCall$oldSeed <- .Random.seed if (fixedCall$old_RNG[1] != "L'Ecuyer-CMRG") { RNGkind("L'Ecuyer-CMRG") message("Your RNGkind() was changed from ", fixedCall$old_RNG[1], " to L'Ecuyer-CMRG, which is required for reproducibility ", " in parallel jobs. Your RNGkind() has been returned to ", fixedCall$old_RNG[1], " but the seed has not been set. ", " The state of your previous RNG is saved in the slot ", " named 'oldSeed', if you want to restore it using ", " the syntax:\n", ".Random.seed[-1] <- permuteMeasEqObject@oldSeed[-1]") } fixedCall$iseed <- as.integer(iseed[1]) if (is.na(fixedCall$iseed)) fixedCall$iseed <- 12345 } } fixedCall$parallelType <- parallelType if (is.null(ncpus)) { fixedCall$ncpus <- NULL fixedCall <- c(fixedCall, list(ncpus = NULL)) } else fixedCall$ncpus <- ncpus ## check that "param" is NULL if uncon is NULL, and check for lavaan class notLavaan <- "Non-NULL 'con', 'uncon', or 'null' must be fitted lavaan object." if (is.null(uncon)) { if (!is.null(fixedCall$param) && fixedCall$modelType == "mgcfa") { message(c(" When 'uncon = NULL', only configural invariance is tested.", "\n So the 'param' argument was changed to NULL.")) fixedCall$param <- NULL fixedCall <- c(fixedCall, list(param = NULL)) } if (class(con) != "lavaan") stop(notLavaan) } else { if (class(con) != "lavaan") stop(notLavaan) if (class(uncon) != "lavaan") stop(notLavaan) } if (!is.null(null)) { if (class(null) != "lavaan") stop(notLavaan) } ############ FIXME: check that lavInspect(con, "options")$conditional.x = FALSE (find defaults for continuous/ordered indicators) if (!is.null(fixedCall$param)) { ## Temporarily warn about testing thresholds without necessary constraints. 
## FIXME: check for binary indicators
    if ("thresholds" %in% fixedCall$param | any(grepl("\\|", fixedCall$param))) {
      warning(c("This function is not yet optimized for testing thresholds.\n",
                "Necessary identification constraints might not be specified."))
    }
    ## collect parameter types for "mgcfa"
    if (fixedCall$modelType != "mimic") {
      ## save all estimates from constrained model
      PT <- parTable(con)[ , c("lhs","op","rhs","group","plabel")]
      ## extract parameters of interest
      paramTypes <- c("loadings","intercepts","thresholds","residuals","means",
                      "residual.covariances","lv.variances","lv.covariances")
      params <- PT[paste0(PT$lhs, PT$op, PT$rhs) %in% setdiff(fixedCall$param,
                                                              paramTypes), ]
      ## add parameters by type, if any are specified
      types <- intersect(fixedCall$param, paramTypes)
      ov.names <- lavaan::lavNames(con, "ov")
      isOV <- PT$lhs %in% ov.names
      lv.names <- con@pta$vnames$lv[[1]]
      isLV <- PT$lhs %in% lv.names & PT$rhs %in% lv.names
      if ("loadings" %in% types) params <- rbind(params, PT[PT$op == "=~", ])
      if ("intercepts" %in% types) {
        params <- rbind(params, PT[isOV & PT$op == "~1", ])
      }
      if ("thresholds" %in% types) params <- rbind(params, PT[PT$op == "|", ])
      if ("residuals" %in% types) {
        params <- rbind(params, PT[isOV & PT$lhs == PT$rhs & PT$op == "~~", ])
      }
      if ("residual.covariances" %in% types) {
        params <- rbind(params, PT[isOV & PT$lhs != PT$rhs & PT$op == "~~", ])
      }
      if ("means" %in% types) {
        params <- rbind(params, PT[PT$lhs %in% lv.names & PT$op == "~1", ])
      }
      if ("lv.variances" %in% types) {
        params <- rbind(params, PT[isLV & PT$lhs == PT$rhs & PT$op == "~~", ])
      }
      if ("lv.covariances" %in% types) {
        params <- rbind(params, PT[isLV & PT$lhs != PT$rhs & PT$op == "~~", ])
      }
      ## remove parameters specified by "freeParam" argument
      params <- params[!paste0(params$lhs, params$op,
                               params$rhs) %in% fixedCall$freeParam, ]
      fixedCall$param <- paste0(params$lhs, params$op, params$rhs)
    }
  }

  if (is.null(AFIs) & is.null(moreAFIs)) {
    message("No AFIs were selected, so only chi-squared will be permuted.\n")
    fixedCall$AFIs <- "chisq"
    AFIs <- "chisq"
  }
  if ("ecvi" %in% AFIs & lavInspect(con, "ngroups") > 1L)
    stop("ECVI is not available for multigroup models.")
  ## check estimators
  leastSq <- grepl("LS", lavInspect(con, "options")$estimator)
  if (!is.null(uncon)) {
    if (uncon@Options$estimator != lavInspect(con, "options")$estimator)
      stop("Models must be fit using same estimator.")
  }
  if (!is.null(null)) {
    if (lavInspect(null, "options")$estimator != lavInspect(con, "options")$estimator)
      stop("Models must be fit using same estimator.")
  }
  ## check extra functions, if any
  restrictedArgs <- c("con","uncon","null","param","freeParam","covariates",
                      "AFIs","moreAFIs","maxSparse","maxNonconv","iseed")
  if (!missing(datafun)) {
    if (!is.function(datafun)) stop('Argument "datafun" must be a function.')
    extraArgs <- formals(datafun)
    if (!all(names(extraArgs) %in% c(restrictedArgs, "data")))
      stop('The user-supplied function "datafun" may only have arguments ',
           'among the following:\n', paste(c(restrictedArgs, "data"), collapse = ", "))
  }
  if (!missing(extra)) {
    if (!is.function(extra)) stop('Argument "extra" must be a function.')
    extraArgs <- formals(extra)
    if (!all(names(extraArgs) %in% restrictedArgs))
      stop('The user-supplied function "extra" may only have arguments ',
           'among the following:\n', paste(restrictedArgs, collapse = ", "))
  }
  ## return evaluated list of other arguments
  lapply(fixedCall, eval)
}


## function to extract fit measures
#' @importFrom lavaan lavInspect
getAFIs <- function(...) {
  dots <- list(...)
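  ## AFI1 will hold fit measures from the unconstrained model (if supplied),
  ## AFI0 from the constrained model; observed (delta) AFIs are computed from
  ## these at the end.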
AFI1 <- list() AFI0 <- list() leastSq <- grepl("LS", lavInspect(dots$con, "options")$estimator) ## check validity of user-specified AFIs, save output if (!is.null(dots$AFIs)) { IC <- grep("ic|logl", dots$AFIs, value = TRUE) if (leastSq & length(IC)) { stop(paste("Argument 'AFIs' includes invalid options:", paste(IC, collapse = ", "), "Information criteria unavailable for least-squares estimators.", sep = "\n")) } if (!is.null(dots$uncon)) AFI1[[1]] <- lavaan::fitMeasures(dots$uncon, fit.measures = dots$AFIs, baseline.model = dots$null) AFI0[[1]] <- lavaan::fitMeasures(dots$con, fit.measures = dots$AFIs, baseline.model = dots$null) } ## check validity of user-specified moreAFIs if (!is.null(dots$moreAFIs)) { IC <- grep("ic|hqc", dots$moreAFIs, value = TRUE) if (leastSq & length(IC)) { stop(paste("Argument 'moreAFIs' includes invalid options:", paste(IC, collapse = ", "), "Information criteria unavailable for least-squares estimators.", sep = "\n")) } if (!is.null(dots$uncon)) AFI1[[2]] <- moreFitIndices(dots$uncon, fit.measures = dots$moreAFIs) AFI0[[2]] <- moreFitIndices(dots$con, fit.measures = dots$moreAFIs) } ## save observed AFIs or delta-AFIs if (is.null(dots$uncon)) { AFI.obs <- unlist(AFI0) } else { AFI.obs <- unlist(AFI0) - unlist(AFI1) } AFI.obs } ## Function to extract modification indices for equality constraints #' @importFrom lavaan parTable getMIs <- function(...) { dots <- list(...) if (dots$modelType == "mgcfa") { ## save all estimates from constrained model PT <- parTable(dots$con)[ , c("lhs","op","rhs","group","plabel")] ## extract parameters of interest params <- PT[paste0(PT$lhs, PT$op, PT$rhs) %in% dots$param, ] ## return modification indices for specified constraints (param) MIs <- lavaan::lavTestScore(dots$con)$uni MI.obs <- MIs[MIs$lhs %in% params$plabel, ] } else if (dots$modelType == "mimic") { if (is.list(dots$param)) { MI <- lapply(dots$param, function(x) lavaan::lavTestScore(dots$con, add = x)$test) MI.obs <- do.call(rbind, MI) } else MI.obs <- lavaan::lavTestScore(dots$con, add = dots$param)$uni } else if (dots$modelType == "long") { ## coming soon } MI.obs } ## Functions to find delta-AFIs & maximum modification index in one permutation #' @importFrom lavaan lavInspect permuteOnce.mgcfa <- function(i, d, G, con, uncon, null, param, freeParam, covariates, AFIs, moreAFIs, maxSparse, maxNonconv, iseed, warn, extra = NULL, datafun = NULL) { old_warn <- options()$warn options(warn = warn) ## save arguments from call argNames <- names(formals(permuteOnce.mgcfa)) availableArgs <- lapply(argNames, function(x) eval(as.name(x))) names(availableArgs) <- argNames nSparse <- 0L nTries <- 1L while ( (nSparse <= maxSparse) & (nTries <= maxNonconv) ) { ## permute grouping variable d[ , G] <- sample(d[ , G]) ## transform data? 
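    ## (If a datafun was supplied, rebuild any derived columns from the newly
    ## permuted data, e.g., product indicators, before refitting the models.)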
if (!is.null(datafun)) { extraArgs <- formals(datafun) neededArgs <- intersect(names(extraArgs), names(availableArgs)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn])) extraArgs$data <- d originalNames <- colnames(d) d <- do.call(datafun, extraArgs) ## coerce extraOut to data.frame if (!is.data.frame(d)) stop('Argument "datafun" did not return a data.frame') if (!all(originalNames %in% colnames(d))) stop('The data.frame returned by argument "datafun" did not contain ', 'column names required by the model:\n', paste(setdiff(originalNames, colnames(d)), collapse = ", ")) } ## for ordered indicators, check that groups have same observed categories ordVars <- lavaan::lavNames(con, type = "ov.ord") if (length(ordVars) > 0) { try(onewayTables <- lavaan::lavTables(d, dimension = 1L, categorical = ordVars, group = G), silent = TRUE) if (exists("onewayTables")) { if (any(onewayTables$obs.prop == 1)) { nSparse <- nSparse + 1L next } } else { ## no "onewayTables" probably indicates empty categories in 1+ groups nSparse <- nSparse + 1L next } } ## fit null model, if it exists if (!is.null(null)) { out.null <- lavaan::update(null, data = d, group.label = lavInspect(con, "group.label")) } ## fit constrained model, check for convergence try(out0 <- lavaan::update(con, data = d, group.label = lavInspect(con, "group.label"))) if (!exists("out0")) { nTries <- nTries + 1L next } if (!lavInspect(out0, "converged")) { nTries <- nTries + 1L next } ## fit unconstrained model (unless NULL), check for convergence if (!is.null(uncon)) { try(out1 <- lavaan::update(uncon, data = d, group.label = lavInspect(con, "group.label"))) if (!exists("out1")) { nTries <- nTries + 1L next } if (!lavInspect(out1, "converged")) { nTries <- nTries + 1L next } } ## If you get this far, everything converged, so break WHILE loop break } ## if WHILE loop ended before getting results, return NA if ( (nSparse == maxSparse) | (nTries == maxNonconv) ) { allAFIs <- c(AFIs, moreAFIs) AFI <- rep(NA, sum(!is.na(allAFIs))) names(AFI) <- allAFIs[!is.na(allAFIs)] MI <- if (is.null(param)) NULL else NA extra.obs <- NA nTries <- nTries + 1L } else { availableArgs$con <- out0 if (exists("out1")) availableArgs$uncon <- out1 if (exists("out.null")) availableArgs$null <- out.null AFI <- do.call(getAFIs, availableArgs) ## save max(MI) if !is.null(param) if (is.null(param)) { MI <- NULL } else { MI <- max(do.call(getMIs, c(availableArgs, modelType = "mgcfa"))$X2) } ## anything extra? 
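    ## Evaluate the user's extra() function on this permutation's fitted model(s).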
if (!is.null(extra)) { extraArgs <- formals(extra) neededArgs <- intersect(names(extraArgs), names(availableArgs)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn])) extraOut <- do.call(extra, extraArgs) ## coerce extraOut to data.frame if (!is.list(extraOut)) extraOut <- as.list(extraOut) extra.obs <- data.frame(extraOut) } else extra.obs <- data.frame(NULL) } options(warn = old_warn) list(AFI = AFI, MI = MI, extra = extra.obs, n.nonConverged = nTries - 1L, n.Sparse = nSparse) } #' @importFrom lavaan lavInspect permuteOnce.mimic <- function(i, d, G, con, uncon, null, param, freeParam, covariates, AFIs, moreAFIs, maxSparse, maxNonconv, iseed, warn, extra = NULL, datafun = NULL) { old_warn <- options()$warn options(warn = warn) ## save arguments from call argNames <- names(formals(permuteOnce.mimic)) availableArgs <- lapply(argNames, function(x) eval(as.name(x))) names(availableArgs) <- argNames nTries <- 1L while (nTries <= maxNonconv) { ## permute covariate(s) within each group if (length(G)) { for (gg in lavInspect(con, "group.label")) { dG <- d[ d[[G]] == gg, ] N <- nrow(dG) newd <- dG[sample(1:N, N), covariates, drop = FALSE] for (COV in covariates) d[d[[G]] == gg, COV] <- newd[ , COV] } } else { N <- nrow(d) newd <- d[sample(1:N, N), covariates, drop = FALSE] for (COV in covariates) d[ , COV] <- newd[ , COV] } ## transform data? if (!is.null(datafun)) { extraArgs <- formals(datafun) neededArgs <- intersect(names(extraArgs), names(availableArgs)) extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn])) extraArgs$data <- d originalNames <- colnames(d) d <- do.call(datafun, extraArgs) ## coerce extraOut to data.frame if (!is.data.frame(d)) stop('Argument "datafun" did not return a data.frame') if (!all(originalNames %in% colnames(d))) stop('The data.frame returned by argument "datafun" did not contain ', 'column names required by the model:\n', paste(setdiff(originalNames, colnames(d)), collapse = ", ")) } ## fit null model, if it exists if (!is.null(null)) { out.null <- lavaan::update(null, data = d, group.label = lavInspect(con, "group.label")) } ## fit constrained model try(out0 <- lavaan::update(con, data = d, group.label = lavInspect(con, "group.label"))) ## check for convergence if (!exists("out0")) { nTries <- nTries + 1L next } if (!lavInspect(out0, "converged")) { nTries <- nTries + 1L next } ## If you get this far, everything converged, so break WHILE loop break } ## if WHILE loop ended before getting results, return NA if (nTries == maxNonconv) { allAFIs <- c(AFIs, moreAFIs) AFI <- rep(NA, sum(!is.na(allAFIs))) names(AFI) <- allAFIs[!is.na(allAFIs)] MI <- if (is.null(param)) NULL else NA extra.obs <- NA nTries <- nTries + 1L } else { availableArgs$con <- out0 if (exists("out.null")) availableArgs$null <- out.null AFI <- do.call(getAFIs, availableArgs) if (is.null(param)) { MI <- NULL } else { MI <- max(do.call(getMIs, c(availableArgs, modelType = "mimic"))$X2) } ## anything extra? 
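    ## As in permuteOnce.mgcfa: collect extra() output for this permutation.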
if (!is.null(extra)) {
      extraArgs <- formals(extra)
      neededArgs <- intersect(names(extraArgs), names(availableArgs))
      extraArgs <- do.call(c, lapply(neededArgs, function(nn) availableArgs[nn]))
      extraOut <- do.call(extra, extraArgs)
      ## coerce extraOut to data.frame
      if (!is.list(extraOut)) extraOut <- as.list(extraOut)
      extra.obs <- data.frame(extraOut)
    } else extra.obs <- data.frame(NULL)
  }
  options(warn = old_warn)
  list(AFI = AFI, MI = MI, extra = extra.obs,
       n.nonConverged = nTries - 1L, n.Sparse = integer(length = 0))
}
semTools/R/powerAnalysisRMSEA.R0000644000176200001440000003536613432435606016056 0ustar liggesusers
### Sunthud Pornprasertmanit, Alexander M. Schoemann, Kristopher J. Preacher, Donna Coffman
### Last updated: 9 March 2018


#' Plot power curves for RMSEA
#'
#' Plots power of RMSEA over a range of sample sizes
#'
#' This function creates a plot of power for RMSEA against a range of sample
#' sizes. The plot places sample size on the horizontal axis and power on the
#' vertical axis. The user should indicate the lower and upper values for
#' sample size and the sample size between each estimate ("step size"). We
#' strongly urge the user to read the sources below (see References) before
#' proceeding. A web version of this function is available at:
#' \url{http://quantpsy.org/rmsea/rmseaplot.htm}.
#'
#'
#' @importFrom stats qchisq pchisq
#'
#' @param rmsea0 Null RMSEA
#' @param rmseaA Alternative RMSEA
#' @param df Model degrees of freedom
#' @param nlow Lower sample size
#' @param nhigh Upper sample size
#' @param steps Increase in sample size for each iteration. Smaller values of
#' steps will lead to more precise plots. However, smaller step sizes mean a
#' longer run time.
#' @param alpha Alpha level used in power calculations
#' @param group The number of groups used to calculate RMSEA.
#' @param \dots The additional arguments for the plot function.
#' @return Plot of power for RMSEA against a range of sample sizes
#' @author
#' Alexander M. Schoemann (East Carolina University; \email{schoemanna@@ecu.edu})
#'
#' Kristopher J. Preacher (Vanderbilt University; \email{kris.preacher@@vanderbilt.edu})
#'
#' Donna L. Coffman (Pennsylvania State University; \email{dlc30@@psu.edu})
#'
#' @seealso \itemize{
#' \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions
#' \item \code{\link{findRMSEApower}} to find the statistical power based on
#' population RMSEA given a sample size
#' \item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for
#' a given statistical power based on population RMSEA
#' }
#' @references
#' MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing
#' differences between nested covariance structure models: Power analysis and
#' null hypotheses. \emph{Psychological Methods, 11}(1), 19--35.
#' doi:10.1037/1082-989X.11.1.19
#'
#' MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis
#' and determination of sample size for covariance structure modeling.
#' \emph{Psychological Methods, 1}(2), 130--149. doi:10.1037/1082-989X.1.2.130
#'
#' MacCallum, R. C., Lee, T., & Browne, M. W. (2010). The issue of isopower in
#' power analysis for tests of structural equation models. \emph{Structural
#' Equation Modeling, 17}(1), 23--41. doi:10.1080/10705510903438906
#'
#' Preacher, K. J., Cai, L., & MacCallum, R. C. (2007). Alternatives to
#' traditional model comparison strategies for covariance structure models. In
#' T. D. Little, J. A. Bovaird, & N. A.
#' Card (Eds.), \emph{Modeling contextual
#' effects in longitudinal studies} (pp. 33--62). Mahwah, NJ: Lawrence Erlbaum
#' Associates.
#'
#' Steiger, J. H. (1998). A note on multiple sample extensions of the RMSEA fit
#' index. \emph{Structural Equation Modeling, 5}(4), 411--419.
#' doi:10.1080/10705519809540115
#'
#' Steiger, J. H., & Lind, J. C. (1980, June). \emph{Statistically based tests
#' for the number of factors.} Paper presented at the annual meeting of the
#' Psychometric Society, Iowa City, IA.
#' @examples
#'
#' plotRMSEApower(rmsea0 = .025, rmseaA = .075, df = 23,
#'                nlow = 100, nhigh = 500, steps = 10)
#'
#' @export
plotRMSEApower <- function(rmsea0, rmseaA, df, nlow, nhigh, steps = 1,
                           alpha = .05, group = 1, ...) {
  pow1 <- 0
  nseq <- seq(nlow, nhigh, by = steps)
  for (i in nseq) {
    ncp0 <- ((i - 1) * df * rmsea0^2) / group
    ncpa <- ((i - 1) * df * rmseaA^2) / group
    ## compute power
    if (rmsea0 < rmseaA) {
      cval <- qchisq(alpha, df, ncp = ncp0, lower.tail = FALSE)
      pow <- pchisq(cval, df, ncp = ncpa, lower.tail = FALSE)
    }
    if (rmsea0 > rmseaA) {
      cval <- qchisq(1 - alpha, df, ncp = ncp0, lower.tail = FALSE)
      pow <- 1 - pchisq(cval, df, ncp = ncpa, lower.tail = FALSE)
    }
    pow1 <- c(pow1, pow)
  }
  pow1 <- pow1[-1]
  plot(nseq, pow1, xlab = "Sample Size", ylab = "Power",
       main = "Compute Power for RMSEA", type = "l", ...)
}



#' Plot the sampling distributions of RMSEA
#'
#' Plots the sampling distributions of RMSEA based on the noncentral chi-square
#' distributions
#'
#' This function creates overlapping plots of the sampling distribution of
#' RMSEA based on the noncentral \eqn{\chi^2} distribution (MacCallum, Browne, &
#' Sugawara, 1996). First, the noncentrality parameter (\eqn{\lambda}) is
#' calculated from RMSEA (Steiger, 1998; Dudgeon, 2004) by \deqn{\lambda = (N -
#' 1)d\varepsilon^2 / K,} where \eqn{N} is sample size, \eqn{d} is the model
#' degrees of freedom, \eqn{K} is the number of groups, and \eqn{\varepsilon} is
#' the population RMSEA. Next, the noncentral \eqn{\chi^2} distribution with a
#' specified \emph{df} and noncentrality parameter is plotted. Thus,
#' the x-axis represents the sample \eqn{\chi^2} value. The sample \eqn{\chi^2}
#' value can be transformed to the sample RMSEA scale (\eqn{\hat{\varepsilon}})
#' by \deqn{\hat{\varepsilon} = \sqrt{K}\sqrt{\frac{\chi^2 - d}{(N - 1)d}},}
#' where \eqn{\chi^2} is the \eqn{\chi^2} value obtained from the noncentral
#' \eqn{\chi^2} distribution.
#'
#'
#' @importFrom stats qchisq
#'
#' @param rmsea The vector of RMSEA values to be plotted
#' @param n Sample size of a dataset
#' @param df Model degrees of freedom
#' @param ptile The percentile rank of the distribution of the first RMSEA at
#' which users wish to plot a vertical line in the resulting graph
#' @param caption The name vector of each element of \code{rmsea}
#' @param rmseaScale If \code{TRUE}, the RMSEA scale is used in the x-axis. If
#' \code{FALSE}, the chi-square scale is used in the x-axis.
#' @param group The number of groups used to calculate RMSEA.
#' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
#' @seealso \itemize{
#' \item \code{\link{plotRMSEApower}} to plot the statistical power
#' based on population RMSEA given the sample size
#' \item \code{\link{findRMSEApower}} to find the statistical power based on
#' population RMSEA given a sample size
#' \item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for
#' a given statistical power based on population RMSEA
#' }
#' @references
#' Dudgeon, P. (2004).
A note on extending Steiger's (1998)
#' multiple sample RMSEA adjustment to other noncentrality parameter-based
#' statistics. \emph{Structural Equation Modeling, 11}(3), 305--319.
#' doi:10.1207/s15328007sem1103_1
#'
#' MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis
#' and determination of sample size for covariance structure modeling.
#' \emph{Psychological Methods, 1}(2), 130--149. doi:10.1037/1082-989X.1.2.130
#'
#' Steiger, J. H. (1998). A note on multiple sample extensions of the RMSEA fit
#' index. \emph{Structural Equation Modeling, 5}(4), 411--419.
#' doi:10.1080/10705519809540115
#' @examples
#'
#' plotRMSEAdist(c(.05, .08), n = 200, df = 20, ptile = .95, rmseaScale = TRUE)
#' plotRMSEAdist(c(.05, .01), n = 200, df = 20, ptile = .05, rmseaScale = FALSE)
#'
#' @export
plotRMSEAdist <- function(rmsea, n, df, ptile = NULL, caption = NULL,
                          rmseaScale = TRUE, group = 1) {
  graph <- cbind(rmsea, df)
  ncp <- apply(graph, MARGIN = 1,
               FUN = function(x, n, group) ((n - 1) * x[2] * (x[1]^2)) / group,
               n = n, group = group)
  graph <- cbind(graph, ncp)
  dens <- lapply(as.list(data.frame(t(graph))),
                 function(x) findDensity("chisq", df = x[2], ncp = x[3]))
  if (rmseaScale) dens <- lapply(dens, function(x, df, n, group) {
    x[,1] <- (x[,1] - df)/(n-1)
    x[(x[,1] < 0),1] <- 0
    x[,1] <- sqrt(group) * sqrt(x[,1]/df)
    return(x)
  }, df = df, n = n, group = group)
  cutoff <- NULL
  if (!is.null(ptile)) {
    cutoff <- qchisq(ptile, df = graph[1, 2], ncp = graph[1, 3])
    if (rmseaScale) cutoff <- sqrt(group) * sqrt((cutoff - df)/(df * (n - 1)))
  }
  if (is.null(caption)) caption <- sapply(graph[,1], function(x)
    paste("Population RMSEA = ", format(x, digits = 3), sep = ""))
  plotOverlapDensity(dens, cutoff, caption, ylab = "Density",
                     xlab = ifelse(rmseaScale, "RMSEA", "Chi-Square"))
  equal0 <- sapply(dens, function(x) x[,1] == 0)
  if (any(equal0)) warning("The density at RMSEA = 0 cannot be trusted",
                           " because the plots are truncated.")
}



#' Find the statistical power based on population RMSEA
#'
#' Find the proportion of the samples from the sampling distribution of RMSEA
#' in the alternative hypothesis rejected by the cutoff derived from the
#' sampling distribution of RMSEA in the null hypothesis. This function can be
#' applied for both the test of close fit and the test of not-close fit
#' (MacCallum, Browne, & Sugawara, 1996)
#'
#' This function finds the proportion of the sampling distribution derived from
#' the alternative RMSEA that is in the critical region derived from the
#' sampling distribution of the null RMSEA. If \code{rmseaA} is greater than
#' \code{rmsea0}, the test of close fit is used and the critical region is in
#' the right-hand side of the null sampling distribution. On the other hand, if
#' \code{rmseaA} is less than \code{rmsea0}, the test of not-close fit is used
#' and the critical region is in the left-hand side of the null sampling
#' distribution (MacCallum, Browne, & Sugawara, 1996).
#'
#'
#' @importFrom stats qchisq pchisq
#'
#' @param rmsea0 Null RMSEA
#' @param rmseaA Alternative RMSEA
#' @param df Model degrees of freedom
#' @param n Sample size of a dataset
#' @param alpha Alpha level used in power calculations
#' @param group The number of groups used to calculate RMSEA.
#' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
#' @seealso \itemize{
#' \item \code{\link{plotRMSEApower}} to plot the statistical power based on
#' population RMSEA given the sample size
#' \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions
#' \item \code{\link{findRMSEAsamplesize}} to find the minimum sample size for
#' a given statistical power based on population RMSEA
#' }
#' @references
#' MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis
#' and determination of sample size for covariance structure modeling.
#' \emph{Psychological Methods, 1}(2), 130--149. doi:10.1037/1082-989X.1.2.130
#'
#' @examples
#'
#' findRMSEApower(rmsea0 = .05, rmseaA = .08, df = 20, n = 200)
#'
#' @export
findRMSEApower <- function(rmsea0, rmseaA, df, n, alpha = .05, group = 1) {
  ncp0 <- ((n-1)*df*rmsea0^2)/group
  ncpa <- ((n-1)*df*rmseaA^2)/group
  if (rmsea0 < rmseaA) {
    cval <- qchisq(alpha, df, ncp = ncp0, lower.tail = FALSE)
    pow <- pchisq(cval, df, ncp = ncpa, lower.tail = FALSE)
  } else {
    cval <- qchisq(1 - alpha, df, ncp = ncp0, lower.tail = FALSE)
    pow <- 1 - pchisq(cval, df, ncp = ncpa, lower.tail = FALSE)
  }
  pow
}



#' Find the minimum sample size for a given statistical power based on
#' population RMSEA
#'
#' Find the minimum sample size for a specified statistical power based on
#' population RMSEA. This function can be applied for both the test of close
#' fit and the test of not-close fit (MacCallum, Browne, & Sugawara, 1996).
#'
#' @param rmsea0 Null RMSEA
#' @param rmseaA Alternative RMSEA
#' @param df Model degrees of freedom
#' @param power Desired statistical power to reject a misspecified model (test
#' of close fit) or retain a good model (test of not-close fit)
#' @param alpha Alpha level used in power calculations
#' @param group The number of groups used to calculate RMSEA.
#' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
#' @seealso \itemize{
#' \item \code{\link{plotRMSEApower}} to plot the statistical power based on
#' population RMSEA given the sample size
#' \item \code{\link{plotRMSEAdist}} to visualize the RMSEA distributions
#' \item \code{\link{findRMSEApower}} to find the statistical power based on
#' population RMSEA given a sample size
#' }
#' @references
#' MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). Power analysis
#' and determination of sample size for covariance structure modeling.
#' \emph{Psychological Methods, 1}(2), 130--149. doi:10.1037/1082-989X.1.2.130
#'
#' @examples
#'
#' findRMSEAsamplesize(rmsea0 = .05, rmseaA = .08, df = 20, power = 0.80)
#'
#' @export
findRMSEAsamplesize <- function(rmsea0, rmseaA, df, power = 0.80,
                                alpha = .05, group = 1) {
  n <- 5:500
  pow <- findRMSEApower(rmsea0, rmseaA, df, n, alpha, group = group)
  if (all(pow > power)) {
    return("Sample Size <= 5")
  } else if (all(power > pow)) {
    repeat {
      n <- n + 500
      pow <- findRMSEApower(rmsea0, rmseaA, df, n, alpha, group = group)
      if (any(pow > power)) {
        index <- which(pow > power)[1]
        return(n[index]/group)
      }
    }
  } else {
    index <- which(pow > power)[1]
    return(n[index]/group)
  }
}



## ----------------
## Hidden Functions
## ----------------

# findDensity
# Find the x and y coordinates of a distribution in order to plot its density
# dist: target distribution in text, such as "chisq"
# ...: additional arguments of the distribution
# Returns a matrix with x and y coordinates for plotting the density
findDensity <- function(dist, ...) {
  FUN <- list()
  FUN[[1]] <- get(paste("q", dist, sep=""))
  FUN[[2]] <- c(0.001, 0.999)
  FUN <- c(FUN, ...)
  bound <- eval(as.call(FUN))
  val <- seq(bound[1], bound[2], length.out=1000)
  FUN[[1]] <- get(paste("d", dist, sep=""))
  FUN[[2]] <- val
  height <- eval(as.call(FUN))
  return(cbind(val, height))
}
# Example Code
# findDensity("chisq", df=10)

# plotOverlapDensity
# Plot the overlapping distributions using density
# dat: a list of data frames where each data frame has the x coordinate as
#      variable 1 and the y coordinate as variable 2
# vline: vertical line(s) in the graph
# caption: the name of each density line
# ...: additional arguments of the plot function
plotOverlapDensity <- function(dat, vline = NULL, caption = NULL, ...) {
  if (!is.list(dat)) {
    temp <- list()
    temp[[1]] <- dat
    dat <- temp
  }
  stack <- do.call(rbind, dat)
  lim <- apply(stack, 2, function(x) c(min(x), max(x)))
  plot(stack, xlim = lim[,1], ylim = lim[,2], type = "n", ...)
  for (i in 1:length(dat)) lines(dat[[i]], col = i, lwd = 1.5)
  for (i in 1:length(vline)) abline(v = vline[i], lwd = 1.5)
  if (!is.null(caption))
    legend(0.50 * (lim[2,1] - lim[1,1]) + lim[1,1],
           0.99 * (lim[2,2] - lim[1,2]) + lim[1,2],
           caption, col = 1:length(dat), lty = 1)
}
semTools/R/TSML.R0000644000176200001440000006765413432435606013176 0ustar liggesusers## Terrence D. Jorgensen
### Last updated: 15 September 2018
### semTools function to implement 2-stage ML


## -----------------
## Class and Methods
## -----------------

##' Class for the Results of 2-Stage Maximum Likelihood (TSML) Estimation for
##' Missing Data
##'
##' This class contains the results of 2-Stage Maximum Likelihood (TSML)
##' estimation for missing data. The \code{summary}, \code{anova}, \code{vcov}
##' methods return corrected \emph{SE}s and test statistics. Other methods are
##' simply wrappers around the corresponding \code{\linkS4class{lavaan}}
##' methods.
##'
##'
##' @name twostage-class
##' @aliases twostage-class show,twostage-method summary,twostage-method
##' anova,twostage-method vcov,twostage-method coef,twostage-method
##' fitted.values,twostage-method fitted,twostage-method
##' residuals,twostage-method resid,twostage-method nobs,twostage-method
##' @docType class
##'
##' @slot saturated A fitted \code{\linkS4class{lavaan}} object containing the
##' saturated model results
##' @slot target A fitted \code{\linkS4class{lavaan}} object containing the
##' target/hypothesized model results
##' @slot baseline A fitted \code{\linkS4class{lavaan}} object containing the
##' baseline/null model results
##' @slot auxNames A character string (potentially of \code{length == 0}) of any
##' auxiliary variable names, if used
##'
##' @param object An object of class \code{twostage}.
##' @param ... arguments passed to \code{\link[lavaan]{parameterEstimates}}.
##' @param h1 An object of class \code{twostage} in which \code{object} is
##' nested, so that their difference in fit can be tested using
##' \code{anova} (see \bold{Value} section for details).
##' @param baseline \code{logical} indicating whether to return results for the
##' baseline model, rather than the default target (hypothesized) model.
##' @param type The meaning of this argument varies depending on which method it
##' is used for. Find detailed descriptions in the \bold{Value} section
##' under \code{coef}, \code{nobs}, and \code{residuals}.
##' @param model \code{character} naming the slot for which to return the
##' model-implied sample moments (see the \code{fitted.values} description).
##' @param labels \code{logical} indicating whether the model-implied sample
##' moments should have (row/column) labels.
##'
##' @return
##' \item{show}{\code{signature(object = "twostage"):} The \code{show} function
##' is used to display the results of the \code{anova} method, as well as the
##' header of the (uncorrected) target model results.}
##' \item{summary}{\code{signature(object = "twostage", ...):} The summary
##' function prints the same information from the \code{show} method, but also
##' provides (and returns) the output of
##' \code{\link[lavaan]{parameterEstimates}(object@target, ...)} with corrected
##' \emph{SE}s, test statistics, and confidence intervals. Additional
##' arguments can be passed to \code{\link[lavaan]{parameterEstimates}},
##' including \code{fmi = TRUE} to provide an estimate of the fraction of
##' missing information.}
##' \item{anova}{\code{signature(object = "twostage", h1 = NULL, baseline = FALSE):}
##' The \code{anova} function returns the residual-based \eqn{\chi^2} test
##' statistic result, as well as the scaled \eqn{\chi^2} test statistic result,
##' for the model in the \code{target} slot, or for the model in the
##' \code{baseline} slot if \code{baseline = TRUE}. The user can also provide
##' a single additional \code{twostage} object to the \code{h1} argument, in
##' which case \code{anova} returns residual-based and scaled
##' (\eqn{\Delta})\eqn{\chi^2} test results, under the assumption that the
##' models are nested. The models will be automatically sorted according to
##' their degrees of freedom.}
##' \item{nobs}{\code{signature(object = "twostage",
##' type = c("ntotal", "ngroups", "n.per.group", "norig", "patterns", "coverage")):}
##' The \code{nobs} function will return the total sample size used in the
##' analysis by default.
Also available are the number of groups or the sample ##' size per group, the original sample size (if any rows were deleted because ##' all variables were missing), the missing data patterns, and the matrix of ##' coverage (diagonal is the proportion of sample observed on each variable, ##' and off-diagonal is the proportion observed for both of each pair of ##' variables).} ##' \item{coef}{\code{signature(object = "twostage", type = c("free", "user")):} ##' This is simply a wrapper around the corresponding ##' \code{\linkS4class{lavaan}} method, providing point estimates from the ##' \code{target} slot.} ##' \item{vcov}{\code{signature(object = "twostage", baseline = FALSE):} Returns ##' the asymptotic covariance matrix of the estimated parameters (corrected for ##' additional uncertainty due to missing data) for the model in the ##' \code{target} slot, or for the model in the \code{baseline} slot if ##' \code{baseline = TRUE}.} ##' \item{fitted.values, fitted}{\code{signature(object = "twostage", ##' model = c("target", "saturated", "baseline")):} This is simply a wrapper ##' around the corresponding \code{\linkS4class{lavaan}} method, providing ##' model-implied sample moments from the slot specified in the \code{model} ##' argument.} ##' \item{residuals, resid}{\code{signature(object = "twostage", type = c("raw", ##' "cor", "normalized", "standardized")):} This is simply a wrapper around the ##' corresponding \code{\linkS4class{lavaan}} method, providing residuals of ##' the specified \code{type} from the \code{target} slot.} ##' ##' @section Objects from the Class: Objects can be created via the ##' \code{\link{twostage}} function. ##' ##' @author Terrence D. Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\link{twostage}} ##' ##' @examples ##' ##' # See the example from the twostage function ##' setClass("twostage", slots = c(saturated = "lavaan", target = "lavaan", baseline = "lavaan", auxNames = "character")) ##' @rdname twostage-class ##' @aliases show,twostage-method ##' @export setMethod("show", "twostage", function(object) { ## show chi-squared test results cat("Chi-squared test(s) results, ADJUSTED for missing data:\n\n") getMethod("anova", "twostage")(object) cat("\n\nChi-squared test results, UNADJUSTED for missing data:\n\n") show(object@target) invisible(object) }) ##' @rdname twostage-class ##' @aliases summary,twostage-method ##' @importFrom stats pnorm qnorm ##' @importFrom lavaan parTable ##' @export setMethod("summary", "twostage", function(object, ...) { ## show chi-squared test results AND estimates getMethod("show", "twostage")(object) cat("\n\nParameter Estimates, with SEs (and tests/CIs) ADJUSTED for missing data:\n\n") dots <- list(...) 
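  ## fill in defaults for parameterEstimates() arguments not supplied via
  ## '...': skip the FMI estimates, but request 95% confidence intervals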
if (!"fmi" %in% names(dots)) dots$fmi <- FALSE if (!"ci" %in% names(dots)) dots$ci <- TRUE if (!"level" %in% names(dots)) dots$level <- .95 PT <- parTable(object@target) PT <- PT[PT$group > 0, ] PE <- do.call(lavaan::parameterEstimates, c(dots, object = object@target)) SEs <- sqrt(diag(getMethod("vcov", "twostage")(object))) PE$se[PT$free > 0] <- SEs[PT$free] PE$z[PT$free > 0] <- PE$est[PT$free > 0] / PE$se[PT$free > 0] PE$pvalue[PT$free > 0] <- pnorm(abs(PE$z[PT$free > 0]), lower.tail = FALSE)*2 if (dots$ci) { crit <- qnorm(1 - (1 - dots$level) / 2) PE$ci.lower[PT$free > 0] <- PE$est[PT$free > 0] - crit * PE$se[PT$free > 0] PE$ci.upper[PT$free > 0] <- PE$est[PT$free > 0] + crit * PE$se[PT$free > 0] } if (dots$fmi) { compVar <- diag(lavaan::vcov(object@target))[PT$free] ## FIXME: need to re-fit model to model-implied moments from Stage 2? # compFit <- lavaan::update(object@target, sample.nobs = lavaan::nobs(object@target), # sample.cov = lavInspect(object@target, "cov.ov"), # sample.mean = lavInspect(object@target, "mean.ov")) # compVar <- diag(lavaan::vcov(compFit))[PT$free] missVar <- SEs^2 PE$fmi[PT$free > 0] <- 1 - compVar / missVar } PE }) ## (hidden) function utilized by vcov and anova methods ##' @importFrom lavaan lavInspect parTable twostageMatrices <- function(object, baseline) { SLOT <- if (baseline) "baseline" else "target" ## extract parameter table to isolate estimates by group PTsat <- parTable(object@saturated) nG <- max(PTsat$group) isMG <- nG > 1L ## model derivatives delta <- lavInspect(slot(object, SLOT), "delta") if (!isMG) delta <- list(delta) for (g in 1:nG) { covparams <- grep(pattern = "~~", x = rownames(delta[[g]])) meanparams <- grep(pattern = "~1", x = rownames(delta[[g]])) delta[[g]] <- delta[[g]][c(covparams, meanparams), ] } ## stack groups' deltas into 1 matrix delta <- do.call(rbind, delta) ## extract estimated moments from saturated model, and number of moments satSigma <- lavInspect(object@saturated, "cov.ov") satMu <- lavInspect(object@saturated, "mean.ov") if (!isMG) { satSigma <- list(satSigma) satMu <- list(satMu) } if (length(object@auxNames)) { an <- object@auxNames tn <- lavaan::lavNames(slot(object, SLOT)) for (g in 1:nG) { satSigma[[g]] <- satSigma[[g]][tn, tn] satMu[[g]] <- satMu[[g]][tn] } } p <- length(satMu[[1]]) pStar <- p*(p + 1) / 2 ## extract model-implied moments muHat <- lavInspect(slot(object, SLOT), "mean.ov") sigmaHat <- lavInspect(slot(object, SLOT), "cov.ov") if (!isMG) { sigmaHat <- list(sigmaHat) muHat <- list(muHat) } shinv <- list() for (g in 1:nG) { muHat[[g]] <- muHat[[g]][names(satMu[[g]])] sigmaHat[[g]] <- sigmaHat[[g]][rownames(satSigma[[g]]), colnames(satSigma[[g]])] shinv[[g]] <- solve(sigmaHat[[g]]) } ## assemble complete-data information matrix H <- list() for (g in 1:nG) H[[g]] <- matrix(0, (pStar + p), (pStar + p)) if (lavInspect(slot(object, SLOT), "options")$estimator == "expected") { for (g in 1:nG) { H[[g]][1:pStar, 1:pStar] <- .5*lavaan::lav_matrix_duplication_pre_post(shinv[[g]] %x% shinv[[g]]) H[[g]][(pStar + 1):(pStar + p), (pStar + 1):(pStar + p)] <- shinv[[g]] } } else { ## estimator == "observed" dMu <- list() for (g in 1:nG) { dMu[[g]] <- satMu[[g]] - muHat[[g]] H[[g]][1:pStar, 1:pStar] <- lavaan::lav_matrix_duplication_pre_post(shinv[[g]] %x% (shinv[[g]] %*% (satSigma[[g]] + dMu[[g]] %*% t(dMu[[g]])) %*% shinv[[g]] - .5*shinv[[g]])) H[[g]][(pStar + 1):(pStar + p), 1:pStar] <- lavaan::lav_matrix_duplication_post(shinv[[g]] %x% (t(dMu[[g]]) %*% shinv[[g]])) H[[g]][1:pStar, (pStar + 1):(pStar + p)] <- 
t(H[[g]][(pStar + 1):(pStar + p), 1:pStar]) H[[g]][(pStar + 1):(pStar + p), (pStar + 1):(pStar + p)] <- shinv[[g]] } } ## combine into 1 block-diagonal matrix H <- do.call(lavaan::lav_matrix_bdiag, H) ## asymptotic information and covariance matrices of target model satACOV <- lavaan::vcov(object@saturated) satInfo <- solve(satACOV * lavaan::nobs(object@saturated)) ## all(round(acov*N, 8) == round(solve(info), 8)) ## all(round(acov, 8) == round(solve(info)/N, 8)) if (length(object@auxNames)) { dimTar <- !(PTsat$lhs %in% an | PTsat$rhs %in% an) dimAux <- PTsat$lhs %in% an | PTsat$rhs %in% an infoTar <- satInfo[dimTar, dimTar] infoAux <- satInfo[dimAux, dimAux] infoAT <- satInfo[dimAux, dimTar] satInfo <- infoTar - t(infoAT) %*% solve(infoAux) %*% infoAT satACOV <- solve(satInfo) / lavaan::nobs(object@saturated) } list(delta = delta, H = H, satACOV = satACOV, satInfo = satInfo) } ## (hidden?) function utilized by anova method to test 1 or 2 models ##' @importFrom stats pchisq ##' @importFrom lavaan lavInspect twostageLRT <- function(object, baseline, print = FALSE) { SLOT <- if (baseline) "baseline" else "target" ## calculate model derivatives and complete-data information matrix MATS <- twostageMatrices(object, baseline) ## residual-based statistic (Savalei & Bentler, 2009, eq. 8) N <- lavaan::nobs(slot(object, SLOT)) nG <- lavInspect(slot(object, SLOT), "ngroups") res <- lavaan::residuals(slot(object, SLOT)) if (nG == 1L) res <- list(res) etilde <- do.call(c, lapply(res, function(x) c(lavaan::lav_matrix_vech(x$cov), x$mean))) ID <- MATS$satInfo %*% MATS$delta T.res <- N*t(etilde) %*% (MATS$satInfo - ID %*% MASS::ginv(t(MATS$delta) %*% ID) %*% t(ID)) %*% etilde # FIXME: why not solve()? DF <- lavInspect(slot(object, SLOT), "fit")[["df"]] pval.res <- pchisq(T.res, df = DF, lower.tail = FALSE) residual <- c(chisq = T.res, df = DF, pvalue = pval.res) class(residual) <- c("lavaan.vector","numeric") ## scaled test statistic (Savalei & Bentler, 2009, eq. 9) meat <- MATS$H %*% MATS$delta bread <- MASS::ginv(t(MATS$delta) %*% meat) # FIXME: why not solve()? 
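  ## scaling constant (Savalei & Bentler, 2009, eq. 9): df divided by the
  ## trace of the saturated-model ACOV times the residual information
  ## (H minus its model-explained part, H %*% delta (delta' H delta)^-1 delta' H);
  ## its reciprocal (1/cc) is reported below as the scaling factor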
cc <- DF / sum(diag(MATS$satACOV %*% (MATS$H - meat %*% bread %*% t(meat)))) chisq <- lavInspect(slot(object, SLOT), "fit")[["chisq"]] T.scaled <- cc * chisq pval.scaled <- pchisq(T.scaled, df = DF, lower.tail = FALSE) scaled <- c(chisq.naive = chisq, scaling.factor = 1 / cc, chisq.scaled = T.scaled, df = DF, pvalue = pval.scaled) class(scaled) <- c("lavaan.vector","numeric") ## return both statistics if (print) { if (lavInspect(object@saturated, "options")$se == "standard") { cat("Browne (1984) residual-based test statistic:\n\n") print(residual) } cat("\n\nSatorra-Bentler (2001) scaled test statistic:\n\n") print(scaled) } invisible(list(residual = residual, scaled = scaled)) } ##' @rdname twostage-class ##' @aliases anova,twostage-method ##' @importFrom lavaan lavInspect ##' @export setMethod("anova", "twostage", function(object, h1 = NULL, baseline = FALSE) { if (is.null(h1)) { return(twostageLRT(object, baseline, print = TRUE)) } H0 <- twostageLRT(object, baseline = FALSE) H1 <- twostageLRT(h1, baseline = FALSE) DF0 <- H0$residual[["df"]] DF1 <- H1$residual[["df"]] if (DF0 == DF1) stop("Models have the same degrees of freedom.") if (min(c(DF0, DF1)) == 0L) return(twostageLRT(object, baseline, print = TRUE)) parent <- which.min(c(DF0, DF1)) if (parent == 1L) { parent <- H0 H0 <- H1 H1 <- parent DF0 <- H0$residual[["df"]] DF1 <- H1$residual[["df"]] } DF <- DF0 - DF1 ## residual-based statistic T.res <- H0$residual[["chisq"]] - H1$residual[["chisq"]] residual <- c(chisq = T.res, df = DF, pvalue = pchisq(T.res, df = DF, lower.tail = FALSE)) class(residual) <- c("lavaan.vector","numeric") ## scaled test statistic chisq.naive <- H0$scaled[["chisq.naive"]] - H1$scaled[["chisq.naive"]] cc <- (DF0*H0$scaled[["scaling.factor"]] - DF1*H1$scaled[["scaling.factor"]]) / DF if (cc < 0) { warning("Scaling factor is negative, so it was set to missing.") cc <- NA } scaled <- c(chisq.naive = chisq.naive, scaling.factor = cc, chisq.scaled = chisq.naive / cc, DF = DF, pvalue = pchisq(chisq.naive / cc, df = DF, lower.tail = FALSE)) class(scaled) <- c("lavaan.vector","numeric") ## return both statistics if (lavInspect(object@saturated, "options")$se == "standard") { cat("Difference test for Browne (1984) residual-based statistics:\n\n") print(residual) } cat("\n\nSatorra-Bentler (2001) scaled difference test:\n\n") print(scaled) invisible(list(residual = residual, scaled = scaled)) }) ##' @rdname twostage-class ##' @aliases nobs,twostage-method ##' @importFrom lavaan lavInspect ##' @export setMethod("nobs", "twostage", function(object, type = c("ntotal","ngroups","n.per.group","norig", "patterns","coverage")) { type <- type[1] if (type == "n.per.group") type <- "nobs" lavInspect(object@saturated, what = type) }) ##' @rdname twostage-class ##' @aliases coef,twostage-method ##' @export setMethod("coef", "twostage", function(object, type = c("free","user")) { type <- type[1] lavaan::coef(object@target, type = type) }) ##' @rdname twostage-class ##' @aliases vcov,twostage-method ##' @export setMethod("vcov", "twostage", function(object, baseline = FALSE) { SLOT <- if (baseline) "baseline" else "target" ## calculate model derivatives and complete-data information matrix MATS <- twostageMatrices(object, baseline) meat <- MATS$H %*% MATS$delta bread <- MASS::ginv(t(MATS$delta) %*% meat) # FIXME: why not solve()? 
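  ## sandwich-type ACOV of the Stage-2 estimates:
  ## (delta' H delta)^-1 delta' H %*% acov(saturated) %*% H delta (delta' H delta)^-1,
  ## which inflates the complete-data SEs for the uncertainty carried over
  ## from the Stage-1 (saturated-model) estimates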
  out <- bread %*% t(meat) %*% MATS$satACOV %*% meat %*% bread
  class(out) <- c("lavaan.matrix.symmetric","matrix")
  if (baseline) {
    rownames(out) <- names(getMethod("coef", "lavaan")(object@baseline))
  } else {
    rownames(out) <- names(getMethod("coef", "twostage")(object))
  }
  colnames(out) <- rownames(out)
  out
})

##' @rdname twostage-class
##' @aliases fitted.values,twostage-method
##' @export
setMethod("fitted.values", "twostage",
          function(object, model = c("target","saturated","baseline"),
                   type = "moments", labels = TRUE) {
  model <- model[1]
  lavaan::fitted.values(slot(object, model), type = type, labels = labels)
})

##' @rdname twostage-class
##' @aliases fitted,twostage-method
##' @export
setMethod("fitted", "twostage",
          function(object, model = c("target","saturated","baseline"),
                   type = "moments", labels = TRUE) {
  model <- model[1]
  lavaan::fitted.values(slot(object, model), type = type, labels = labels)
})

##' @rdname twostage-class
##' @aliases residuals,twostage-method
##' @export
setMethod("residuals", "twostage",
          function(object, type = c("raw","cor","normalized","standardized")) {
  type <- type[1]
  lavaan::residuals(object@target, type = type)
})

##' @rdname twostage-class
##' @aliases resid,twostage-method
##' @export
setMethod("resid", "twostage",
          function(object, type = c("raw","cor","normalized","standardized")) {
  type <- type[1]
  lavaan::residuals(object@target, type = type)
})

# fitS <- cfa(model = model, data = dat1, missing = "fiml", se = "standard")
# fitR <- cfa(model = model, data = dat1, missing = "fiml", se = "robust.huber.white")
# all(lavInspect(fitS, "information") == lavInspect(fitR, "information"))
# all(vcov(fitS) == vcov(fitR))


## ---------------------
## Constructor Functions
## ---------------------

##' Fit a lavaan model using 2-Stage Maximum Likelihood (TSML) estimation for
##' missing data.
##'
##' This function automates 2-Stage Maximum Likelihood (TSML) estimation,
##' optionally with auxiliary variables. Step 1 involves fitting a saturated
##' model to the partially observed data set (to variables in the hypothesized
##' model as well as auxiliary variables related to missingness). Step 2
##' involves fitting the hypothesized model to the model-implied means and
##' covariance matrix (also called the "EM" means and covariance matrix) as if
##' they were complete data. Step 3 involves correcting the Step-2 standard
##' errors (\emph{SE}s) and chi-squared statistic to account for additional
##' uncertainty due to missing data (using information from Step 1; see
##' References section for sources with formulas).
##'
##' All variables (including auxiliary variables) are treated as endogenous
##' variables in the Step-1 saturated model (\code{fixed.x = FALSE}), so data
##' are assumed continuous, although not necessarily multivariate normal
##' (dummy-coded auxiliary variables may be included in Step 1, but categorical
##' endogenous variables in the Step-2 hypothesized model are not allowed). To
##' avoid assuming multivariate normality, request \code{se =
##' "robust.huber.white"}. CAUTION: In addition to setting \code{fixed.x =
##' FALSE} and \code{conditional.x = FALSE} in \code{\link[lavaan]{lavaan}},
##' this function will automatically set \code{meanstructure = TRUE},
##' \code{estimator = "ML"}, \code{missing = "fiml"}, and \code{test =
##' "standard"}. \code{\link[lavaan]{lavaan}}'s \code{se} option can only be
##' set to \code{"standard"} to assume multivariate normality or to
##' \code{"robust.huber.white"} to relax that assumption.
##' ##' ##' @aliases twostage cfa.2stage sem.2stage growth.2stage lavaan.2stage ##' @importFrom lavaan lavInspect ##' ##' @param \dots Arguments passed to the \code{\link[lavaan]{lavaan}} function ##' specified in the \code{fun} argument. See also ##' \code{\link[lavaan]{lavOptions}}. At a minimum, the user must supply the ##' first two named arguments to \code{\link[lavaan]{lavaan}} (i.e., ##' \code{model} and \code{data}). ##' @param aux An optional character vector naming auxiliary variable(s) in ##' \code{data} ##' @param fun The character string naming the lavaan function used to fit the ##' Step-2 hypothesized model (\code{"cfa"}, \code{"sem"}, \code{"growth"}, or ##' \code{"lavaan"}). ##' @param baseline.model An optional character string, specifying the lavaan ##' \code{\link[lavaan]{model.syntax}} for a user-specified baseline model. ##' Interested users can use the fitted baseline model to calculate incremental ##' fit indices (e.g., CFI and TLI) using the corrected chi-squared values (see ##' the \code{anova} method in \code{\linkS4class{twostage}}). If \code{NULL}, ##' the default "independence model" (i.e., freely estimated means and ##' variances, but all covariances constrained to zero) will be specified ##' internally. ##' ##' @return The \code{\linkS4class{twostage}} object contains 3 fitted lavaan ##' models (saturated, target/hypothesized, and baseline) as well as the names ##' of auxiliary variables. None of the individual models provide the correct ##' model results (except the point estimates in the target model are unbiased). ##' Use the methods in \code{\linkS4class{twostage}} to extract corrected ##' \emph{SE}s and test statistics. ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\linkS4class{twostage}} ##' ##' @references ##' Savalei, V., & Bentler, P. M. (2009). A two-stage approach to missing data: ##' Theory and application to auxiliary variables. ##' \emph{Structural Equation Modeling, 16}(3), 477--497. ##' doi:10.1080/10705510903008238 ##' ##' Savalei, V., & Falk, C. F. (2014). Robust two-stage approach outperforms ##' robust full information maximum likelihood with incomplete nonnormal data. ##' \emph{Structural Equation Modeling, 21}(2), 280--302. 
##' doi:10.1080/10705511.2014.882692 ##' ##' @examples ##' ##' ## impose missing data for example ##' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), ##' "ageyr","agemo","school")] ##' set.seed(12345) ##' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) ##' age <- HSMiss$ageyr + HSMiss$agemo/12 ##' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ##' ##' ## specify CFA model from lavaan's ?cfa help page ##' HS.model <- ' ##' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ##' ' ##' ##' ## use ageyr and agemo as auxiliary variables ##' out <- cfa.2stage(model = HS.model, data = HSMiss, aux = c("ageyr","agemo")) ##' ##' ## two versions of a corrected chi-squared test results are shown ##' out ##' ## see Savalei & Bentler (2009) and Savalei & Falk (2014) for details ##' ##' ## the summary additionally provides the parameter estimates with corrected ##' ## standard errors, test statistics, and confidence intervals, along with ##' ## any other options that can be passed to parameterEstimates() ##' summary(out, standardized = TRUE) ##' ##' ##' ##' ## use parameter labels to fit a more constrained model ##' modc <- ' ##' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + a*x8 + a*x9 ##' ' ##' outc <- cfa.2stage(model = modc, data = HSMiss, aux = c("ageyr","agemo")) ##' ##' ##' ## use the anova() method to test this constraint ##' anova(out, outc) ##' ## like for a single model, two corrected statistics are provided ##' ##' @export twostage <- function(..., aux, fun, baseline.model = NULL) { if (all(aux == "")) aux <- NULL dots <- list(...) if (is.null(dots$model)) stop("lavaan model syntax argument must be named 'model'.") ####################### FIXME: also check intersect(names(dots), names(lavOptions())) lavaanifyArgs <- dots[intersect(names(dots), names(formals(lavaan::lavaanify)))] funArgs <- dots[intersect(names(dots), names(formals(lavaan::lavaan)))] ## set some non-optional lavaan arguments funArgs$meanstructure <- TRUE funArgs$conditional.x <- FALSE funArgs$fixed.x <- FALSE funArgs$missing <- "fiml" funArgs$estimator <- "ML" funArgs$test <- "standard" if (is.null(funArgs$information)) funArgs$information <- "observed" if (funArgs$information == "expected") { message("If data are MAR, only the observed information matrix is consistent.") if (!is.null(aux)) { funArgs$information <- "observed" message(c("Using auxiliary variables implies assuming that data are MAR. 
", "The lavaan argument 'information' was set to 'observed'.")) } if (!is.null(funArgs$se)) if(funArgs$se != "standard") { funArgs$information <- "observed" message(c("The lavaan argument 'information' was set to 'observed' ", "because adjusting SEs for non-normality requires it.")) } } funArgs$NACOV <- NULL funArgs$do.fit <- NULL ## STAGE 1: ## fit saturated model if (!is.null(funArgs$group)) lavaanifyArgs$ngroups <- length(table(funArgs$data[ , funArgs$group])) targetNames <- lavaan::lavNames(do.call(lavaan::lavaanify, lavaanifyArgs)) varnames <- c(targetNames, aux) covstruc <- outer(varnames, varnames, function(x, y) paste(x, "~~", y)) satArgs <- funArgs satArgs$constraints <- NULL satArgs$group.equal <- "" satArgs$model <- c(covstruc[lower.tri(covstruc, diag = TRUE)], paste(varnames, "~ 1")) satFit <- do.call(lavaan::lavaan, satArgs) ## check for robust estimators opts <- lavInspect(satFit, "options") if (!opts$se %in% c("standard","robust.huber.white")) stop(c("Two-Stage estimation requires either se = 'standard' for ", "multivariate normal data or se = 'robust.huber.white' to ", "correct for non-normality.")) ## STAGE 2: ## fit target model to saturated estimates targetArgs <- funArgs targetArgs$data <- NULL targetArgs$sample.cov <- lavInspect(satFit, "cov.ov") targetArgs$sample.mean <- lavInspect(satFit, "mean.ov") targetArgs$sample.nobs <- lavInspect(satFit, "nobs") targetArgs$se <- "standard" targetArgs$sample.cov.rescale <- FALSE targetFit <- do.call(fun, targetArgs) ## STAGE 0: ## fit baseline model (for incremental fit indices) baseArgs <- targetArgs if (is.null(baseline.model)) { basecov <- outer(targetNames, targetNames, function(x, y) paste0(x, " ~~ 0*", y)) diag(basecov) <- paste(targetNames, "~~", targetNames) baseArgs$model <- c(basecov[lower.tri(basecov, diag = TRUE)], paste(targetNames, "~ 1")) } else baseArgs$model <- baseline.model baseArgs$se <- "standard" baseFit <- do.call(lavaan::lavaan, baseArgs) if (length(setdiff(lavaan::lavNames(baseFit), targetNames))) warning("The baseline model includes variables excluded from the target model.") if (length(setdiff(targetNames, lavaan::lavNames(baseFit)))) warning("The target model includes variables excluded from the baseline model.") ## return both models out <- new("twostage", saturated = satFit, target = targetFit, baseline = baseFit, auxNames = as.character(aux)) out } ##' @rdname twostage ##' @export lavaan.2stage <- function(..., aux = NULL, baseline.model = NULL) { twostage(..., aux = aux, fun = "lavaan", baseline.model = baseline.model) } ##' @rdname twostage ##' @export cfa.2stage <- function(..., aux = NULL, baseline.model = NULL) { twostage(..., aux = aux, fun = "cfa", baseline.model = baseline.model) } ##' @rdname twostage ##' @export sem.2stage <- function(..., aux = NULL, baseline.model = NULL) { twostage(..., aux = aux, fun = "sem", baseline.model = baseline.model) } ##' @rdname twostage ##' @export growth.2stage <- function(..., aux = NULL, baseline.model = NULL) { twostage(..., aux = aux, fun = "growth", baseline.model = baseline.model) } semTools/R/partialInvariance.R0000644000176200001440000026214113432435606016053 0ustar liggesusers### Sunthud Pornprasertmanit ### Last updated: 17 September 2018 ##' Partial Measurement Invariance Testing Across Groups ##' ##' This test will provide partial invariance testing by (a) freeing a parameter ##' one-by-one from nested model and compare with the original nested model or ##' (b) fixing (or constraining) a parameter one-by-one from the parent model ##' and 
compare with the original parent model. This function only works with ##' congeneric models. The \code{partialInvariance} is used for continuous ##' variable. The \code{partialInvarianceCat} is used for categorical variables. ##' ##' There are four types of partial invariance testing: ##' ##' \itemize{ ##' \item Partial weak invariance. The model named 'fit.configural' ##' from the list of models is compared with the model named 'fit.loadings'. ##' Each loading will be freed or fixed from the metric and configural ##' invariance models respectively. The modified models are compared with the ##' original model. Note that the objects in the list of models must have the ##' names of "fit.configural" and "fit.loadings". Users may use "metric", ##' "weak", "loading", or "loadings" in the \code{type} argument. Note that, for ##' testing invariance on marker variables, other variables will be assigned as ##' marker variables automatically. ##' ##' \item Partial strong invariance. The model ##' named 'fit.loadings' from the list of models is compared with the model ##' named either 'fit.intercepts' or 'fit.thresholds'. Each intercept will be ##' freed or fixed from the scalar and metric invariance models respectively. ##' The modified models are compared with the original model. Note that the ##' objects in the list of models must have the names of "fit.loadings" and ##' either "fit.intercepts" or "fit.thresholds". Users may use "scalar", ##' "strong", "intercept", "intercepts", "threshold", or "thresholds" in the ##' \code{type} argument. Note that, for testing invariance on marker variables, ##' other variables will be assigned as marker variables automatically. Note ##' that if all variables are dichotomous, scalar invariance testing is not ##' available. ##' ##' \item Partial strict invariance. The model named either ##' 'fit.intercepts' or 'fit.thresholds' (or 'fit.loadings') from the list of ##' models is compared with the model named 'fit.residuals'. Each residual ##' variance will be freed or fixed from the strict and scalar (or metric) ##' invariance models respectively. The modified models are compared with the ##' original model. Note that the objects in the list of models must have the ##' names of "fit.residuals" and either "fit.intercepts", "fit.thresholds", or ##' "fit.loadings". Users may use "strict", "residual", "residuals", "error", or ##' "errors" in the \code{type} argument. ##' ##' \item Partial mean invariance. The ##' model named either 'fit.intercepts' or 'fit.thresholds' (or 'fit.residuals' ##' or 'fit.loadings') from the list of models is compared with the model named ##' 'fit.means'. Each factor mean will be freed or fixed from the means and ##' scalar (or strict or metric) invariance models respectively. The modified ##' models are compared with the original model. Note that the objects in the ##' list of models must have the names of "fit.means" and either ##' "fit.residuals", "fit.intercepts", "fit.thresholds", or "fit.loadings". ##' Users may use "means" or "mean" in the \code{type} argument. } ##' ##' Two types of comparisons are used in this function: ##' \enumerate{ ##' \item \code{free}: The nested model is used as a template. Then, one ##' parameter indicating the differences between two models is free. The new ##' model is compared with the nested model. This process is repeated for all ##' differences between two models. The likelihood-ratio test and the difference ##' in CFI are provided. ##' ##' \item \code{fix}: The parent model is used as a template. 
Then, one parameter
##' indicating the differences between two models is fixed or constrained to be
##' equal to other parameters. The new model is then compared with the parent
##' model. This process is repeated for all differences between two models. The
##' likelihood-ratio test and the difference in CFI are provided.
##'
##' \item \code{wald}: This method is similar to the \code{fix} method. However,
##' instead of building a new model and comparing it with a likelihood-ratio
##' test, a multivariate Wald test is used to test equality between parameter
##' estimates. See \code{\link[lavaan]{lavTestWald}} for further details. Note
##' that if any rows of the contrast do not sum to 0, the Wald test is not
##' provided, such as comparing two means where one of the means is fixed as 0.
##' This test statistic is not as accurate as the likelihood-ratio test provided
##' in \code{fix}. I provide it here in case the likelihood-ratio test fails to
##' converge.
##' }
##'
##' Note that this function does not adjust for the inflated Type I error rate
##' from multiple tests. The degrees of freedom of all tests equal the number
##' of groups minus 1.
##'
##' The details of the standardized estimates and the effect size used for each
##' parameter are provided in the vignettes by running
##' \code{vignette("partialInvariance")}.
##'
##' @importFrom lavaan lavInspect parTable
##' @aliases partialInvariance partialInvarianceCat
##'
##' @param fit A list of models for invariance testing. Each model should be
##' assigned by appropriate names (see details). The result from
##' \code{\link{measurementInvariance}} or
##' \code{\link{measurementInvarianceCat}} could be used in this argument
##' directly.
##' @param type The type of invariance testing: "metric", "scalar", "strict",
##' or "means"
##' @param free A vector of variable names that are free across groups in
##' advance. If partial mean invariance is tested, this argument represents a
##' vector of factor names that are free across groups.
##' @param fix A vector of variable names that are constrained to be equal
##' across groups in advance. If partial mean invariance is tested, this
##' argument represents a vector of factor names that are fixed across groups.
##' @param refgroup The reference group used to make the effect size comparison
##' with the other groups.
##' @param poolvar If \code{TRUE}, the variances are pooled across groups for
##' standardization. Otherwise, the variances of the reference group are used
##' for standardization.
##' @param p.adjust The method used to adjust p values. See
##' \code{\link[stats]{p.adjust}} for the options for adjusting p values. The
##' default is to not use any corrections.
##' @param fbound The z-score bound of the factor used to calculate the effect
##' size of the loading difference proposed by Millsap and Olivera-Aguilar
##' (2012).
##' @param return.fit Return the submodels fitted by this function
##' @param method The method used to calculate the likelihood-ratio test. See
##' \code{\link[lavaan]{lavTestLRT}} for available options
##'
##' @return A list of results is provided.
The list consists of at least
##' two elements:
##' \enumerate{
##' \item \code{estimates}: The results of parameter estimates including pooled
##' estimates (\code{poolest}), the estimates for each group, standardized
##' estimates for each group (\code{std}), the difference in standardized
##' values, and the effect size statistic (\emph{q} for factor loading
##' difference and \emph{h} for error variance difference). See the details of
##' this effect size statistic by running \code{vignette("partialInvariance")}.
##' In the \code{partialInvariance} function, the additional effect statistics
##' proposed by Millsap and Olivera-Aguilar (2012) are provided. For factor
##' loadings, the additional outputs are the observed mean difference
##' (\code{diff_mean}), the mean difference if factor scores are low
##' (\code{low_fscore}), and the mean difference if factor scores are high
##' (\code{high_fscore}). The low factor score is calculated by (a) finding the
##' factor score whose \emph{z} score equals -\code{fbound} (the default is
##' \eqn{-2}) from all groups and (b) picking the minimum value among the
##' factor scores. The high factor score is calculated by (a) finding the
##' factor score whose \emph{z} score equals \code{fbound} (default = 2)
##' from all groups and (b) picking the maximum value among the factor scores.
##' For measurement intercepts, the additional outputs are the observed mean
##' difference (\code{diff_mean}) and the proportion of the differences in the
##' intercepts over the observed mean differences (\code{propdiff}). For error
##' variances, the additional outputs are the proportion of the difference in
##' error variances over the difference in observed variances (\code{propdiff}).
##'
##' \item \code{results}: Statistical tests as well as the change in CFI are
##' provided. \eqn{\chi^2} and \emph{p} values are provided for all methods.
##'
##' \item \code{models}: The submodels used in the \code{free} and \code{fix}
##' methods, as well as the nested and parent models. The nested and parent
##' models will be changed from the original models if \code{free} or
##' \code{fix} arguments are specified.
##' }
##'
##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
##'
##' @seealso \code{\link{measurementInvariance}} for measurement invariance for
##' continuous variables; \code{\link{measurementInvarianceCat}} for measurement
##' invariance for categorical variables; \code{\link[lavaan]{lavTestWald}} for
##' multivariate Wald test
##'
##' @references Millsap, R. E., & Olivera-Aguilar, M. (2012). Investigating
##' measurement invariance using confirmatory factor analysis. In R. H. Hoyle
##' (Ed.), \emph{Handbook of structural equation modeling} (pp. 380--392). New
##' York, NY: Guilford.
##' ##' @examples ##' ##' ## Conduct weak invariance testing manually by using fixed-factor ##' ## method of scale identification ##' ##' library(lavaan) ##' ##' conf <- " ##' f1 =~ NA*x1 + x2 + x3 ##' f2 =~ NA*x4 + x5 + x6 ##' f1 ~~ c(1, 1)*f1 ##' f2 ~~ c(1, 1)*f2 ##' " ##' ##' weak <- " ##' f1 =~ NA*x1 + x2 + x3 ##' f2 =~ NA*x4 + x5 + x6 ##' f1 ~~ c(1, NA)*f1 ##' f2 ~~ c(1, NA)*f2 ##' " ##' ##' configural <- cfa(conf, data = HolzingerSwineford1939, std.lv = TRUE, group="school") ##' weak <- cfa(weak, data = HolzingerSwineford1939, group="school", group.equal="loadings") ##' models <- list(fit.configural = configural, fit.loadings = weak) ##' partialInvariance(models, "metric") ##' ##' \dontrun{ ##' partialInvariance(models, "metric", free = "x5") # "x5" is free across groups in advance ##' partialInvariance(models, "metric", fix = "x4") # "x4" is fixed across groups in advance ##' ##' ## Use the result from the measurementInvariance function ##' HW.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' models2 <- measurementInvariance(model = HW.model, data=HolzingerSwineford1939, ##' group="school") ##' partialInvariance(models2, "scalar") ##' ##' ## Conduct weak invariance testing manually by using fixed-factor ##' ## method of scale identification for dichotomous variables ##' ##' f <- rnorm(1000, 0, 1) ##' u1 <- 0.9*f + rnorm(1000, 1, sqrt(0.19)) ##' u2 <- 0.8*f + rnorm(1000, 1, sqrt(0.36)) ##' u3 <- 0.6*f + rnorm(1000, 1, sqrt(0.64)) ##' u4 <- 0.7*f + rnorm(1000, 1, sqrt(0.51)) ##' u1 <- as.numeric(cut(u1, breaks = c(-Inf, 0, Inf))) ##' u2 <- as.numeric(cut(u2, breaks = c(-Inf, 0.5, Inf))) ##' u3 <- as.numeric(cut(u3, breaks = c(-Inf, 0, Inf))) ##' u4 <- as.numeric(cut(u4, breaks = c(-Inf, -0.5, Inf))) ##' g <- rep(c(1, 2), 500) ##' dat2 <- data.frame(u1, u2, u3, u4, g) ##' ##' configural2 <- " ##' f1 =~ NA*u1 + u2 + u3 + u4 ##' u1 | c(t11, t11)*t1 ##' u2 | c(t21, t21)*t1 ##' u3 | c(t31, t31)*t1 ##' u4 | c(t41, t41)*t1 ##' f1 ~~ c(1, 1)*f1 ##' f1 ~ c(0, NA)*1 ##' u1 ~~ c(1, 1)*u1 ##' u2 ~~ c(1, NA)*u2 ##' u3 ~~ c(1, NA)*u3 ##' u4 ~~ c(1, NA)*u4 ##' " ##' ##' outConfigural2 <- cfa(configural2, data = dat2, group = "g", ##' parameterization = "theta", estimator = "wlsmv", ##' ordered = c("u1", "u2", "u3", "u4")) ##' ##' weak2 <- " ##' f1 =~ NA*u1 + c(f11, f11)*u1 + c(f21, f21)*u2 + c(f31, f31)*u3 + c(f41, f41)*u4 ##' u1 | c(t11, t11)*t1 ##' u2 | c(t21, t21)*t1 ##' u3 | c(t31, t31)*t1 ##' u4 | c(t41, t41)*t1 ##' f1 ~~ c(1, NA)*f1 ##' f1 ~ c(0, NA)*1 ##' u1 ~~ c(1, 1)*u1 ##' u2 ~~ c(1, NA)*u2 ##' u3 ~~ c(1, NA)*u3 ##' u4 ~~ c(1, NA)*u4 ##' " ##' ##' outWeak2 <- cfa(weak2, data = dat2, group = "g", parameterization = "theta", ##' estimator = "wlsmv", ordered = c("u1", "u2", "u3", "u4")) ##' modelsCat <- list(fit.configural = outConfigural2, fit.loadings = outWeak2) ##' ##' partialInvarianceCat(modelsCat, type = "metric") ##' ##' partialInvarianceCat(modelsCat, type = "metric", free = "u2") ##' partialInvarianceCat(modelsCat, type = "metric", fix = "u3") ##' ##' ## Use the result from the measurementInvarianceCat function ##' ##' model <- ' f1 =~ u1 + u2 + u3 + u4 ##' f2 =~ u5 + u6 + u7 + u8' ##' ##' modelsCat2 <- measurementInvarianceCat(model = model, data = datCat, group = "g", ##' parameterization = "theta", ##' estimator = "wlsmv", strict = TRUE) ##' ##' partialInvarianceCat(modelsCat2, type = "scalar") ##' } ##' ##' @export partialInvariance <- function(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", 
fbound = 2, return.fit = FALSE, method = "satorra.bentler.2001") { # fit <- measurementInvariance(HW.model, data=HolzingerSwineford1939, group="school", strict = TRUE) # type <- "weak" # free <- NULL # fix <- "x1" # refgroup <- 1 # poolvar <- TRUE # p.adjust <- "none" # return.fit <- FALSE # fbound <- 2 # method <- "satorra.bentler.2001" type <- tolower(type) numType <- 0 fit1 <- fit0 <- NULL # fit0 = Nested model, fit1 = Parent model if(type %in% c("metric", "weak", "loading", "loadings")) { numType <- 1 if(all(c("fit.configural", "fit.loadings") %in% names(fit))) { fit1 <- fit$fit.configural fit0 <- fit$fit.loadings } else { stop("The elements named 'fit.configural' and 'fit.loadings' are needed in the 'fit' argument") } } else if (type %in% c("scalar", "strong", "intercept", "intercepts", "threshold", "thresholds")) { numType <- 2 if(all(c("fit.loadings", "fit.intercepts") %in% names(fit))) { fit1 <- fit$fit.loadings fit0 <- fit$fit.intercepts } else { stop("The elements named 'fit.loadings' and 'fit.intercepts' are needed in the 'fit' argument") } } else if (type %in% c("strict", "residual", "residuals", "error", "errors")) { numType <- 3 if(all(c("fit.intercepts", "fit.residuals") %in% names(fit))) { fit1 <- fit$fit.intercepts fit0 <- fit$fit.residuals } else { stop("The elements named 'fit.intercepts' and 'fit.residuals' are needed in the 'fit' argument") } } else if (type %in% c("means", "mean")) { numType <- 4 if("fit.means" %in% names(fit)) { fit0 <- fit$fit.means if("fit.residuals" %in% names(fit)) { fit1 <- fit$fit.residuals } else if ("fit.intercepts" %in% names(fit)) { fit1 <- fit$fit.intercepts } else { stop("The elements named either 'fit.residuals' or 'fit.intercepts ' is needed in the 'fit' argument") } } else { stop("The elements named 'fit.means' is needed in the 'fit' argument") } } else { stop("Please specify the correct type of measurement invariance. See the help page.") } pt1 <- parTable(fit1) pt0 <- parTable(fit0) pt0$start <- pt0$est <- pt0$se <- NULL pt1$start <- pt1$est <- pt1$se <- NULL pt1$label[substr(pt1$label, 1, 1) == "." & substr(pt1$label, nchar(pt1$label), nchar(pt1$label)) == "."] <- "" pt0$label[substr(pt0$label, 1, 1) == "." & substr(pt0$label, nchar(pt0$label), nchar(pt0$label)) == "."] <- "" namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) if(length(table(table(pt0$rhs[pt0$op == "=~"]))) != 1) stop("The model is not congeneric. This function does not support non-congeneric model.") varfree <- varnames <- unique(pt0$rhs[pt0$op == "=~"]) facnames <- unique(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) facrepresent <- table(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)], pt0$rhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) if(any(apply(facrepresent, 2, function(x) sum(x != 0)) > 1)) stop("The model is not congeneric. 
This function does not support non-congeneric model.") facList <- list() for(i in 1:nrow(facrepresent)) { facList[[i]] <- colnames(facrepresent)[facrepresent[i,] > 0] } names(facList) <- rownames(facrepresent) facList <- facList[match(names(facList), facnames)] fixLoadingFac <- list() for(i in seq_along(facList)) { select <- pt1$lhs == names(facList)[i] & pt1$op == "=~" & pt1$rhs %in% facList[[i]] & pt1$group == 1 & pt1$free == 0 & (!is.na(pt1$ustart) & pt1$ustart > 0) fixLoadingFac[[i]] <- pt1$rhs[select] } names(fixLoadingFac) <- names(facList) fixIntceptFac <- list() for(i in seq_along(facList)) { select <- pt1$op == "~1" & pt1$rhs %in% facList[[i]] & pt1$group == 1 & pt1$free == 0 fixIntceptFac[[i]] <- pt1$rhs[select] } names(fixIntceptFac) <- names(facList) ngroups <- max(pt0$group) neach <- lavInspect(fit0, "nobs") groupvar <- lavInspect(fit0, "group") grouplab <- lavInspect(fit0, "group.label") if(!is.numeric(refgroup)) refgroup <- which(refgroup == grouplab) grouporder <- 1:ngroups grouporder <- c(refgroup, setdiff(grouporder, refgroup)) grouplaborder <- grouplab[grouporder] complab <- paste(grouplaborder[2:ngroups], "vs.", grouplaborder[1]) if(ngroups <= 1) stop("Well, the number of groups is 1. Measurement invariance across 'groups' cannot be done.") if(numType == 4) { if(!all(c(free, fix) %in% facnames)) stop("'free' and 'fix' arguments should consist of factor names because mean invariance is tested.") } else { if(!all(c(free, fix) %in% varnames)) stop("'free' and 'fix' arguments should consist of variable names.") } result <- fixCon <- freeCon <- NULL estimates <- NULL listFreeCon <- listFixCon <- list() beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) if(numType == 1) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for(i in seq_along(fix)) { if(dup[i]) { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } else { oldmarker <- fixLoadingFac[[facinfix[i]]] if(length(oldmarker) > 0) { oldmarkerval <- pt1$ustart[pt1$lhs == facinfix[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] if(oldmarker == fix[i]) { pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) } else { pt0 <- freeParTable(pt0, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- constrainParTable(pt0, facinfix[i], "=~", oldmarker, 1:ngroups) pt1 <- freeParTable(pt1, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) fixLoadingFac[[facinfix[i]]] <- fix[i] } } else { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } } } } if(!is.null(free)) { facinfree <- findFactor(free, facList) for(i in seq_along(free)) { # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinfree[i]]] if(length(oldmarker) > 0 && oldmarker == free[i]) { oldmarkerval <- pt1$ustart[pt1$lhs == facinfix[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] candidatemarker <- setdiff(facList[[facinfree[i]]], free[i])[1] pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) pt0 <- fixParTable(pt0, facinfix[i], 
"=~", candidatemarker, 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", candidatemarker, 1:ngroups, oldmarkerval) fixLoadingFac[[facinfix[i]]] <- candidatemarker } else { pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } obsmean <- sapply(lavInspect(fit0, "sampstat"), "[[", "mean") #FIXME: there might not be a mean structure obsmean <- obsmean[,grouporder] obsdiff <- obsmean[,2:ngroups, drop = FALSE] - matrix(obsmean[,1], nrow(obsmean), ngroups - 1) obsdiff <- obsdiff[varfree, , drop = FALSE] colnames(obsdiff) <- paste0("diff_mean:", complab) estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("load:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("q:", complab) esdiff <- matrix(NA, length(varfree), ngroups - 1) # Extract facmean, facsd, load, tau -> lowdiff, highdiff lowdiff <- matrix(NA, length(varfree), ngroups - 1) highdiff <- matrix(NA, length(varfree), ngroups - 1) colnames(lowdiff) <- paste0("low_fscore:", complab) colnames(highdiff) <- paste0("high_fscore:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$rhs %in% varfree) & (pt1$op == "=~") & (pt1$group == 1)) facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) indexfixvar <- which((pt1$rhs %in% varinfixvar) & (pt1$op == "=~") & (pt1$group == 1)) varnonfixvar <- setdiff(varfree, varinfixvar) indexnonfixvar <- setdiff(index, indexfixvar) pos <- 1 for(i in seq_along(indexfixvar)) { runnum <- indexfixvar[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- 
sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- grouplab ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if(any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. The standardized loadings used in Fisher z transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] facMean <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~1", "", 1:ngroups) wlow <- min(facMean - fbound * sqrt(facVal)) whigh <- max(facMean + fbound * sqrt(facVal)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$rhs[runnum], "~1", "", 1:ngroups) loadVal <- loadVal[grouporder] intVal <- intVal[grouporder] loaddiff <- loadVal[2:ngroups] - loadVal[1] intdiff <- intVal[2:ngroups] - intVal[1] lowdiff[pos,] <- intdiff + wlow * loaddiff highdiff[pos,] <- intdiff + whigh * loaddiff } listFreeCon <- c(listFreeCon, tryresult0) waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) pos <- pos + 1 } facinvarfree <- findFactor(varnonfixvar, facList) for(i in seq_along(indexnonfixvar)) { runnum <- indexnonfixvar[i] # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinvarfree[i]]] if(length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varnonfixvar[i])[1] temp <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) temp <- fixParTable(temp, facinvarfree[i], "=~", candidatemarker, 1:ngroups, ustart = 1) temp <- constrainParTable(temp, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) newparent <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) newparent <- fixParTable(newparent, facinvarfree[i], "=~", candidatemarker, 1:ngroups, ustart = 1) newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if(!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } waldCon[pos,] <- waldConstraint(newparentfit, newparent, waldMat, cbind(facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups)) } } else { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } listFixCon <- c(listFixCon, tryresult) if(length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { temp0 <- freeParTable(pt0, 
pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- grouplab ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if(any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. The standardized loadings used in Fisher z transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] facMean <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~1", "", 1:ngroups) wlow <- min(facMean - fbound * sqrt(facVal)) whigh <- max(facMean + fbound * sqrt(facVal)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$rhs[runnum], "~1", "", 1:ngroups) loadVal <- loadVal[grouporder] intVal <- intVal[grouporder] loaddiff <- loadVal[2:ngroups] - loadVal[1] intdiff <- intVal[2:ngroups] - intVal[1] lowdiff[pos,] <- intdiff + wlow * loaddiff highdiff[pos,] <- intdiff + whigh * loaddiff } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[c(indexfixvar, indexnonfixvar)] estimates <- cbind(estimates, stdestimates, esstd, esz, obsdiff, lowdiff, highdiff) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 2) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for(i in seq_along(fix)) { if(dup[i]) { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } else { oldmarker <- fixIntceptFac[[facinfix[i]]] if(length(oldmarker) > 0) { oldmarkerval <- pt1$ustart[pt1$lhs == fix[i] & pt1$op == "~1" & pt1$rhs == "" & pt1$group == 1] if(oldmarker == fix[i]) { pt0 <- fixParTable(pt0, fix[i], "~1", "", 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, fix[i], "~1", "", 1:ngroups, oldmarkerval) } else { pt0 <- freeParTable(pt0, oldmarker, "~1", "", 1:ngroups) pt0 <- constrainParTable(pt0, oldmarker, "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, oldmarker, 
"~1", "", 1:ngroups) pt0 <- fixParTable(pt0, fix[i], "~1", "", 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, fix[i], "~1", "", 1:ngroups, oldmarkerval) fixIntceptFac[[facinfix[i]]] <- fix[i] } } else { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } } } } if(!is.null(free)) { facinfree <- findFactor(free, facList) for(i in seq_along(free)) { # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinfree[i]]] if(length(oldmarker) > 0 && oldmarker == free[i]) { oldmarkerval <- pt1$ustart[pt1$lhs == oldmarker & pt1$op == "~1" & pt1$rhs == "" & pt1$group == 1] candidatemarker <- setdiff(facList[[facinfree[i]]], free[i])[1] pt0 <- freeParTable(pt0, free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) pt0 <- fixParTable(pt0, candidatemarker, "~1", "", 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, candidatemarker, "~1", "", 1:ngroups, oldmarkerval) fixIntceptFac[[facinfix[i]]] <- candidatemarker } else { pt0 <- freeParTable(pt0, free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } obsmean <- sapply(lavInspect(fit0, "sampstat"), "[[", "mean") #FIXME: there might not be a mean structure obsmean <- obsmean[,grouporder] obsdiff <- obsmean[,2:ngroups, drop = FALSE] - matrix(obsmean[,1], nrow(obsmean), ngroups - 1) obsdiff <- obsdiff[varfree, , drop = FALSE] colnames(obsdiff) <- paste0("diff_mean:", complab) # Prop diff propdiff <- matrix(NA, length(varfree), ngroups - 1) colnames(propdiff) <- paste0("propdiff:", complab) estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("int:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~1") & (pt1$group == 1)) facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) indexfixvar <- which((pt1$lhs %in% varinfixvar) & (pt1$op == "~1") & (pt1$group == 1)) varnonfixvar <- setdiff(varfree, varinfixvar) indexnonfixvar <- setdiff(index, indexfixvar) pos <- 1 for(i in seq_along(varinfixvar)) { runnum <- indexfixvar[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- 
try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- intVal totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- intVal / sqrt(refTotalVal) stdestimates[pos,] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos,] <- stdIntVal[2:ngroups] - stdIntVal[1] intVal <- intVal[grouporder] propdiff[pos,] <- (intVal[2:ngroups] - intVal[1]) / obsdiff[pos,] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) pos <- pos + 1 } facinvarfree <- findFactor(varfree, facList) for(i in seq_along(varnonfixvar)) { runnum <- indexnonfixvar[i] # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinvarfree[i]]] if(length(oldmarker) > 0 && oldmarker == varfree[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varfree[i])[1] temp <- freeParTable(pt1, varfree[i], "~1", "", 1:ngroups) temp <- constrainParTable(temp, varfree[i], "~1", "", 1:ngroups) temp <- fixParTable(temp, candidatemarker, "~1", "", 1:ngroups) newparent <- freeParTable(pt1, varfree[i], "~1", "", 1:ngroups) newparent <- fixParTable(newparent, candidatemarker, "~1", "", 1:ngroups) newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if(!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } waldCon[pos,] <- waldConstraint(newparentfit, newparent, waldMat, cbind(varfree[i], "~1", "", 1:ngroups)) } } else { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } listFixCon <- c(listFixCon, tryresult) if(length(oldmarker) > 0 && oldmarker == varfree[i]) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) intVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], 
pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- intVal totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- intVal / sqrt(refTotalVal) stdestimates[pos,] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos,] <- stdIntVal[2:ngroups] - stdIntVal[1] intVal <- intVal[grouporder] propdiff[pos,] <- (intVal[2:ngroups] - intVal[1]) / obsdiff[pos,] } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[c(indexfixvar, indexnonfixvar)] estimates <- cbind(estimates, stdestimates, esstd, obsdiff, propdiff) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 3) { if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { for(i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~~", fix[i], 1:ngroups) } } if(!is.null(free)) { for(i in seq_along(free)) { pt0 <- freeParTable(pt0, free[i], "~~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~~", free[i], 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } # Prop diff propdiff <- matrix(NA, length(varfree), ngroups - 1) colnames(propdiff) <- paste0("propdiff:", complab) estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("errvar:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("h:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~~") & (pt1$lhs == pt1$rhs) & (pt1$group == 1)) for(i in seq_along(index)) { runnum <- index[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[i,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) errVal <- getValue(temp0, 
lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- errVal totalVal <- sapply(lavaan::fitted.values(tempfit0), function(x, v) x$cov[v, v], v = pt0$rhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdErrVal <- errVal / sqrt(refTotalVal) stdestimates[i,] <- stdErrVal stdErrVal <- stdErrVal[grouporder] esstd[i,] <- stdErrVal[2:ngroups] - stdErrVal[1] if(any(abs(stdErrVal) > 0.9999)) warning(paste("The uniqueness of", pt0$rhs[runnum], "in some groups is greater than 1. The uniqueness used in the arcsine transformation is changed to 0.9999.")) stdErrVal[stdErrVal > 0.9999] <- 0.9999 zErrVal <- asin(sqrt(stdErrVal)) esz[i,] <- zErrVal[2:ngroups] - zErrVal[1] errVal <- errVal[grouporder] totalVal <- totalVal[grouporder] errdiff <- errVal[2:ngroups] - errVal[1] totaldiff <- totalVal[2:ngroups] - totalVal[1] propdiff[i,] <- errdiff / totaldiff } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd, esz, propdiff) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 4) { varfree <- facnames if(!is.null(free) | !is.null(fix)) { if(!is.null(fix)) { for(i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } } if(!is.null(free)) { for(i in seq_along(free)) { pt0 <- freeParTable(pt0, free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("mean:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~1") & (pt1$group == 1)) for(i in seq_along(index)) { runnum <- index[i] isfree <- pt1$free[runnum] != 0 if(isfree) { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } else { temp <- fixParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 2:ngroups, ustart = pt1$ustart[runnum]) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) isfree0 <-
pt0$free[runnum] != 0 if(isfree0) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if(!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if(!is(compresult0, "try-error")) freeCon[i,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) meanVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- meanVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) stdMeanVal <- meanVal / sqrt(refFacVal) stdestimates[i,] <- stdMeanVal stdMeanVal <- stdMeanVal[grouporder] esstd[i,] <- stdMeanVal[2:ngroups] - stdMeanVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd) result <- cbind(freeCon, fixCon, waldCon) } if(return.fit) { return(invisible(list(estimates = estimates, results = result, models = list(free = listFreeCon, fix = listFixCon, nested = fit0, parent = fit1)))) } else { return(list(estimates = estimates, results = result)) } } ##' @importFrom lavaan lavInspect parTable ##' @rdname partialInvariance ##' @export partialInvarianceCat <- function(fit, type, free = NULL, fix = NULL, refgroup = 1, poolvar = TRUE, p.adjust = "none", return.fit = FALSE, method = "satorra.bentler.2001") { # model <- ' f1 =~ u1 + u2 + u3 + u4 # f2 =~ u5 + u6 + u7 + u8' # modelsCat2 <- measurementInvarianceCat(model, data = datCat, group = "g", parameterization="theta", # estimator="wlsmv", strict = TRUE) # fit <- modelsCat2 # type <- "weak" # free <- NULL # fix <- NULL # refgroup <- 1 # poolvar <- TRUE # p.adjust <- "none" # return.fit <- FALSE # method = "satorra.bentler.2001" type <- tolower(type) numType <- 1 fit1 <- fit0 <- NULL # fit0 = Nested model, fit1 = Parent model if (type %in% c("metric", "weak", "loading", "loadings")) { numType <- 1 if (all(c("fit.configural", "fit.loadings") %in% names(fit))) { fit1 <- fit$fit.configural fit0 <- fit$fit.loadings } else { stop("The elements named 'fit.configural' and 'fit.loadings' are needed", " in the 'fit' argument") } } else if (type %in% c("scalar", "strong", "intercept", "intercepts", "threshold", "thresholds")) { numType <- 2 if (all(c("fit.loadings", "fit.thresholds") %in% names(fit))) { fit1 <- fit$fit.loadings fit0 <- fit$fit.thresholds } else { stop("The elements named 'fit.loadings' and 'fit.thresholds' are needed", " in the 'fit' argument") } } else if (type %in% c("strict", "residual", "residuals", "error", "errors")) { numType <- 3 if ("fit.residuals" %in% names(fit)) { fit0 <- fit$fit.residuals if ("fit.thresholds" %in% names(fit)) { fit1 <- fit$fit.thresholds } else if ("fit.loadings" %in% 
names(fit)) { fit1 <- fit$fit.loadings } else { stop("The element named either 'fit.thresholds' or 'fit.loadings' is", " needed in the 'fit' argument") } } else { stop("The element named 'fit.residuals' is needed in the 'fit' argument") } } else if (type %in% c("means", "mean")) { numType <- 4 if ("fit.means" %in% names(fit)) { fit0 <- fit$fit.means if("fit.residuals" %in% names(fit)) { fit1 <- fit$fit.residuals } else if ("fit.thresholds" %in% names(fit)) { fit1 <- fit$fit.thresholds } else if ("fit.loadings" %in% names(fit)) { fit1 <- fit$fit.loadings } else { stop("The element named either 'fit.residuals', 'fit.thresholds',", " or 'fit.loadings' is needed in the 'fit' argument") } } else { stop("The element named 'fit.means' is needed in the 'fit' argument") } } else { stop("Please specify the correct type of measurement invariance. See the help page.") } pt1 <- parTable(fit1) pt0 <- parTable(fit0) pt0$start <- pt0$est <- pt0$se <- NULL pt1$start <- pt1$est <- pt1$se <- NULL pt1$label[substr(pt1$label, 1, 1) == "." & substr(pt1$label, nchar(pt1$label), nchar(pt1$label)) == "."] <- "" pt0$label[substr(pt0$label, 1, 1) == "." & substr(pt0$label, nchar(pt0$label), nchar(pt0$label)) == "."] <- "" namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) if (length(table(table(pt0$rhs[pt0$op == "=~"]))) != 1) stop("The model is not congeneric. This function does not support non-congeneric models.") varfree <- varnames <- unique(pt0$rhs[pt0$op == "=~"]) facnames <- unique(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) facrepresent <- table(pt0$lhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)], pt0$rhs[(pt0$op == "=~") & (pt0$rhs %in% varnames)]) if (any(apply(facrepresent, 2, function(x) sum(x != 0)) > 1)) stop("The model is not congeneric. This function does not support non-congeneric models.") facList <- list() for (i in 1:nrow(facrepresent)) { facList[[i]] <- colnames(facrepresent)[facrepresent[i,] > 0] } names(facList) <- rownames(facrepresent) facList <- facList[match(names(facList), facnames)] fixLoadingFac <- list() for (i in seq_along(facList)) { select <- pt1$lhs == names(facList)[i] & pt1$op == "=~" & pt1$rhs %in% facList[[i]] & pt1$group == 1 & pt1$free == 0 & (!is.na(pt1$ustart) & pt1$ustart > 0) fixLoadingFac[[i]] <- pt1$rhs[select] } names(fixLoadingFac) <- names(facList) # Find the number of thresholds # Check whether the factor configuration is the same across groups conParTable <- lapply(pt1, "[", pt1$op == "==") group1pt <- lapply(pt1, "[", pt1$group != 1) numThreshold <- table(sapply(group1pt, "[", group1pt$op == "|")[,"lhs"]) plabelthres <- split(group1pt$plabel[group1pt$op == "|"], group1pt$lhs[group1pt$op == "|"]) numFixedThreshold <- sapply(lapply(plabelthres, function(vec) !is.na(match(vec, conParTable$lhs)) | !is.na(match(vec, conParTable$rhs))), sum)[names(numThreshold)] #numFixedThreshold <- table(sapply(group1pt, "[", group1pt$op == "|" & group1pt$eq.id != 0)[,"lhs"]) fixIntceptFac <- list() for (i in seq_along(facList)) { tmp <- numFixedThreshold[facList[[i]]] if (all(tmp > 1)) { fixIntceptFac[[i]] <- integer(0) } else { fixIntceptFac[[i]] <- names(which.max(tmp))[1] } } names(fixIntceptFac) <- names(facList) ngroups <- max(pt0$group) neach <- lavInspect(fit0, "nobs") groupvar <- lavInspect(fit0, "group") grouplab <- lavInspect(fit0, "group.label") if (!is.numeric(refgroup)) refgroup <- which(refgroup == grouplab) grouporder <- 1:ngroups grouporder <- c(refgroup, setdiff(grouporder, refgroup)) grouplaborder <- grouplab[grouporder] complab <-
paste(grouplaborder[2:ngroups], "vs.", grouplaborder[1]) if (ngroups <= 1) stop("The number of groups is 1, so measurement", " invariance across groups cannot be tested.") if (numType == 4) { if (!all(c(free, fix) %in% facnames)) stop("'free' and 'fix' arguments should consist of factor names because", " mean invariance is tested.") } else { if (!all(c(free, fix) %in% varnames)) stop("'free' and 'fix' arguments should consist of variable names.") } result <- fixCon <- freeCon <- NULL estimates <- NULL listFreeCon <- listFixCon <- list() beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) if (numType == 1) { if (!is.null(free) | !is.null(fix)) { if (!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for (i in seq_along(fix)) { if (dup[i]) { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } else { oldmarker <- fixLoadingFac[[facinfix[i]]] if (length(oldmarker) > 0) { oldmarkerval <- pt1$ustart[pt1$lhs == facinfix[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] if (oldmarker == fix[i]) { pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) } else { pt0 <- freeParTable(pt0, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- constrainParTable(pt0, facinfix[i], "=~", oldmarker, 1:ngroups) pt1 <- freeParTable(pt1, facinfix[i], "=~", oldmarker, 1:ngroups) pt0 <- fixParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups, oldmarkerval) fixLoadingFac[[facinfix[i]]] <- fix[i] } } else { pt0 <- constrainParTable(pt0, facinfix[i], "=~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, facinfix[i], "=~", fix[i], 1:ngroups) } } } } if (!is.null(free)) { facinfree <- findFactor(free, facList) for (i in seq_along(free)) { # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinfree[i]]] if (length(oldmarker) > 0 && oldmarker == free[i]) { oldmarkerval <- pt1$ustart[pt1$lhs == facinfree[i] & pt1$op == "=~" & pt1$rhs == oldmarker & pt1$group == 1] candidatemarker <- setdiff(facList[[facinfree[i]]], free[i])[1] pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) pt0 <- fixParTable(pt0, facinfree[i], "=~", candidatemarker, 1:ngroups, oldmarkerval) pt1 <- fixParTable(pt1, facinfree[i], "=~", candidatemarker, 1:ngroups, oldmarkerval) fixLoadingFac[[facinfree[i]]] <- candidatemarker } else { pt0 <- freeParTable(pt0, facinfree[i], "=~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, facinfree[i], "=~", free[i], 1:ngroups) } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("load:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("q:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df",
"fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$rhs %in% varfree) & (pt1$op == "=~") & (pt1$group == 1)) facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) indexfixvar <- which((pt1$rhs %in% varinfixvar) & (pt1$op == "=~") & (pt1$group == 1)) varnonfixvar <- setdiff(varfree, varinfixvar) indexnonfixvar <- setdiff(index, indexfixvar) pos <- 1 for (i in seq_along(indexfixvar)) { runnum <- indexfixvar[i] temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if (!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if (!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- grouplab ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if (any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. 
The", " standardized loadings used in Fisher z", " transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) pos <- pos + 1 } facinvarfree <- findFactor(varnonfixvar, facList) for (i in seq_along(indexnonfixvar)) { runnum <- indexnonfixvar[i] # Need to change marker variable if fixed oldmarker <- fixLoadingFac[[facinvarfree[i]]] if (length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varnonfixvar[i])[1] temp <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) temp <- constrainParTable(temp, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) temp <- fixParTable(temp, facinvarfree[i], "=~", candidatemarker, 1:ngroups) newparent <- freeParTable(pt1, facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups) newparent <- fixParTable(newparent, facinvarfree[i], "=~", candidatemarker, 1:ngroups) newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if (!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } waldCon[pos,] <- waldConstraint(newparentfit, newparent, waldMat, cbind(facinvarfree[i], "=~", varnonfixvar[i], 1:ngroups)) } } else { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } waldCon[pos,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } listFixCon <- c(listFixCon, tryresult) if (length(oldmarker) > 0 && oldmarker == varnonfixvar[i]) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) } estimates[pos, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if (!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if (!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) loadVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, 2:ncol(estimates)] <- loadVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$rhs[runnum]) names(facVal) <- names(totalVal) <- grouplab ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) 
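## Note on the effect-size computations below (a descriptive sketch, with
## hypothetical values): the completely standardized loading in each group is
## std = loading * SD(factor) / SD(total), using the reference group's (or
## pooled) variances. Group differences in Fisher's z of those loadings,
## atanh(std) = 0.5 * log((1 + std) / (1 - std)), fill the "q" effect-size
## columns, e.g., atanh(0.8) - atanh(0.6) = 1.0986 - 0.6931 = 0.4055.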
stdLoadVal <- loadVal * sqrt(refFacVal) / sqrt(refTotalVal) stdestimates[pos,] <- stdLoadVal stdLoadVal <- stdLoadVal[grouporder] esstd[pos,] <- stdLoadVal[2:ngroups] - stdLoadVal[1] if (any(abs(stdLoadVal) > 0.9999)) warning(paste("Standardized Loadings of", pt0$rhs[runnum], "in some groups are less than -1 or over 1. The", " standardized loadings used in Fisher z", " transformation are changed to -0.9999 or 0.9999.")) stdLoadVal[stdLoadVal > 0.9999] <- 0.9999 stdLoadVal[stdLoadVal < -0.9999] <- -0.9999 zLoadVal <- atanh(stdLoadVal) esz[pos,] <- zLoadVal[2:ngroups] - zLoadVal[1] } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[c(indexfixvar, indexnonfixvar)] estimates <- cbind(estimates, stdestimates, esstd, esz) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 2) { if (!is.null(free) | !is.null(fix)) { if (!is.null(fix)) { facinfix <- findFactor(fix, facList) dup <- duplicated(facinfix) for (i in seq_along(fix)) { numfixthres <- numThreshold[fix[i]] if (numfixthres > 1) { if (dup[i]) { for (s in 2:numfixthres) { pt0 <- constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } } else { oldmarker <- fixIntceptFac[[facinfix[i]]] numoldthres <- numThreshold[oldmarker] if (length(oldmarker) > 0) { if (oldmarker == fix[i]) { for (s in 2:numfixthres) { pt0 <- constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } } else { for (r in 2:numoldthres) { pt1 <- freeParTable(pt1, oldmarker, "|", paste0("t", r), 1:ngroups) } for (s in 2:numfixthres) { pt0 <- constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } fixIntceptFac[[facinfix[i]]] <- fix[i] } } else { for (s in 2:numfixthres) { pt0 <- constrainParTable(pt0, fix[i], "|", paste0("t", s), 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "|", paste0("t", s), 1:ngroups) } } } } } } if (!is.null(free)) { facinfree <- findFactor(free, facList) for (i in seq_along(free)) { numfreethres <- numThreshold[free[i]] # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinfree[i]]] numoldthres <- numThreshold[oldmarker] if (length(oldmarker) > 0 && oldmarker == free[i]) { candidatemarker <- setdiff(facList[[facinfree[i]]], free[i]) candidatemarker <- candidatemarker[numThreshold[candidatemarker] > 1][1] numcandidatethres <- numThreshold[candidatemarker] pt0 <- constrainParTable(pt0, candidatemarker, "|", "t2", 1:ngroups) pt1 <- constrainParTable(pt1, candidatemarker, "|", "t2", 1:ngroups) for (s in 2:numfixthres) { pt0 <- freeParTable(pt0, free[i], "|", paste0("t", s), 1:ngroups) pt1 <- freeParTable(pt1, free[i], "|", paste0("t", s), 1:ngroups) } fixIntceptFac[[facinfix[i]]] <- candidatemarker } else { for (s in 2:numfixthres) { pt0 <- freeParTable(pt0, free[i], "|", paste0("t", s), 1:ngroups) pt1 <- freeParTable(pt1, free[i], "|", paste0("t", s), 1:ngroups) } } } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- 
setdiff(varfree, c(free, fix)) } maxcolumns <- max(numThreshold[varfree]) - 1 tname <- paste0("t", 2:(maxcolumns + 1)) estimates <- matrix(NA, length(varfree), (ngroups * length(tname)) + length(tname)) stdestimates <- matrix(NA, length(varfree), ngroups * length(tname)) tnameandlab <- expand.grid(tname, grouplab) colnames(estimates) <- c(paste0("pool:", tname), paste0(tnameandlab[,1], ":", tnameandlab[,2])) colnames(stdestimates) <- paste0("std:", tnameandlab[,1], ":", tnameandlab[,2]) esstd <- matrix(NA, length(varfree), (ngroups - 1)* length(tname)) tnameandcomplab <- expand.grid(tname, complab) colnames(esstd) <- paste0("diff_std:", tnameandcomplab[,1], ":", tnameandcomplab[,2]) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") facinfix <- findFactor(fix, facList) varinfixvar <- unlist(facList[facinfix]) varinfixvar <- setdiff(varinfixvar, setdiff(varinfixvar, varfree)) varnonfixvar <- setdiff(varfree, varinfixvar) pos <- 1 for (i in seq_along(varinfixvar)) { temp <- pt1 for (s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) temp <- constrainParTable(temp, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- pt0 for (s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt0$lhs == varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", s)) & (pt0$group == 1)) temp0 <- freeParTable(temp0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, s - 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) } tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if (!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if (!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) for (s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt0$lhs == varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", s)) & (pt0$group == 1)) thresVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, maxcolumns*(1:ngroups) + (s - 1)] <- thresVal totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- thresVal / sqrt(refTotalVal) stdestimates[pos, maxcolumns*(1:ngroups - 1) + (s - 1)] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos, maxcolumns*(1:length(complab) - 1) + (s - 1)] <- stdIntVal[2:ngroups] - stdIntVal[1] } } listFreeCon <- c(listFreeCon, tryresult0) args <- list(fit1, pt1, waldMat) for (s in 2:numThreshold[varinfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) args <- c(args, list(cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups))) 
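## Each constrained threshold (t2, ..., tK) of this indicator appends one
## contrast specification to 'args'; do.call() below pools them into a single
## multi-degree-of-freedom Wald test of threshold invariance. With
## hypothetical numbers: 2 groups and thresholds t2 and t3 yield one contrast
## row (group 2 minus group 1) per threshold, so wald.df = 2.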
} waldCon[pos,] <- do.call(waldConstraint, args) pos <- pos + 1 } facinvarfree <- findFactor(varnonfixvar, facList) for (i in seq_along(varnonfixvar)) { # Need to change marker variable if fixed oldmarker <- fixIntceptFac[[facinvarfree[i]]] if (length(oldmarker) > 0 && oldmarker == varfree[i]) { candidatemarker <- setdiff(facList[[facinvarfree[i]]], varnonfixvar[i]) candidatemarker <- candidatemarker[numThreshold[candidatemarker] > 1][1] numcandidatethres <- numThreshold[candidatemarker] newparent <- constrainParTable(pt1, candidatemarker, "|", "t2", 1:ngroups) for (s in 2:numcandidatethres) { newparent <- freeParTable(newparent, varnonfixvar[i], "|", paste0("t", s), 1:ngroups) } temp <- newparent for (s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((newparent$lhs == varnonfixvar[i]) & (newparent$op == "|") & (newparent$rhs == paste0("t", s)) & (newparent$group == 1)) temp <- constrainParTable(temp, newparent$lhs[runnum], newparent$op[runnum], newparent$rhs[runnum], 1:ngroups) } newparentresult <- try(newparentfit <- refit(newparent, fit1), silent = TRUE) if (!is(newparentresult, "try-error")) { tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, newparentfit, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(newparentfit, tempfit)) } args <- list(newparentfit, newparent, waldMat) for (s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((newparent$lhs == varnonfixvar[i]) & (newparent$op == "|") & (newparent$rhs == paste0("t", s)) & (newparent$group == 1)) args <- c(args, list(cbind(newparent$lhs[runnum], newparent$op[runnum], newparent$rhs[runnum], 1:ngroups))) } waldCon[pos,] <- do.call(waldConstraint, args) } } else { temp <- pt1 for (s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) temp <- constrainParTable(temp, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[pos,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } args <- list(fit1, pt1, waldMat) for (s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt1$lhs == varfree[i]) & (pt1$op == "|") & (pt1$rhs == paste0("t", s)) & (pt1$group == 1)) args <- c(args, list(cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups))) } waldCon[pos,] <- do.call(waldConstraint, args) } listFixCon <- c(listFixCon, tryresult) temp0 <- pt0 for (s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt0$lhs == varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", s)) & (pt0$group == 1)) temp0 <- freeParTable(temp0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, s - 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) } tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if (!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if (!is(compresult0, "try-error")) freeCon[pos,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) for (s in 2:numThreshold[varnonfixvar[i]]) { runnum <- which((pt0$lhs == varfree[i]) & (pt0$op == "|") & (pt0$rhs == paste0("t", 
s)) & (pt0$group == 1)) thresVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[pos, maxcolumns*(1:ngroups) + (s - 1)] <- thresVal totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$lhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdIntVal <- thresVal / sqrt(refTotalVal) stdestimates[pos, maxcolumns*(1:ngroups - 1) + (s - 1)] <- stdIntVal stdIntVal <- stdIntVal[grouporder] esstd[pos, maxcolumns*(1:length(complab) - 1) + (s - 1)] <- stdIntVal[2:ngroups] - stdIntVal[1] } } listFreeCon <- c(listFreeCon, tryresult0) pos <- pos + 1 } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- paste0(c(varinfixvar, varnonfixvar), "|") estimates <- cbind(estimates, stdestimates, esstd) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 3) { if (!is.null(free) | !is.null(fix)) { if (!is.null(fix)) { for (i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~~", fix[i], 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~~", fix[i], 1:ngroups) } } if (!is.null(free)) { for (i in seq_along(free)) { pt0 <- freeParTable(pt0, free[i], "~~", free[i], 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~~", free[i], 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("errvar:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- esz <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) colnames(esz) <- paste0("h:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~~") & (pt1$lhs == pt1$rhs) & (pt1$group == 1)) for (i in seq_along(index)) { runnum <- index[i] ustart <- getValue(pt1, beta, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1) temp <- fixParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 2:ngroups, ustart) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if (!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if (!is(compresult0, "try-error")) freeCon[i,] <- 
c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) errVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- errVal totalVal <- sapply(thetaImpliedTotalVar(tempfit0), function(x, v) x[v, v], v = pt0$rhs[runnum]) ifelse(poolvar, refTotalVal <- poolVariance(totalVal, neach), refTotalVal <- totalVal[refgroup]) stdErrVal <- errVal / sqrt(refTotalVal) stdestimates[i,] <- stdErrVal stdErrVal <- stdErrVal[grouporder] esstd[i,] <- stdErrVal[2:ngroups] - stdErrVal[1] if (any(abs(stdErrVal) > 0.9999)) warning(paste("The uniqueness of", pt0$rhs[runnum], "in some groups is greater than 1. The uniqueness used in", "the arcsine transformation is changed to 0.9999.")) stdErrVal[stdErrVal > 0.9999] <- 0.9999 zErrVal <- asin(sqrt(stdErrVal)) esz[i,] <- zErrVal[2:ngroups] - zErrVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd, esz) result <- cbind(freeCon, fixCon, waldCon) } else if (numType == 4) { varfree <- facnames if (!is.null(free) | !is.null(fix)) { if (!is.null(fix)) { for (i in seq_along(fix)) { pt0 <- constrainParTable(pt0, fix[i], "~1", "", 1:ngroups) pt1 <- constrainParTable(pt1, fix[i], "~1", "", 1:ngroups) } } if (!is.null(free)) { for (i in seq_along(free)) { pt0 <- freeParTable(pt0, free[i], "~1", "", 1:ngroups) pt1 <- freeParTable(pt1, free[i], "~1", "", 1:ngroups) } } namept1 <- paramNameFromPt(pt1) namept0 <- paramNameFromPt(pt0) fit0 <- refit(pt0, fit0) fit1 <- refit(pt1, fit1) beta <- lavaan::coef(fit1) beta0 <- lavaan::coef(fit0) waldMat <- matrix(0, ngroups - 1, length(beta)) varfree <- setdiff(varfree, c(free, fix)) } estimates <- matrix(NA, length(varfree), ngroups + 1) stdestimates <- matrix(NA, length(varfree), ngroups) colnames(estimates) <- c("poolest", paste0("mean:", grouplab)) colnames(stdestimates) <- paste0("std:", grouplab) esstd <- matrix(NA, length(varfree), ngroups - 1) colnames(esstd) <- paste0("diff_std:", complab) fixCon <- freeCon <- matrix(NA, length(varfree), 4) waldCon <- matrix(NA, length(varfree), 3) colnames(fixCon) <- c("fix.chi", "fix.df", "fix.p", "fix.cfi") colnames(freeCon) <- c("free.chi", "free.df", "free.p", "free.cfi") colnames(waldCon) <- c("wald.chi", "wald.df", "wald.p") index <- which((pt1$lhs %in% varfree) & (pt1$op == "~1") & (pt1$group == 1)) for (i in seq_along(index)) { runnum <- index[i] isfree <- pt1$free[runnum] != 0 if (isfree) { temp <- constrainParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups) } else { temp <- fixParTable(pt1, pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 2:ngroups, ustart = pt1$ustart[runnum]) } tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if (!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if (!is(compresult, "try-error")) fixCon[i,] <- c(unlist(modelcomp[2,5:7]), deltacfi(fit1, tempfit)) } listFixCon <- c(listFixCon, tryresult) isfree0 <- pt0$free[runnum] != 0 if (isfree0) { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum],
pt0$rhs[runnum], 1:ngroups) } else { temp0 <- freeParTable(pt0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 2:ngroups) } estimates[i, 1] <- getValue(pt0, beta0, pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1) tryresult0 <- try(tempfit0 <- refit(temp0, fit0), silent = TRUE) if (!is(tryresult0, "try-error")) { compresult0 <- try(modelcomp0 <- lavaan::lavTestLRT(tempfit0, fit0, method = method), silent = TRUE) if (!is(compresult0, "try-error")) freeCon[i,] <- c(unlist(modelcomp0[2,5:7]), deltacfi(tempfit0, fit0)) meanVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], pt0$op[runnum], pt0$rhs[runnum], 1:ngroups) estimates[i, 2:ncol(estimates)] <- meanVal facVal <- getValue(temp0, lavaan::coef(tempfit0), pt0$lhs[runnum], "~~", pt0$lhs[runnum], 1:ngroups) ifelse(poolvar, refFacVal <- poolVariance(facVal, neach), refFacVal <- facVal[refgroup]) stdMeanVal <- meanVal / sqrt(refFacVal) stdestimates[i,] <- stdMeanVal stdMeanVal <- stdMeanVal[grouporder] esstd[i,] <- stdMeanVal[2:ngroups] - stdMeanVal[1] } listFreeCon <- c(listFreeCon, tryresult0) waldCon[i,] <- waldConstraint(fit1, pt1, waldMat, cbind(pt1$lhs[runnum], pt1$op[runnum], pt1$rhs[runnum], 1:ngroups)) } freeCon[,3] <- stats::p.adjust(freeCon[,3], p.adjust) fixCon[,3] <- stats::p.adjust(fixCon[,3], p.adjust) waldCon[,3] <- stats::p.adjust(waldCon[,3], p.adjust) rownames(fixCon) <- names(listFixCon) <- rownames(freeCon) <- names(listFreeCon) <- rownames(waldCon) <- rownames(estimates) <- namept1[index] estimates <- cbind(estimates, stdestimates, esstd) result <- cbind(freeCon, fixCon, waldCon) } if (return.fit) { return(invisible(list(estimates = estimates, results = result, models = list(free = listFreeCon, fix = listFixCon, nested = fit0, parent = fit1)))) } else { return(list(estimates = estimates, results = result)) } } ## ---------------- ## Hidden Functions ## ---------------- findFactor <- function(var, facList) { tempfac <- lapply(facList, intersect, var) facinvar <- rep(names(tempfac), sapply(tempfac, length)) facinvar[match(unlist(tempfac), var)] } ## Terry moved here from wald.R so that wald() could be removed (redundant with lavaan::lavTestWald) ## FIXME: Update WaldConstraint to rely on lavaan::lavTestWald instead #' @importFrom stats pchisq waldContrast <- function(object, contrast) { beta <- lavaan::coef(object) acov <- lavaan::vcov(object) chisq <- t(contrast %*% beta) %*% solve(contrast %*% as.matrix(acov) %*% t(contrast)) %*% (contrast %*% beta) df <- nrow(contrast) p <- pchisq(chisq, df, lower.tail=FALSE) c(chisq = chisq, df = df, p = p) } #' @importFrom lavaan parTable waldConstraint <- function(fit, pt, mat, ...) { dotdotdot <- list(...) overallMat <- NULL for(i in seq_along(dotdotdot)) { target <- dotdotdot[[i]] tempMat <- mat element <- apply(target, 1, matchElement, parTable = pt) freeIndex <- pt$free[element] tempMat[,freeIndex[1]] <- -1 for(m in 2:length(freeIndex)) { tempMat[m - 1, freeIndex[m]] <- 1 } overallMat <- rbind(overallMat, tempMat) } result <- rep(NA, 3) if(!any(apply(overallMat, 1, sum) != 0)) { try(result <- waldContrast(fit, overallMat), silent = TRUE) } return(result) } poolVariance <- function(var, n) { nm <- n - 1 sum(var * nm) / sum(nm) } deltacfi <- function(parent, nested) lavaan::fitmeasures(nested)["cfi"] - lavaan::fitmeasures(parent)["cfi"] ## For categorical. FIXME: Why is this even necessary? ## Did Sunthud not know implied Sigma is available? 
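## A minimal sketch of two of the helpers above (poolVariance and deltacfi),
## using hypothetical numbers and kept as comments so nothing runs at build
## time: poolVariance() weights each group's variance by its degrees of
## freedom, n - 1, e.g.,
##   poolVariance(var = c(1.2, 0.8), n = c(100, 50))
##   ## = (1.2*99 + 0.8*49) / (99 + 49) = 1.068
## deltacfi() returns cfi(nested) - cfi(parent), so more negative values
## signal a larger drop in fit when an invariance constraint is imposed.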
#' @importFrom lavaan lavInspect thetaImpliedTotalVar <- function(object) { # param <- lavInspect(object, "est") # ngroup <- lavInspect(object, "ngroups") # name <- names(param) # if(ngroup == 1) { # ly <- param[name == "lambda"] # } else { # ly <- lapply(param, "[[", "lambda") # } # ps <- lavInspect(object, "cov.lv") # if(ngroup == 1) ps <- list(ps) # if(ngroup == 1) { # te <- param[name == "theta"] # } else { # te <- lapply(param, "[[", "theta") # } # result <- list() # for(i in 1:ngroup) { # result[[i]] <- ly[[i]] %*% ps[[i]] %*% t(ly[[i]]) + te[[i]] # } # result if (lavInspect(object, "ngroups") == 1L) return(list(lavInspect(object, "cov.ov"))) lavInspect(object, "cov.ov") } semTools/R/loadingFromAlpha.R0000644000176200001440000000132513432435606015621 0ustar liggesusers### Sunthud Pornprasertmanit ### Last updated: 3 April 2017 #' Find standardized factor loading from coefficient alpha #' #' Find standardized factor loading from coefficient alpha assuming that all #' items have equal loadings. #' #' @param alpha A desired coefficient alpha value. #' @param ni A desired number of items. #' @return \item{result}{The standardized factor loading that yields the desired #' coefficient alpha with the specified number of items.} #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @examples #' #' loadingFromAlpha(0.8, 4) #' #' @export loadingFromAlpha <- function(alpha, ni) { denominator <- ni - ((ni - 1) * alpha) result <- sqrt(alpha/denominator) return(result) } semTools/R/probeInteraction.R0000644000176200001440000020660713475443441015726 0ustar liggesusers### Sunthud Pornprasertmanit ### Last updated: 4 June 2019 ##' Probing two-way interaction on the no-centered or mean-centered latent ##' interaction ##' ##' Probing interaction for simple intercept and simple slope for the ##' no-centered or mean-centered latent two-way interaction ##' ##' Before using this function, researchers need to make the products of the ##' indicators between the first-order factors using mean centering (Marsh, Wen, ##' & Hau, 2004). Note that the double-mean centering may not be appropriate for ##' probing interaction if researchers are interested in simple intercepts. The ##' mean or double-mean centering can be done by the \code{\link{indProd}} ##' function. The indicator products can be made for all possible combinations or ##' the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model ##' with the latent interaction regression will be used to fit all original ##' indicators and the product terms. See the example for how to fit the product ##' term below. Once the lavaan result is obtained, this function will be used ##' to probe the interaction. ##' ##' Let the latent interaction model regressing the dependent variable ##' (\eqn{Y}) on the independent variable (\eqn{X}) and the moderator (\eqn{Z}) ##' be \deqn{ Y = b_0 + b_1X + b_2Z + b_3XZ + r, } where \eqn{b_0} is the ##' estimated intercept or the expected value of \eqn{Y} when both \eqn{X} and ##' \eqn{Z} are 0, \eqn{b_1} is the effect of \eqn{X} when \eqn{Z} is 0, ##' \eqn{b_2} is the effect of \eqn{Z} when \eqn{X} is 0, \eqn{b_3} is the ##' interaction effect between \eqn{X} and \eqn{Z}, and \eqn{r} is the residual ##' term. ##' ##' For probing two-way interaction, the simple intercept of the independent ##' variable at each value of the moderator (Aiken & West, 1991; Cohen, Cohen, ##' West, & Aiken, 2003; Preacher, Curran, & Bauer, 2006) can be obtained by ##' \deqn{ b_{0|X = 0, Z} = b_0 + b_2Z.
} ##' ##' The simple slope of the independent variable at each value of the moderator ##' can be obtained by \deqn{ b_{X|Z} = b_1 + b_3Z. } ##' ##' The variance of the simple intercept formula is \deqn{ Var\left(b_{0|X = 0, ##' Z}\right) = Var\left(b_0\right) + 2ZCov\left(b_0, b_2\right) + ##' Z^2Var\left(b_2\right) } where \eqn{Var} denotes the variance of a parameter ##' estimate and \eqn{Cov} denotes the covariance of two parameter estimates. ##' ##' The variance of the simple slope formula is \deqn{ Var\left(b_{X|Z}\right) = ##' Var\left(b_1\right) + 2ZCov\left(b_1, b_3\right) + Z^2Var\left(b_3\right) } ##' ##' The Wald statistic is used as the test statistic. ##' ##' ##' @importFrom lavaan lavInspect ##' @importFrom stats pnorm ##' ##' @param fit The lavaan model object used to evaluate model fit ##' @param nameX The vector of the factor names used as the predictors. The ##' first-order factors will be listed first. The last name must be the name ##' representing the interaction term. ##' @param nameY The name of the factor that is used as the dependent variable. ##' @param modVar The name of the factor that is used as a moderator. The effect of ##' the other independent factor at each value of the moderator will be ##' probed. ##' @param valProbe The values of the moderator that will be used to probe the ##' effect of the other independent factor. ##' @param group In multigroup models, the label of the group for which the ##' results will be returned. Must correspond to one of ##' \code{\link[lavaan]{lavInspect}(fit, "group.label")}. ##' ##' @return A list with two elements: ##' \enumerate{ ##' \item \code{SimpleIntercept}: The intercepts given each value of the ##' moderator. This element will be shown only if the factor intercept is ##' estimated (e.g., not fixed as 0). ##' \item \code{SimpleSlope}: The slopes given each value of the moderator. ##' } ##' In each element, the first column represents the values of the moderators ##' specified in the \code{valProbe} argument. The second column is the simple ##' intercept or simple slope. The third column is the \emph{SE} of the simple ##' intercept or simple slope. The fourth column is the Wald (\emph{z}) ##' statistic. The fifth column is the \emph{p} value testing whether the simple ##' intercepts or slopes are different from 0. ##' ##' @author ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \itemize{ ##' \item \code{\link{indProd}} For creating the indicator products with no ##' centering, mean centering, double-mean centering, or residual centering. ##' \item \code{\link{probe3WayMC}} For probing the three-way latent interaction ##' when the results are obtained from mean centering or double-mean centering ##' \item \code{\link{probe2WayRC}} For probing the two-way latent interaction ##' when the results are obtained from the residual-centering approach. ##' \item \code{\link{probe3WayRC}} For probing the three-way latent interaction ##' when the results are obtained from the residual-centering approach. ##' \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the ##' latent interaction. ##' } ##' ##' @references ##' Aiken, L. S., & West, S. G. (1991). \emph{Multiple regression: Testing ##' and interpreting interactions}. Newbury Park, CA: Sage. ##' ##' Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). \emph{Applied ##' multiple regression/correlation analysis for the behavioral sciences} ##' (3rd ed.).
New York, NY: Routledge. ##' ##' Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of ##' latent interactions: Evaluation of alternative estimation strategies and ##' indicator construction. \emph{Psychological Methods, 9}(3), 275--300. ##' doi:10.1037/1082-989X.9.3.275 ##' ##' Preacher, K. J., Curran, P. J., & Bauer, D. J. (2006). Computational tools ##' for probing interactions in multiple linear regression, multilevel modeling, ##' and latent curve analysis. \emph{Journal of Educational and Behavioral ##' Statistics, 31}(4), 437--448. doi:10.3102/10769986031004437 ##' ##' @examples ##' ##' library(lavaan) ##' ##' dat2wayMC <- indProd(dat2way, 1:3, 4:6) ##' ##' model1 <- " ##' f1 =~ x1 + x2 + x3 ##' f2 =~ x4 + x5 + x6 ##' f12 =~ x1.x4 + x2.x5 + x3.x6 ##' f3 =~ x7 + x8 + x9 ##' f3 ~ f1 + f2 + f12 ##' f12 ~~ 0*f1 ##' f12 ~~ 0*f2 ##' x1 ~ 0*1 ##' x4 ~ 0*1 ##' x1.x4 ~ 0*1 ##' x7 ~ 0*1 ##' f1 ~ NA*1 ##' f2 ~ NA*1 ##' f12 ~ NA*1 ##' f3 ~ NA*1 ##' " ##' ##' fitMC2way <- sem(model1, data = dat2wayMC, std.lv = FALSE, ##' meanstructure = TRUE) ##' summary(fitMC2way) ##' ##' result2wayMC <- probe2WayMC(fitMC2way, c("f1", "f2", "f12"), ##' "f3", "f2", c(-1, 0, 1)) ##' result2wayMC ##' ##' @export probe2WayMC <- function(fit, nameX, nameY, modVar, valProbe, group) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if(is.na(modVar) || !(modVar %in% 1:2)) stop("The moderator must be one of the first two factor names in 'nameX', or the integer 1 or 2.") # Check whether the fit object does not use the mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed) estSpec <- lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") ## TDJ: If multigroup, check group %in% group.label nG <- lavInspect(fit, "ngroups") if (nG > 1L) { group.label <- lavInspect(fit, "group.label") if (missing(group)) { warning('No argument provided for "group". 
Using the first group.') group <- 1L } ## assign numeric to character if (is.numeric(group)) { if (group %in% 1:nG) { group <- group.label[group] } else group <- as.character(group) } else group <- as.character(group) ## check that character is a group if (!as.character(group) %in% group.label) stop('"group" must be a character string naming a group of interest, or ', 'an integer corresponding to a group in lavInspect(fit, "group.label")') ## Get the parameter estimates for that group est <- lavInspect(fit, "est")[[group]] } else { ## single-group model est <- lavInspect(fit, "est") } # Compute the intercept of no-centering betaNC <- as.matrix(est$beta[nameY, nameX]); colnames(betaNC) <- nameY # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether the intercept is estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- rbind(est$alpha[nameY,], betaNC) # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 1) { usedVar <- usedVar[c(1, 3, 2, 4), c(1, 3, 2, 4)] usedBeta <- usedBeta[c(1, 3, 2, 4)] } # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * valProbe varIntcept <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- round(pvalue(zIntcept),6) #JG: rounded values to make them more readable resultIntcept <- cbind(valProbe, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[4] * valProbe varSlope <- usedVar[2, 2] + 2 * valProbe * usedVar[2, 4] + (valProbe^2) * usedVar[4, 4] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- pvalue(zSlope) resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- betaNC # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 1) { usedVar <- usedVar[c(2, 1, 3), c(2, 1, 3)] usedBeta <- usedBeta[c(2, 1, 3)] } # Find simple slope simpleSlope <- usedBeta[1] + usedBeta[3] * valProbe varSlope <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) #JG: rounded values to make them more readable resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope)) }
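## ---------------------------------------------------------------------------
## Illustrative sketch (not part of the package API; the helper name is
## hypothetical). probe2WayMC() above implements the documented formulas
## b_{X|Z} = b1 + b3*Z and Var(b_{X|Z}) = Var(b1) + 2*Z*Cov(b1,b3) + Z^2*Var(b3).
## Given the two relevant coefficients b = c(b1, b3) and their 2x2 covariance
## block V (extracted from vcov(fit)), the same numbers can be computed by hand:
simpleSlopeSketch <- function(b, V, z) {
  est <- b[1] + b[2] * z                                   # b1 + b3*z
  se <- sqrt(V[1, 1] + 2 * z * V[1, 2] + z^2 * V[2, 2])    # delta-method SE
  wald <- est / se
  c(slope = est, SE = se, Wald = wald,
    p = 2 * stats::pnorm(abs(wald), lower.tail = FALSE))
}
## e.g., simpleSlopeSketch(b = c(0.5, 0.2),
##                         V = matrix(c(.010, .002, .002, .004), 2), z = 1)
## ---------------------------------------------------------------------------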
##' Probing two-way interaction on the residual-centered latent interaction ##' ##' Probing interaction for simple intercept and simple slope for the ##' residual-centered latent two-way interaction (Pornprasertmanit, Schoemann, ##' Geldhof, & Little, submitted) ##' ##' Before using this function, researchers need to make the products of the ##' indicators between the first-order factors and residualize the products by ##' the original indicators (Lance, 1988; Little, Bovaird, & Widaman, 2006). The ##' process can be automated by the \code{\link{indProd}} function. Note that ##' the indicator products can be made for all possible combinations or ##' the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model ##' with the latent-interaction regression will be used to fit all original ##' indicators and the product terms. To use this function, the model must be fit ##' with a mean structure. See the example for how to fit the product term ##' below. Once the lavaan result is obtained, this function will be used to ##' probe the interaction. ##' ##' The probing process on the residual-centered latent interaction is based on ##' transforming the residual-centered result into the no-centered result. See ##' Pornprasertmanit, Schoemann, Geldhof, and Little (submitted) for further ##' details. Note that this approach is based on a strong assumption that the ##' first-order latent variables are normally distributed. The probing process ##' is applied after the no-centered result (parameter estimates and their ##' covariance matrix among parameter estimates) has been computed. See ##' \code{\link{probe2WayMC}} for further details. ##' ##' ##' @importFrom lavaan lavInspect ##' @importFrom stats pnorm ##' ##' @param fit The lavaan model object used to evaluate model fit ##' @param nameX The vector of the factor names used as the predictors. The ##' first-order factors will be listed first. The last name must be the name ##' representing the interaction term. ##' @param nameY The name of the factor that is used as the dependent variable. ##' @param modVar The name of the factor that is used as a moderator. The effect of ##' the other independent factor at each value of the moderator will be ##' probed. ##' @param valProbe The values of the moderator that will be used to probe the ##' effect of the other independent factor. ##' @param group In multigroup models, the label of the group for which the ##' results will be returned. Must correspond to one of ##' \code{\link[lavaan]{lavInspect}(fit, "group.label")}. ##' ##' @return A list with two elements: ##' \enumerate{ ##' \item \code{SimpleIntercept}: The intercepts given each value of the ##' moderator. This element will be shown only if the factor intercept is ##' estimated (e.g., not fixed as 0). ##' \item \code{SimpleSlope}: The slopes given each value of the moderator. ##' } ##' In each element, the first column represents the values of the moderators ##' specified in the \code{valProbe} argument. The second column is the simple ##' intercept or simple slope. The third column is the standard error of the ##' simple intercept or simple slope. The fourth column is the Wald (\emph{z}) ##' statistic. The fifth column is the \emph{p} value testing whether the simple ##' intercepts or slopes are different from 0. ##' ##' @author ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \itemize{ ##' \item \code{\link{indProd}} For creating the indicator products with no ##' centering, mean centering, double-mean centering, or residual centering.
##' \item \code{\link{probe2WayMC}} For probing the two-way latent interaction ##' when the results are obtained from mean centering or double-mean centering ##' \item \code{\link{probe3WayMC}} For probing the three-way latent interaction ##' when the results are obtained from mean centering or double-mean centering ##' \item \code{\link{probe3WayRC}} For probing the three-way latent interaction ##' when the results are obtained from the residual-centering approach. ##' \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the ##' latent interaction. ##' } ##' @references ##' ##' Lance, C. E. (1988). Residual centering, exploratory and confirmatory ##' moderator analysis, and decomposition of effects in path models containing ##' interactions. \emph{Applied Psychological Measurement, 12}(2), 163--175. ##' doi:10.1177/014662168801200205 ##' ##' Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of ##' orthogonalizing powered and product terms: Implications for modeling ##' interactions. \emph{Structural Equation Modeling, 13}(4), 497--519. ##' doi:10.1207/s15328007sem1304_1 ##' ##' Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of ##' latent interactions: Evaluation of alternative estimation strategies and ##' indicator construction. \emph{Psychological Methods, 9}(3), 275--300. ##' doi:10.1037/1082-989X.9.3.275 ##' ##' Geldhof, G. J., Pornprasertmanit, S., Schoemann, A. M., & Little, T. D. ##' (2013). Orthogonalizing through residual centering: Extended applications ##' and caveats. \emph{Educational and Psychological Measurement, 73}(1), 27--46. ##' doi:10.1177/0013164412445473 ##' @examples ##' ##' library(lavaan) ##' ##' dat2wayRC <- orthogonalize(dat2way, 1:3, 4:6) ##' ##' model1 <- " ##' f1 =~ x1 + x2 + x3 ##' f2 =~ x4 + x5 + x6 ##' f12 =~ x1.x4 + x2.x5 + x3.x6 ##' f3 =~ x7 + x8 + x9 ##' f3 ~ f1 + f2 + f12 ##' f12 ~~ 0*f1 ##' f12 ~~ 0*f2 ##' x1 ~ 0*1 ##' x4 ~ 0*1 ##' x1.x4 ~ 0*1 ##' x7 ~ 0*1 ##' f1 ~ NA*1 ##' f2 ~ NA*1 ##' f12 ~ NA*1 ##' f3 ~ NA*1 ##' " ##' ##' fitRC2way <- sem(model1, data = dat2wayRC, std.lv = FALSE, ##' meanstructure = TRUE) ##' summary(fitRC2way) ##' ##' result2wayRC <- probe2WayRC(fitRC2way, c("f1", "f2", "f12"), ##' "f3", "f2", c(-1, 0, 1)) ##' result2wayRC ##' ##' @export probe2WayRC <- function(fit, nameX, nameY, modVar, valProbe, group) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if(is.na(modVar) || !(modVar %in% 1:2)) stop("The moderator must be one of the first two factor names in 'nameX', or the integer 1 or 2.") # Check whether the fit object does not use the mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed) estSpec <- lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") ## TDJ: If multigroup, check group %in% group.label nG <- lavInspect(fit, "ngroups") if (nG > 1L) { group.label <- lavInspect(fit, "group.label") if (missing(group)) { warning('No argument provided for "group". Using the first group.') group <- 1L } ## assign numeric to character if (is.numeric(group)) { if (group %in% 1:nG) { group <- group.label[group] } else group <- as.character(group) } else group <- as.character(group) ## check that character is a group if (!as.character(group) %in% group.label) stop('"group" must be a character string naming a group of interest, or ', 'an integer corresponding to a group in lavInspect(fit, "group.label")') ## Get the parameter estimates for that group est <- lavInspect(fit, "est")[[group]] } else { ## single-group model est <- lavInspect(fit, "est") }
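## Overview of the transformation implemented below (descriptive comments only):
## 1. Recover the model-implied means and (co)variances of the latent predictors.
## 2. Reconstruct the no-centered (NC) sufficient statistics and regression
##    coefficients from the residual-centered (RC) ones, using moment identities
##    for products of normal variables (computed inline here; the three-way
##    version relies on the hidden helpers at the end of this file).
## 3. Rescale the RC covariance matrix of the estimates to the NC metric via the
##    ratio of residual variances and the SSRC/SSNC cross-product matrices.
## 4. Probe simple intercepts and slopes exactly as in probe2WayMC().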
# Find the mean and covariance matrix of independent factors varX <- est$psi[nameX, nameX] meanX <- as.matrix(est$alpha[nameX,]); colnames(meanX) <- "intcept" # Find the intercept, regression coefficients, and residual variance of residual-centered regression intceptRC <- est$alpha[nameY,] resVarRC <- est$psi[nameY, nameY] betaRC <- as.matrix(est$beta[nameY, nameX]); colnames(betaRC) <- nameY # Find the number of observations numobs <- lavInspect(fit, "nobs") # Compute SSRC meanXwith1 <- rbind(1, meanX) varXwith0 <- cbind(0, rbind(0, varX)) SSRC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute Mean(Y) and Var(Y) betaRCWithIntcept <- rbind(intceptRC, betaRC) meanY <- t(meanXwith1) %*% betaRCWithIntcept varY <- (t(betaRCWithIntcept) %*% SSRC %*% betaRCWithIntcept)/numobs - meanY^2 + resVarRC # Compute Cov(Y, X) covY <- as.matrix((varX %*% betaRC)[1:2,]) # Compute E(XZ) meanX[3] <- meanX[1] * meanX[2] + varX[1, 2] # Compute Var(XZ) varX[3, 3] <- meanX[1]^2 * varX[2, 2] + meanX[2]^2 * varX[1, 1] + 2 * meanX[1] * meanX[2] * varX[1, 2] + varX[1, 1] * varX[2, 2] + varX[1, 2]^2 # Compute Cov(X, XZ), Cov(Z, XZ) varX[1, 3] <- varX[3, 1] <- meanX[1] * varX[1, 2] + meanX[2] * varX[1, 1] varX[2, 3] <- varX[3, 2] <- meanX[1] * varX[2, 2] + meanX[2] * varX[1, 2] # Compute Cov(Y, XZ) and regression coefficients of no-centering betaNC <- solve(varX[1:2,1:2], covY - rbind(varX[1,3] * betaRC[3,1], varX[2, 3] * betaRC[3,1])) betaNC <- rbind(betaNC, betaRC[3, 1]) covY <- rbind(covY, (varX %*% betaNC)[3, 1]) # Aggregate the no-centering sufficient statistics (shown for completeness; not strictly needed) fullCov <- rbind(cbind(varX, covY), c(covY, varY)) fullMean <- rbind(meanX, meanY) # Compute the intercept of no-centering intceptNC <- meanY - t(betaNC) %*% meanX # Compute SSNC betaNCWithIntcept <- rbind(intceptNC, betaNC) meanXwith1 <- rbind(1, meanX) varXwith0 <- rbind(0, cbind(0, varX)) SSNC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute residual variance on non-centering resVarNC <- varY - (t(betaNCWithIntcept) %*% SSNC %*% betaNCWithIntcept)/numobs + meanY^2 # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether the intercept is estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC %*% solve(SSNC)) usedBeta <- betaNCWithIntcept # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 1) { usedVar <- usedVar[c(1, 3, 2, 4), c(1, 3, 2, 4)] usedBeta <- usedBeta[c(1, 3, 2, 4)] } # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * valProbe
varIntcept <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- round(pvalue(zIntcept),6) #JG: rounded values to make them more readable resultIntcept <- cbind(valProbe, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[4] * valProbe varSlope <- usedVar[2, 2] + 2 * valProbe * usedVar[2, 4] + (valProbe^2) * usedVar[4, 4] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- pvalue(zSlope) resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC[2:4, 2:4] %*% solve(SSNC[2:4, 2:4])) usedBeta <- betaNC # Change the order of usedVar and usedBeta if the moderator variable is listed first if(modVar == 1) { usedVar <- usedVar[c(2, 1, 3), c(2, 1, 3)] usedBeta <- usedBeta[c(2, 1, 3)] } # Find simple slope simpleSlope <- usedBeta[1] + usedBeta[3] * valProbe varSlope <- usedVar[1, 1] + 2 * valProbe * usedVar[1, 3] + (valProbe^2) * usedVar[3, 3] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) #JG: rounded values to make them more readable resultSlope <- cbind(valProbe, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope)) }
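## ---------------------------------------------------------------------------
## Numerical sanity check (illustrative sketch; the function name is
## hypothetical and nothing here is exported). The RC-to-NC transformation
## above relies on bivariate normality, e.g. the identity
##   Var(XZ) = mx^2*vz + mz^2*vx + 2*mx*mz*cxz + vx*vz + cxz^2,
## which var2NormProd() (defined at the end of this file) implements. A quick
## simulation comparison:
checkVar2NormProdSketch <- function(n = 1e6) {
  X <- stats::rnorm(n, mean = 1, sd = 2)
  Z <- 0.3 * X + stats::rnorm(n, mean = -0.5, sd = 1)
  m <- c(mean(X), mean(Z))            # sample means
  s <- stats::cov(cbind(X, Z))        # sample covariance matrix
  c(analytic = var2NormProd(m, s), empirical = stats::var(X * Z))
}
## ---------------------------------------------------------------------------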
##' Probing three-way interaction on the no-centered or mean-centered latent ##' interaction ##' ##' Probing interaction for simple intercept and simple slope for the ##' no-centered or mean-centered latent three-way interaction ##' ##' Before using this function, researchers need to make the products of the ##' indicators between the first-order factors using mean centering (Marsh, Wen, ##' & Hau, 2004). Note that the double-mean centering may not be appropriate for ##' probing interaction if researchers are interested in simple intercepts. The ##' mean or double-mean centering can be done by the \code{\link{indProd}} ##' function. The indicator products can be made for all possible combinations or ##' the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model ##' with the latent-interaction regression will be used to fit all original ##' indicators and the product terms. See the example for how to fit the product ##' term below. Once the lavaan result is obtained, this function will be used ##' to probe the interaction. ##' ##' Let the latent interaction model regressing the dependent variable ##' (\eqn{Y}) on the independent variable (\eqn{X}) and two moderators (\eqn{Z} ##' and \eqn{W}) be \deqn{ Y = b_0 + b_1X + b_2Z + b_3W + b_4XZ + b_5XW + b_6ZW ##' + b_7XZW + r, } where \eqn{b_0} is the estimated intercept or the expected ##' value of \eqn{Y} when \eqn{X}, \eqn{Z}, and \eqn{W} are 0, \eqn{b_1} is the ##' effect of \eqn{X} when \eqn{Z} and \eqn{W} are 0, \eqn{b_2} is the effect of ##' \eqn{Z} when \eqn{X} and \eqn{W} are 0, \eqn{b_3} is the effect of \eqn{W} ##' when \eqn{X} and \eqn{Z} are 0, \eqn{b_4} is the interaction effect between ##' \eqn{X} and \eqn{Z} when \eqn{W} is 0, \eqn{b_5} is the interaction effect ##' between \eqn{X} and \eqn{W} when \eqn{Z} is 0, \eqn{b_6} is the interaction ##' effect between \eqn{Z} and \eqn{W} when \eqn{X} is 0, \eqn{b_7} is the ##' three-way interaction effect between \eqn{X}, \eqn{Z}, and \eqn{W}, and ##' \eqn{r} is the residual term. ##' ##' For probing three-way interaction, the simple intercept of the independent ##' variable at the specific values of the moderators (Aiken & West, 1991) can ##' be obtained by \deqn{ b_{0|X = 0, Z, W} = b_0 + b_2Z + b_3W + b_6ZW. } ##' ##' The simple slope of the independent variable at the specific values of the ##' moderators can be obtained by \deqn{ b_{X|Z, W} = b_1 + b_4Z + b_5W + b_7ZW. ##' } ##' ##' The variance of the simple intercept formula is \deqn{ Var\left(b_{0|X = 0, ##' Z, W}\right) = Var\left(b_0\right) + Z^2Var\left(b_2\right) + ##' W^2Var\left(b_3\right) + Z^2W^2Var\left(b_6\right) + 2ZCov\left(b_0, ##' b_2\right) + 2WCov\left(b_0, b_3\right) + 2ZWCov\left(b_0, b_6\right) + ##' 2ZWCov\left(b_2, b_3\right) + 2Z^2WCov\left(b_2, b_6\right) + ##' 2ZW^2Cov\left(b_3, b_6\right) } where \eqn{Var} denotes the variance of a ##' parameter estimate and \eqn{Cov} denotes the covariance of two parameter ##' estimates. ##' ##' The variance of the simple slope formula is \deqn{ Var\left(b_{X|Z, ##' W}\right) = Var\left(b_1\right) + Z^2Var\left(b_4\right) + ##' W^2Var\left(b_5\right) + Z^2W^2Var\left(b_7\right) + 2ZCov\left(b_1, ##' b_4\right) + 2WCov\left(b_1, b_5\right) + 2ZWCov\left(b_1, b_7\right) + ##' 2ZWCov\left(b_4, b_5\right) + 2Z^2WCov\left(b_4, b_7\right) + ##' 2ZW^2Cov\left(b_5, b_7\right) } ##' ##' The Wald statistic is used as the test statistic. ##' ##' ##' @importFrom lavaan lavInspect ##' @importFrom stats pnorm ##' ##' @param fit The lavaan model object used to evaluate model fit ##' @param nameX The vector of the factor names used as the predictors. The ##' three first-order factors will be listed first. Then the two-way interaction ##' factors will be listed. The last element of the name will represent the ##' three-way interaction. Note that the fourth element must be the interaction ##' between the first and the second variables. The fifth element must be the ##' interaction between the first and the third variables. The sixth element ##' must be the interaction between the second and the third variables. ##' @param nameY The name of the factor that is used as the dependent variable. ##' @param modVar The names of the two factors that are used as the moderators. The ##' effect of the independent factor on each combination of the moderator ##' variable values will be probed. ##' @param valProbe1 The values of the first moderator that will be used to ##' probe the effect of the independent factor. ##' @param valProbe2 The values of the second moderator that will be used to ##' probe the effect of the independent factor.
##' @param group In multigroup models, the label of the group for which the ##' results will be returned. Must correspond to one of ##' \code{\link[lavaan]{lavInspect}(fit, "group.label")}. ##' ##' @return A list with two elements: ##' \enumerate{ ##' \item \code{SimpleIntercept}: The intercepts given each combination of the ##' moderator values. This element will be shown only if the factor intercept is ##' estimated (e.g., not fixed as 0). ##' \item \code{SimpleSlope}: The slopes given each combination of the moderator ##' values. ##' } ##' In each element, the first column represents values of the first moderator ##' specified in the \code{valProbe1} argument. The second column represents ##' values of the second moderator specified in the \code{valProbe2} argument. ##' The third column is the simple intercept or simple slope. The fourth column ##' is the standard error of the simple intercept or simple slope. The fifth ##' column is the Wald (\emph{z}) statistic. The sixth column is the \emph{p} ##' value testing whether the simple intercepts or slopes are different from 0. ##' ##' @author ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \itemize{ ##' \item \code{\link{indProd}} For creating the indicator products with no ##' centering, mean centering, double-mean centering, or residual centering. ##' \item \code{\link{probe2WayMC}} For probing the two-way latent interaction ##' when the results are obtained from mean centering or double-mean centering ##' \item \code{\link{probe2WayRC}} For probing the two-way latent interaction ##' when the results are obtained from the residual-centering approach. ##' \item \code{\link{probe3WayRC}} For probing the three-way latent interaction ##' when the results are obtained from the residual-centering approach. ##' \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the ##' latent interaction. ##' } ##' ##' @references ##' Aiken, L. S., & West, S. G. (1991). \emph{Multiple regression: Testing ##' and interpreting interactions}. Newbury Park, CA: Sage. ##' ##' Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of ##' latent interactions: Evaluation of alternative estimation strategies and ##' indicator construction. \emph{Psychological Methods, 9}(3), 275--300.
##' doi:10.1037/1082-989X.9.3.275 ##' ##' @examples ##' ##' library(lavaan) ##' ##' dat3wayMC <- indProd(dat3way, 1:3, 4:6, 7:9) ##' ##' model3 <- " ##' f1 =~ x1 + x2 + x3 ##' f2 =~ x4 + x5 + x6 ##' f3 =~ x7 + x8 + x9 ##' f12 =~ x1.x4 + x2.x5 + x3.x6 ##' f13 =~ x1.x7 + x2.x8 + x3.x9 ##' f23 =~ x4.x7 + x5.x8 + x6.x9 ##' f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9 ##' f4 =~ x10 + x11 + x12 ##' f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123 ##' f1 ~~ 0*f12 ##' f1 ~~ 0*f13 ##' f1 ~~ 0*f123 ##' f2 ~~ 0*f12 ##' f2 ~~ 0*f23 ##' f2 ~~ 0*f123 ##' f3 ~~ 0*f13 ##' f3 ~~ 0*f23 ##' f3 ~~ 0*f123 ##' f12 ~~ 0*f123 ##' f13 ~~ 0*f123 ##' f23 ~~ 0*f123 ##' x1 ~ 0*1 ##' x4 ~ 0*1 ##' x7 ~ 0*1 ##' x10 ~ 0*1 ##' x1.x4 ~ 0*1 ##' x1.x7 ~ 0*1 ##' x4.x7 ~ 0*1 ##' x1.x4.x7 ~ 0*1 ##' f1 ~ NA*1 ##' f2 ~ NA*1 ##' f3 ~ NA*1 ##' f12 ~ NA*1 ##' f13 ~ NA*1 ##' f23 ~ NA*1 ##' f123 ~ NA*1 ##' f4 ~ NA*1 ##' " ##' ##' fitMC3way <- sem(model3, data = dat3wayMC, std.lv = FALSE, ##' meanstructure = TRUE) ##' summary(fitMC3way) ##' ##' result3wayMC <- probe3WayMC(fitMC3way, ##' c("f1", "f2", "f3", "f12", "f13", "f23", "f123"), ##' "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1)) ##' result3wayMC ##' ##' @export probe3WayMC <- function(fit, nameX, nameY, modVar, valProbe1, valProbe2, group) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if((NA %in% modVar) || !(do.call("&", as.list(modVar %in% 1:3)))) stop("Both moderators must be among the first three factor names in 'nameX', or integers 1, 2, or 3.") # Check whether the fit object does not use the mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed) estSpec <- lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") ## TDJ: If multigroup, check group %in% group.label nG <- lavInspect(fit, "ngroups") if (nG > 1L) { group.label <- lavInspect(fit, "group.label") if (missing(group)) { warning('No argument provided for "group". Using the first group.') group <- 1L } ## assign numeric to character if (is.numeric(group)) { if (group %in% 1:nG) { group <- group.label[group] } else group <- as.character(group) } else group <- as.character(group) ## check that character is a group if (!as.character(group) %in% group.label) stop('"group" must be a character string naming a group of interest, or ', 'an integer corresponding to a group in lavInspect(fit, "group.label")') ## Get the parameter estimates for that group est <- lavInspect(fit, "est")[[group]] } else { ## single-group model est <- lavInspect(fit, "est") } # Compute the intercept of no-centering betaNC <- as.matrix(est$beta[nameY, nameX]); colnames(betaNC) <- nameY # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether the intercept is estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 # Find the order to rearrange ord <- c(setdiff(1:3, modVar), modVar) ord <- c(ord, 7 - rev(ord)) resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- rbind(est$alpha[nameY,], betaNC) if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. 
The resulting calculation provided negative standard errors.") # JG: edited this error # Change the order of usedVar and usedBeta if the moderator variable is listed first usedVar <- usedVar[c(1, ord+1, 8), c(1, ord+1, 8)] usedBeta <- usedBeta[c(1, ord+1, 8)] # Find probe value val <- expand.grid(valProbe1, valProbe2) # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * val[,1] + usedBeta[4] * val[,2] + usedBeta[7] * val[,1] * val[,2] varIntcept <- usedVar[1, 1] + val[,1]^2 * usedVar[3, 3] + val[,2]^2 * usedVar[4, 4] + val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 3] + 2 * val[,2] * usedVar[1, 4] + 2 * val[,1] * val[,2] * usedVar[1, 7] + 2 * val[,1] * val[,2] * usedVar[3, 4] + 2 * val[,1]^2 * val[,2] * usedVar[3, 7] + 2* val[,1] * val[,2]^2 * usedVar[4, 7] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- pvalue(zIntcept) resultIntcept <- cbind(val, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[5] * val[,1] + usedBeta[6] * val[,2] + usedBeta[8] * val[,1] * val[,2] varSlope <- usedVar[2, 2] + val[,1]^2 * usedVar[5, 5] + val[,2]^2 * usedVar[6, 6] + val[,1]^2 * val[,2]^2 * usedVar[8, 8] + 2 * val[,1] * usedVar[2, 5] + 2 * val[,2] * usedVar[2, 6] + 2 * val[,1] * val[,2] * usedVar[2, 8] + 2 * val[,1] * val[,2] * usedVar[5, 6] + 2 * val[,1]^2 * val[,2] * usedVar[5, 8] + 2 * val[,1] * val[,2]^2 * usedVar[6, 8] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) # JG: rounded values resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") # Transform it to non-centering SE usedVar <- varEst[targetcol, targetcol] usedBeta <- betaNC if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. The resulting calculation provided negative standard errors.") # JG: edited this error # Change the order of usedVar and usedBeta if the moderator variable is listed first usedVar <- usedVar[c(ord, 7), c(ord, 7)] usedBeta <- usedBeta[c(ord, 7)] # Find probe value val <- expand.grid(valProbe1, valProbe2) # Find simple slope simpleSlope <- usedBeta[1] + usedBeta[4] * val[,1] + usedBeta[5] * val[,2] + usedBeta[7] * val[,1] * val[,2] varSlope <- usedVar[1, 1] + val[,1]^2 * usedVar[4, 4] + val[,2]^2 * usedVar[5, 5] + val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 4] + 2 * val[,2] * usedVar[1, 5] + 2 * val[,1] * val[,2] * usedVar[1, 7] + 2 * val[,1] * val[,2] * usedVar[4, 5] + 2 * val[,1]^2 * val[,2] * usedVar[4, 7] + 2 * val[,1] * val[,2]^2 * usedVar[5, 7] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) # JG: rounded values resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope)) } ##' Probing three-way interaction on the residual-centered latent interaction ##' ##' Probing interaction for simple intercept and simple slope for the ##' residual-centered latent three-way interaction (Pornprasertmanit, Schoemann, ##' Geldhof, & Little, submitted) ##' ##' Before using this function, researchers need to make the products of the ##' indicators between the first-order factors and residualize the products by ##' the original indicators (Lance, 1988; Little, Bovaird, & Widaman, 2006). 
The ##' process can be automated by the \code{\link{indProd}} function. Note that ##' the indicator products can be made for all possible combinations or ##' the matched-pair approach (Marsh et al., 2004). Next, the hypothesized model ##' with the latent-interaction regression will be used to fit all original ##' indicators and the product terms (Geldhof, Pornprasertmanit, Schoemann, & ##' Little, in press). To use this function, the model must be fit with a mean ##' structure. See the example for how to fit the product term below. Once the ##' lavaan result is obtained, this function will be used to probe the ##' interaction. ##' ##' The probing process on the residual-centered latent interaction is based on ##' transforming the residual-centered result into the no-centered result. See ##' Pornprasertmanit, Schoemann, Geldhof, and Little (submitted) for further ##' details. Note that this approach is based on a strong assumption that the ##' first-order latent variables are normally distributed. The probing process ##' is applied after the no-centered result (parameter estimates and their ##' covariance matrix among parameter estimates) has been computed. See ##' \code{\link{probe3WayMC}} for further details. ##' ##' ##' @importFrom lavaan lavInspect ##' @importFrom stats pnorm ##' ##' @param fit The lavaan model object used to evaluate model fit ##' @param nameX The vector of the factor names used as the predictors. The ##' three first-order factors will be listed first. Then the two-way interaction ##' factors will be listed. The last element of the name will represent the ##' three-way interaction. Note that the fourth element must be the interaction ##' between the first and the second variables. The fifth element must be the ##' interaction between the first and the third variables. The sixth element ##' must be the interaction between the second and the third variables. ##' @param nameY The name of the factor that is used as the dependent variable. ##' @param modVar The names of the two factors that are used as the moderators. The ##' effect of the independent factor on each combination of the moderator ##' variable values will be probed. ##' @param valProbe1 The values of the first moderator that will be used to ##' probe the effect of the independent factor. ##' @param valProbe2 The values of the second moderator that will be used to ##' probe the effect of the independent factor. ##' @param group In multigroup models, the label of the group for which the ##' results will be returned. Must correspond to one of ##' \code{\link[lavaan]{lavInspect}(fit, "group.label")}. ##' ##' @return A list with two elements: ##' \enumerate{ ##' \item \code{SimpleIntercept}: The intercepts given each combination of the ##' moderator values. This element will be shown only if the factor intercept is ##' estimated ##' (e.g., not fixed as 0). ##' \item \code{SimpleSlope}: The slopes given each combination of the moderator ##' values. ##' } ##' In each element, the first column represents values of the first moderator ##' specified in the \code{valProbe1} argument. The second column represents ##' values of the second moderator specified in the \code{valProbe2} argument. ##' The third column is the simple intercept or simple slope. The fourth column ##' is the \emph{SE} of the simple intercept or simple slope. The fifth column ##' is the Wald (\emph{z}) statistic. The sixth column is the \emph{p} value ##' testing whether the simple intercepts or slopes are different from 0.
##' ##' @author ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \itemize{ ##' \item \code{\link{indProd}} For creating the indicator products with no ##' centering, mean centering, double-mean centering, or residual centering. ##' \item \code{\link{probe2WayMC}} For probing the two-way latent interaction ##' when the results are obtained from mean-centering, or double-mean centering ##' \item \code{\link{probe3WayMC}} For probing the three-way latent interaction ##' when the results are obtained from mean-centering, or double-mean centering ##' \item \code{\link{probe2WayRC}} For probing the two-way latent interaction ##' when the results are obtained from residual-centering approach. ##' \item \code{\link{plotProbe}} Plot the simple intercepts and slopes of the ##' latent interaction. ##' } ##' ##' @references ##' Geldhof, G. J., Pornprasertmanit, S., Schoemann, A., & Little, ##' T. D. (2013). Orthogonalizing through residual centering: Extended ##' applications and caveats. \emph{Educational and Psychological Measurement, ##' 73}(1), 27--46. doi:10.1177/0013164412445473 ##' ##' Lance, C. E. (1988). Residual centering, exploratory and confirmatory ##' moderator analysis, and decomposition of effects in path models containing ##' interactions. \emph{Applied Psychological Measurement, 12}(2), 163--175. ##' doi:10.1177/014662168801200205 ##' ##' Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of ##' orthogonalizing powered and product terms: Implications for modeling ##' interactions. \emph{Structural Equation Modeling, 13}(4), 497--519. ##' doi:10.1207/s15328007sem1304_1 ##' ##' Marsh, H. W., Wen, Z., & Hau, K. T. (2004). Structural equation models of ##' latent interactions: Evaluation of alternative estimation strategies and ##' indicator construction. \emph{Psychological Methods, 9}(3), 275--300. ##' doi:10.1037/1082-989X.9.3.275 ##' ##' Pornprasertmanit, S., Schoemann, A. M., Geldhof, G. J., & Little, T. D. ##' (submitted). 
\emph{Probing latent interaction estimated with a residual ##' centering approach.} ##' ##' @examples ##' ##' library(lavaan) ##' ##' dat3wayRC <- orthogonalize(dat3way, 1:3, 4:6, 7:9) ##' ##' model3 <- " ##' f1 =~ x1 + x2 + x3 ##' f2 =~ x4 + x5 + x6 ##' f3 =~ x7 + x8 + x9 ##' f12 =~ x1.x4 + x2.x5 + x3.x6 ##' f13 =~ x1.x7 + x2.x8 + x3.x9 ##' f23 =~ x4.x7 + x5.x8 + x6.x9 ##' f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9 ##' f4 =~ x10 + x11 + x12 ##' f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123 ##' f1 ~~ 0*f12 ##' f1 ~~ 0*f13 ##' f1 ~~ 0*f123 ##' f2 ~~ 0*f12 ##' f2 ~~ 0*f23 ##' f2 ~~ 0*f123 ##' f3 ~~ 0*f13 ##' f3 ~~ 0*f23 ##' f3 ~~ 0*f123 ##' f12 ~~ 0*f123 ##' f13 ~~ 0*f123 ##' f23 ~~ 0*f123 ##' x1 ~ 0*1 ##' x4 ~ 0*1 ##' x7 ~ 0*1 ##' x10 ~ 0*1 ##' x1.x4 ~ 0*1 ##' x1.x7 ~ 0*1 ##' x4.x7 ~ 0*1 ##' x1.x4.x7 ~ 0*1 ##' f1 ~ NA*1 ##' f2 ~ NA*1 ##' f3 ~ NA*1 ##' f12 ~ NA*1 ##' f13 ~ NA*1 ##' f23 ~ NA*1 ##' f123 ~ NA*1 ##' f4 ~ NA*1 ##' " ##' ##' fitRC3way <- sem(model3, data = dat3wayRC, std.lv = FALSE, ##' meanstructure = TRUE) ##' summary(fitRC3way) ##' ##' result3wayRC <- probe3WayRC(fitRC3way, ##' c("f1", "f2", "f3", "f12", "f13", "f23", "f123"), ##' "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1)) ##' result3wayRC ##' ##' @export probe3WayRC <- function(fit, nameX, nameY, modVar, valProbe1, valProbe2, group) { # Check whether modVar is correct if(is.character(modVar)) { modVar <- match(modVar, nameX) } if((NA %in% modVar) || !(do.call("&", as.list(modVar %in% 1:3)))) stop("Both moderators must be among the first three factor names in 'nameX', or integers 1, 2, or 3.") # JG: Changed error # Check whether the fit object does not use the mlm, mlr, or mlf estimator (because the variance-covariance matrix of parameter estimates cannot be computed) estSpec <- lavInspect(fit, "call")$estimator if(!is.null(estSpec) && (estSpec %in% c("mlr", "mlm", "mlf"))) stop("This function does not work when 'mlr', 'mlm', or 'mlf' is used as the estimator because the covariance matrix of the parameter estimates cannot be computed.") ## TDJ: If multigroup, check group %in% group.label nG <- lavInspect(fit, "ngroups") if (nG > 1L) { group.label <- lavInspect(fit, "group.label") if (missing(group)) { warning('No argument provided for "group". Using the first group.') group <- 1L } ## assign numeric to character if (is.numeric(group)) { if (group %in% 1:nG) { group <- group.label[group] } else group <- as.character(group) } else group <- as.character(group) ## check that character is a group if (!as.character(group) %in% group.label) stop('"group" must be a character string naming a group of interest, or ', 'an integer corresponding to a group in lavInspect(fit, "group.label")') ## Get the parameter estimates for that group est <- lavInspect(fit, "est")[[group]] } else { ## single-group model est <- lavInspect(fit, "est") } # Find the mean and covariance matrix of independent factors varX <- est$psi[nameX, nameX] meanX <- as.matrix(est$alpha[nameX,]); colnames(meanX) <- "intcept" # Find the intercept, regression coefficients, and residual variance of residual-centered regression intceptRC <- est$alpha[nameY,] resVarRC <- est$psi[nameY, nameY] if(resVarRC < 0) stop("The residual variance is negative. 
The model did not converge!") # JG: Changed error betaRC <- as.matrix(est$beta[nameY, nameX]); colnames(betaRC) <- nameY # Find the number of observations numobs <- lavInspect(fit, "nobs") # Compute SSRC meanXwith1 <- rbind(1, meanX) varXwith0 <- cbind(0, rbind(0, varX)) SSRC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute Mean(Y) and Var(Y) betaRCWithIntcept <- rbind(intceptRC, betaRC) meanY <- t(meanXwith1) %*% betaRCWithIntcept varY <- (t(betaRCWithIntcept) %*% SSRC %*% betaRCWithIntcept)/numobs - meanY^2 + resVarRC # Compute Cov(Y, X) covY <- as.matrix((varX %*% betaRC)[1:3,]) # Compute E(XZ), E(XW), E(ZW), E(XZW) meanX[4] <- expect2NormProd(meanX[c(1,2)], varX[c(1,2), c(1,2)]) meanX[5] <- expect2NormProd(meanX[c(1,3)], varX[c(1,3), c(1,3)]) meanX[6] <- expect2NormProd(meanX[c(2,3)], varX[c(2,3), c(2,3)]) meanX[7] <- expect3NormProd(meanX[1:3], varX[1:3, 1:3]) # Compute Var(XZ), Var(XW), Var(ZW), Var(XZW) varX[4, 4] <- var2NormProd(meanX[c(1,2)], varX[c(1,2), c(1,2)]) varX[5, 5] <- var2NormProd(meanX[c(1,3)], varX[c(1,3), c(1,3)]) varX[6, 6] <- var2NormProd(meanX[c(2,3)], varX[c(2,3), c(2,3)]) varX[7, 7] <- var3NormProd(meanX[1:3], varX[1:3, 1:3]) # Compute All covariances varX[4, 1] <- varX[1, 4] <- expect3NormProd(meanX[c(1, 2, 1)], varX[c(1, 2, 1),c(1, 2, 1)]) - expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) * meanX[1] varX[5, 1] <- varX[1, 5] <- expect3NormProd(meanX[c(1, 3, 1)], varX[c(1, 3, 1),c(1, 3, 1)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * meanX[1] varX[6, 1] <- varX[1, 6] <- expect3NormProd(meanX[c(2, 3, 1)], varX[c(2, 3, 1),c(2, 3, 1)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * meanX[1] varX[7, 1] <- varX[1, 7] <- expect4NormProd(meanX[c(1,2,3,1)], varX[c(1,2,3,1),c(1,2,3,1)]) - expect3NormProd(meanX[c(1,2,3)], varX[c(1,2,3),c(1,2,3)]) * meanX[1] varX[4, 2] <- varX[2, 4] <- expect3NormProd(meanX[c(1, 2, 2)], varX[c(1, 2, 2),c(1, 2, 2)]) - expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) * meanX[2] varX[5, 2] <- varX[2, 5] <- expect3NormProd(meanX[c(1, 3, 2)], varX[c(1, 3, 2),c(1, 3, 2)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * meanX[2] varX[6, 2] <- varX[2, 6] <- expect3NormProd(meanX[c(2, 3, 2)], varX[c(2, 3, 2),c(2, 3, 2)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * meanX[2] varX[7, 2] <- varX[2, 7] <- expect4NormProd(meanX[c(1,2,3,2)], varX[c(1,2,3,2),c(1,2,3,2)]) - expect3NormProd(meanX[c(1,2,3)], varX[c(1,2,3),c(1,2,3)]) * meanX[2] varX[4, 3] <- varX[3, 4] <- expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) - expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) * meanX[3] varX[5, 3] <- varX[3, 5] <- expect3NormProd(meanX[c(1, 3, 3)], varX[c(1, 3, 3),c(1, 3, 3)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * meanX[3] varX[6, 3] <- varX[3, 6] <- expect3NormProd(meanX[c(2, 3, 3)], varX[c(2, 3, 3),c(2, 3, 3)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * meanX[3] varX[7, 3] <- varX[3, 7] <- expect4NormProd(meanX[c(1,2,3,3)], varX[c(1,2,3,3),c(1,2,3,3)]) - expect3NormProd(meanX[c(1,2,3)], varX[c(1,2,3),c(1,2,3)]) * meanX[3] varX[5, 4] <- varX[4, 5] <- expect4NormProd(meanX[c(1,3,1,2)], varX[c(1,3,1,2),c(1,3,1,2)]) - expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) * expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) varX[6, 4] <- varX[4, 6] <- expect4NormProd(meanX[c(2,3,1,2)], varX[c(2,3,1,2),c(2,3,1,2)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) varX[7, 4] <- varX[4, 7] <- 
expect5NormProd(meanX[c(1,2,3,1,2)], varX[c(1,2,3,1,2),c(1,2,3,1,2)]) - expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) * expect2NormProd(meanX[c(1,2)], varX[c(1,2),c(1,2)]) varX[6, 5] <- varX[5, 6] <- expect4NormProd(meanX[c(2,3,1,3)], varX[c(2,3,1,3),c(2,3,1,3)]) - expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) * expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) varX[7, 5] <- varX[5, 7] <- expect5NormProd(meanX[c(1,2,3,1,3)], varX[c(1,2,3,1,3),c(1,2,3,1,3)]) - expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) * expect2NormProd(meanX[c(1,3)], varX[c(1,3),c(1,3)]) varX[7, 6] <- varX[6, 7] <- expect5NormProd(meanX[c(1,2,3,2,3)], varX[c(1,2,3,2,3),c(1,2,3,2,3)]) - expect3NormProd(meanX[c(1, 2, 3)], varX[c(1, 2, 3),c(1, 2, 3)]) * expect2NormProd(meanX[c(2,3)], varX[c(2,3),c(2,3)]) # Find the meanX and varX without XZW meanXReducedWith1 <- rbind(1, as.matrix(meanX[1:6])) varXReducedWith0 <- cbind(0, rbind(0, varX[1:6, 1:6])) SSMCReduced <- numobs * (varXReducedWith0 + (meanXReducedWith1 %*% t(meanXReducedWith1))) # Find product of main and two-way onto three-way covXZWwith0 <- rbind(0, as.matrix(varX[7, 1:6])) meanXZWwith1 <- meanX[7] * meanXReducedWith1 SSXZW <- numobs * (covXZWwith0 + meanXZWwith1) # should the mean vector be squared (postmultiplied by its transpose)? # Compute a vector and b4, b5, b6 a <- solve(SSMCReduced) %*% as.matrix(SSXZW) betaTemp <- betaRC[4:6] - (as.numeric(betaRC[7]) * a[5:7]) betaTemp <- c(betaTemp, betaRC[7]) # Compute Cov(Y, XZ) and regression coefficients of no-centering betaNC <- solve(varX[1:3,1:3], as.matrix(covY) - (t(varX[4:7, 1:3]) %*% as.matrix(betaTemp))) betaNC <- rbind(as.matrix(betaNC), as.matrix(betaTemp)) covY <- rbind(covY, as.matrix((varX %*% betaNC)[4:7, 1])) # Aggregate the non-centering sufficient statistics (Just show how to do but not necessary) fullCov <- rbind(cbind(varX, covY), c(covY, varY)) fullMean <- rbind(meanX, meanY) # Compute the intercept of no-centering intceptNC <- meanY - t(betaNC) %*% meanX # Compute SSNC betaNCWithIntcept <- rbind(intceptNC, betaNC) meanXwith1 <- rbind(1, meanX) #JG: redundant varXwith0 <- rbind(0, cbind(0, varX)) #JG: redundant SSNC <- numobs * (varXwith0 + (meanXwith1 %*% t(meanXwith1))) # Compute residual variance on non-centering resVarNC <- varY - (t(betaNCWithIntcept) %*% SSNC %*% betaNCWithIntcept)/numobs + meanY^2 # Extract all varEst varEst <- lavaan::vcov(fit) # Check whether intercept are estimated targetcol <- paste(nameY, "~", 1, sep="") estimateIntcept <- targetcol %in% rownames(varEst) pvalue <- function(x) (1 - pnorm(abs(x))) * 2 # Find the order to rearrange ord <- c(setdiff(1:3, modVar), modVar) ord <- c(ord, 7 - rev(ord)) resultIntcept <- NULL resultSlope <- NULL if(estimateIntcept) { # Extract SE from residual centering targetcol <- c(targetcol, paste(nameY, "~", nameX, sep="")) varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC %*% solve(SSNC)) usedBeta <- betaNCWithIntcept if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. 
The resulting calculation provided negative standard errors.") # JG: edited this error # Change the order of usedVar and usedBeta if the moderator variable is listed first usedVar <- usedVar[c(1, ord+1, 8), c(1, ord+1, 8)] usedBeta <- usedBeta[c(1, ord+1, 8)] # Find probe value val <- expand.grid(valProbe1, valProbe2) # Find simple intercept simpleIntcept <- usedBeta[1] + usedBeta[3] * val[,1] + usedBeta[4] * val[,2] + usedBeta[7] * val[,1] * val[,2] varIntcept <- usedVar[1, 1] + val[,1]^2 * usedVar[3, 3] + val[,2]^2 * usedVar[4, 4] + val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 3] + 2 * val[,2] * usedVar[1, 4] + 2 * val[,1] * val[,2] * usedVar[1, 7] + 2 * val[,1] * val[,2] * usedVar[3, 4] + 2 * val[,1]^2 * val[,2] * usedVar[3, 7] + 2* val[,1] * val[,2]^2 * usedVar[4, 7] zIntcept <- simpleIntcept/sqrt(varIntcept) pIntcept <- pvalue(zIntcept) resultIntcept <- cbind(val, simpleIntcept, sqrt(varIntcept), zIntcept, pIntcept) colnames(resultIntcept) <- c(nameX[modVar], "Intcept", "SE", "Wald", "p") # Find simple slope simpleSlope <- usedBeta[2] + usedBeta[5] * val[,1] + usedBeta[6] * val[,2] + usedBeta[8] * val[,1] * val[,2] varSlope <- usedVar[2, 2] + val[,1]^2 * usedVar[5, 5] + val[,2]^2 * usedVar[6, 6] + val[,1]^2 * val[,2]^2 * usedVar[8, 8] + 2 * val[,1] * usedVar[2, 5] + 2 * val[,2] * usedVar[2, 6] + 2 * val[,1] * val[,2] * usedVar[2, 8] + 2 * val[,1] * val[,2] * usedVar[5, 6] + 2 * val[,1]^2 * val[,2] * usedVar[5, 8] + 2 * val[,1] * val[,2]^2 * usedVar[6, 8] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) # JG: rounded values resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } else { targetcol <- paste(nameY, "~", nameX, sep="") varEstSlopeRC <- varEst[targetcol, targetcol] # Transform it to non-centering SE usedVar <- as.numeric(resVarNC/resVarRC) * (varEstSlopeRC %*% SSRC[2:8, 2:8] %*% solve(SSNC[2:8, 2:8])) usedBeta <- betaNC if(sum(diag(usedVar) < 0) > 0) stop("This method does not work. The resulting calculation provided negative standard errors.") # JG: edited this error # Change the order of usedVar and usedBeta if the moderator variable is listed first usedVar <- usedVar[c(ord, 7), c(ord, 7)] usedBeta <- usedBeta[c(ord, 7)] # Find probe value val <- expand.grid(valProbe1, valProbe2) # Find simple slope simpleSlope <- usedBeta[1] + usedBeta[4] * val[,1] + usedBeta[5] * val[,2] + usedBeta[7] * val[,1] * val[,2] varSlope <- usedVar[1, 1] + val[,1]^2 * usedVar[4, 4] + val[,2]^2 * usedVar[5, 5] + val[,1]^2 * val[,2]^2 * usedVar[7, 7] + 2 * val[,1] * usedVar[1, 4] + 2 * val[,2] * usedVar[1, 5] + 2 * val[,1] * val[,2] * usedVar[1, 7] + 2 * val[,1] * val[,2] * usedVar[4, 5] + 2 * val[,1]^2 * val[,2] * usedVar[4, 7] + 2 * val[,1] * val[,2]^2 * usedVar[5, 7] zSlope <- simpleSlope/sqrt(varSlope) pSlope <- round(pvalue(zSlope),6) # JG: rounded values resultSlope <- cbind(val, simpleSlope, sqrt(varSlope), zSlope, pSlope) colnames(resultSlope) <- c(nameX[modVar], "Slope", "SE", "Wald", "p") } return(list(SimpleIntcept = resultIntcept, SimpleSlope = resultSlope)) } ##' Plot a latent interaction ##' ##' This function will plot the line graphs representing the simple effect of ##' the independent variable given the values of the moderator. For multigroup ##' models, it will only generate a plot for 1 group, as specified in the ##' function used to obtain the first argument. 
##' ##' ##' @param object The result of probing latent interaction obtained from ##' \code{\link{probe2WayMC}}, \code{\link{probe2WayRC}}, ##' \code{\link{probe3WayMC}}, or \code{\link{probe3WayRC}} function. ##' @param xlim The vector of two numbers: the minimum and maximum values of the ##' independent variable ##' @param xlab The label of the x-axis ##' @param ylab The label of the y-axis ##' @param legend \code{logical}. If \code{TRUE} (default), a legend is printed. ##' @param legendArgs \code{list} of arguments passed to \code{\link{legend}} ##' function if \code{legend=TRUE}. ##' @param \dots Any additional argument for the \code{\link{plot}} function ##' ##' @return None. This function will plot the simple main effect only. ##' ##' @author ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \itemize{ ##' \item \code{\link{indProd}} For creating the indicator products with no ##' centering, mean centering, double-mean centering, or residual centering. ##' \item \code{\link{probe2WayMC}} For probing the two-way latent interaction ##' when the results are obtained from mean centering or double-mean centering ##' \item \code{\link{probe3WayMC}} For probing the three-way latent interaction ##' when the results are obtained from mean centering or double-mean centering ##' \item \code{\link{probe2WayRC}} For probing the two-way latent interaction ##' when the results are obtained from the residual-centering approach. ##' \item \code{\link{probe3WayRC}} For probing the three-way latent interaction ##' when the results are obtained from the residual-centering approach. ##' } ##' ##' @examples ##' ##' library(lavaan) ##' ##' dat2wayMC <- indProd(dat2way, 1:3, 4:6) ##' ##' model1 <- " ##' f1 =~ x1 + x2 + x3 ##' f2 =~ x4 + x5 + x6 ##' f12 =~ x1.x4 + x2.x5 + x3.x6 ##' f3 =~ x7 + x8 + x9 ##' f3 ~ f1 + f2 + f12 ##' f12 ~~ 0*f1 ##' f12 ~~ 0*f2 ##' x1 ~ 0*1 ##' x4 ~ 0*1 ##' x1.x4 ~ 0*1 ##' x7 ~ 0*1 ##' f1 ~ NA*1 ##' f2 ~ NA*1 ##' f12 ~ NA*1 ##' f3 ~ NA*1 ##' " ##' ##' fitMC2way <- sem(model1, data = dat2wayMC, std.lv = FALSE, ##' meanstructure = TRUE) ##' result2wayMC <- probe2WayMC(fitMC2way, c("f1", "f2", "f12"), ##' "f3", "f2", c(-1, 0, 1)) ##' plotProbe(result2wayMC, xlim = c(-2, 2)) ##' ##' ##' dat3wayMC <- indProd(dat3way, 1:3, 4:6, 7:9) ##' ##' model3 <- " ##' f1 =~ x1 + x2 + x3 ##' f2 =~ x4 + x5 + x6 ##' f3 =~ x7 + x8 + x9 ##' f12 =~ x1.x4 + x2.x5 + x3.x6 ##' f13 =~ x1.x7 + x2.x8 + x3.x9 ##' f23 =~ x4.x7 + x5.x8 + x6.x9 ##' f123 =~ x1.x4.x7 + x2.x5.x8 + x3.x6.x9 ##' f4 =~ x10 + x11 + x12 ##' f4 ~ f1 + f2 + f3 + f12 + f13 + f23 + f123 ##' f1 ~~ 0*f12 ##' f1 ~~ 0*f13 ##' f1 ~~ 0*f123 ##' f2 ~~ 0*f12 ##' f2 ~~ 0*f23 ##' f2 ~~ 0*f123 ##' f3 ~~ 0*f13 ##' f3 ~~ 0*f23 ##' f3 ~~ 0*f123 ##' f12 ~~ 0*f123 ##' f13 ~~ 0*f123 ##' f23 ~~ 0*f123 ##' x1 ~ 0*1 ##' x4 ~ 0*1 ##' x7 ~ 0*1 ##' x10 ~ 0*1 ##' x1.x4 ~ 0*1 ##' x1.x7 ~ 0*1 ##' x4.x7 ~ 0*1 ##' x1.x4.x7 ~ 0*1 ##' f1 ~ NA*1 ##' f2 ~ NA*1 ##' f3 ~ NA*1 ##' f12 ~ NA*1 ##' f13 ~ NA*1 ##' f23 ~ NA*1 ##' f123 ~ NA*1 ##' f4 ~ NA*1 ##' " ##' ##' fitMC3way <- sem(model3, data = dat3wayMC, std.lv = FALSE, ##' meanstructure = TRUE) ##' result3wayMC <- probe3WayMC(fitMC3way, ##' c("f1", "f2", "f3", "f12", "f13", "f23", "f123"), ##' "f4", c("f1", "f2"), c(-1, 0, 1), c(-1, 0, 1)) ##' plotProbe(result3wayMC, xlim = c(-2, 2)) ##' ##' @export plotProbe <- function(object, xlim, xlab = "Independent Variable", ylab = "Dependent Variable", legend = TRUE, legendArgs = list(), ...) {
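  ## Added commentary: the number of moderators is inferred below from the
  ## column count of the simple-slope table. Five columns (moderator value,
  ## slope, SE, Wald, p) indicate one moderator (two-way interaction); six
  ## columns (two moderator-value columns first) indicate two moderators
  ## (three-way interaction).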
list(), ...) {
  if (length(xlim) != 2) stop("The x-limit should be specified as a numeric",
                              " vector of length 2.")
  # Extract simple slopes
  slope <- object$SimpleSlope
  # Check whether the object is the two-way or three-way interaction result
  numInt <- 2
  if (ncol(slope) == 6) numInt <- 3
  estSlope <- slope[, ncol(slope) - 3]
  # Check whether each simple slope is significant. Significant slopes are
  # plotted as red lines; nonsignificant slopes are plotted as black lines.
  estSlopeSig <- (slope[, ncol(slope)] < 0.05) + 1
  # Extract simple intercepts. If simple intercepts are not provided, each
  # intercept will be fixed at 0.
  estIntercept <- NULL
  if (!is.null(object$SimpleIntcept))
    estIntercept <- object$SimpleIntcept[, ncol(slope) - 3]
  if (numInt == 2) {
    if (is.null(legendArgs$title)) legendArgs$title <- colnames(slope)[1]
    if (is.null(legendArgs$legend)) legendArgs$legend <- slope[, 1]
    plotSingleProbe(estSlope, estIntercept, xlim = xlim, xlab = xlab,
                    ylab = ylab, colLine = estSlopeSig, legend = legend,
                    legendArgs = legendArgs, ...)
  } else if (numInt == 3) {
    # Three-way interaction; separate lines for the first moderator, separate
    # graphs for the second moderator
    mod2 <- unique(slope[, 2])
    mod1 <- unique(slope[, 1])
    # Use multiple graphs in a figure
    if (length(mod2) == 2) {
      obj <- par(mfrow = c(1, 2))
    } else if (length(mod2) == 3) {
      obj <- par(mfrow = c(1, 3))
    } else if (length(mod2) > 3) {
      obj <- par(mfrow = c(2, ceiling(length(mod2)/2)))
    } else if (length(mod2) == 1) {
      # a single value of the second moderator requires only one panel,
      # so par() need not be changed
    } else stop("The number of values of the second moderator could not be",
                " determined from the 'object' argument.")
    for (i in 1:length(mod2)) {
      select <- slope[, 2] == mod2[i]
      if (is.null(legendArgs$title)) legendArgs$title <- colnames(slope)[1]
      if (is.null(legendArgs$legend)) legendArgs$legend <- mod1
      plotSingleProbe(estSlope[select], estIntercept[select], xlim = xlim,
                      xlab = xlab, ylab = ylab, colLine = estSlopeSig[select],
                      main = paste(colnames(slope)[2], "=", mod2[i]),
                      legend = legend, legendArgs = legendArgs, ...)
    }
    if (length(mod2) > 1) par(obj)
  } else {
    stop("Please make sure that the object argument is obtained from",
         " 'probe2WayMC', 'probe2WayRC', 'probe3WayMC', or 'probe3WayRC'.")
  }
}

## ----------------
## Hidden Functions
## ----------------

## Find the expected value of the product of two normal variates
## m = the mean of each normal variate
## s = the covariance matrix of all variates
expect2NormProd <- function(m, s) return(prod(m) + s[1, 2])

## Find the expected value of the product of three normal variates
## m = the mean of each normal variate
## s = the covariance matrix of all variates
expect3NormProd <- function(m, s) {
  return(prod(m) + m[3] * s[1, 2] + m[2] * s[1, 3] + m[1] * s[2, 3])
}

## Find the expected value of the product of four normal variates
## m = the mean of each normal variate
## s = the covariance matrix of all variates
expect4NormProd <- function(m, s) {
  first <- prod(m)
  com <- utils::combn(1:4, 2)
  forSecond <- function(draw, meanval, covval, index) {
    draw2 <- setdiff(index, draw)
    prod(meanval[draw2]) * covval[draw[1], draw[2]]
  }
  second <- sum(apply(com, 2, forSecond, meanval=m, covval=s, index=1:4))
  com2 <- com[,1:3] # select only the first three terms, which contain the first element
  forThird <- function(draw, covval, index) {
    draw2 <- setdiff(index, draw)
    covval[draw[1], draw[2]] * covval[draw2[1], draw2[2]]
  }
  third <- sum(apply(com2, 2, forThird, covval=s, index=1:4))
  return(first + second + third)
}

## Find the expected value of the product of five normal variates
## m = the mean of each normal variate
## s = the covariance matrix of all variates
expect5NormProd <- function(m, s) {
  first <- prod(m)
  com <- utils::combn(1:5, 2)
  forSecond <- function(draw, meanval, covval, index) {
    draw2 <- setdiff(index, draw)
    prod(meanval[draw2]) * covval[draw[1], draw[2]]
  }
  second <- sum(apply(com, 2, forSecond, meanval=m, covval=s, index=1:5))
  com2 <- utils::combn(1:5, 4)
  forThirdOuter <- function(index, m, s, indexall) {
    targetMean <- m[setdiff(indexall, index)]
    cominner <- utils::combn(index, 2)[,1:3] # select only the first three terms, which contain the first element
    forThirdInner <- function(draw, covval, index) {
      draw2 <- setdiff(index, draw)
      covval[draw[1], draw[2]] * covval[draw2[1], draw2[2]]
    }
    thirdInner <- targetMean * sum(apply(cominner, 2, forThirdInner,
                                         covval=s, index=index))
    return(thirdInner)
  }
  third <- sum(apply(com2, 2, forThirdOuter, m=m, s=s, indexall=1:5))
  return(first + second + third)
}

## Find the variance of the product of two normal variates
## m = the mean of each normal variate
## s = the covariance matrix of all variates
var2NormProd <- function(m, s) {
  first <- m[2]^2 * s[1, 1] + m[1]^2 * s[2, 2]
  second <- 2 * m[1] * m[2] * s[1, 2]
  third <- s[1, 1] * s[2, 2]
  fourth <- s[1, 2]^2
  return(first + second + third + fourth)
}

## Find the variance of the product of three normal variates
## m = the mean of each normal variate
## s = the covariance matrix of all variates
var3NormProd <- function(m, s) {
  com <- utils::combn(1:3, 2)
  forFirst <- function(draw, meanval, covval, index) {
    # draw = 2, 3; draw2 = 1
    draw2 <- setdiff(index, draw)
    term1 <- meanval[draw[1]]^2 * meanval[draw[2]]^2 * covval[draw2, draw2]
    term2 <- 2 * meanval[draw2]^2 * meanval[draw[1]] * meanval[draw[2]] * covval[draw[1], draw[2]]
    term3 <- (meanval[draw2]^2 * covval[draw[1], draw[1]] * covval[draw[2], draw[2]]) + (meanval[draw2]^2 * covval[draw[1], draw[2]]^2)
    term4 <- 4 * meanval[draw[1]] * meanval[draw[2]] * covval[draw2, draw2] * covval[draw[1], draw[2]]
    term5 <- 6 * meanval[draw[1]] *
      meanval[draw[2]] * covval[draw2, draw[1]] * covval[draw2, draw[2]]
    term1 + term2 + term3 + term4 + term5
  }
  first <- sum(apply(com, 2, forFirst, meanval=m, covval=s, index=1:3))
  second <- prod(diag(s))
  third <- 2 * s[3, 3] * s[1, 2]^2 + 2 * s[2, 2] * s[1, 3]^2 + 2 * s[1, 1] * s[2, 3]^2
  fourth <- 8 * s[1, 2] * s[1, 3] * s[2, 3]
  return(first + second + third + fourth)
}

## plotSingleProbe : plot the probing-interaction result specific to a single moderator
## estSlope = slope of each line
## estIntercept = intercept of each line
## xlim = the minimum and maximum values of the independent variable (x-axis)
## xlab = the label for the independent variable
## ylab = the label for the dependent variable
## main = the title of the graph
## colLine = the color of each line
## legend = whether to print a legend
## legendArgs = arguments to pass to legend() function
plotSingleProbe <- function(estSlope, estIntercept = NULL, xlim,
                            xlab = "Independent Variable",
                            ylab = "Dependent Variable", main = NULL,
                            colLine = "black", legend = TRUE,
                            legendArgs = list(), ...) {
  if (is.null(estIntercept)) estIntercept <- rep(0, length(estSlope))
  if (length(colLine) == 1) colLine <- rep(colLine, length(estSlope))
  lower <- estIntercept + (xlim[1] * estSlope)
  upper <- estIntercept + (xlim[2] * estSlope)
  ylim <- c(min(c(lower, upper)), max(c(lower, upper)))
  plot(cbind(xlim, ylim), xlim = xlim, ylim = ylim, type = "n",
       xlab = xlab, ylab = ylab, main = main, ...)
  for (i in 1:length(estSlope)) {
    lines(cbind(xlim, c(lower[i], upper[i])), col = colLine[i], lwd = 1.5, lty = i)
  }
  if (legend) {
    positionX <- 0.25
    if (all(estSlope > 0)) positionX <- 0.01
    if (all(estSlope < 0)) positionX <- 0.50
    if (is.null(legendArgs$x)) legendArgs$x <- positionX * (xlim[2] - xlim[1]) + xlim[1]
    if (is.null(legendArgs$y)) legendArgs$y <- 0.99 * (ylim[2] - ylim[1]) + ylim[1]
    if (is.null(legendArgs$col)) legendArgs$col <- colLine
    if (is.null(legendArgs$lty)) legendArgs$lty <- 1:length(estSlope)
    do.call(graphics::legend, legendArgs)
  }
}
semTools/R/plausibleValues.R0000644000176200001440000005020613532031230015546 0ustar liggesusers### Terrence D. Jorgensen
### Last updated: 29 August 2019
### function to draw plausible values of factor scores from lavPredict

# library(blavaan)
# bfit <- bcfa(HS.model, data=HolzingerSwineford1939, save.lvs = TRUE,
#              bcontrol=list(method="rjparallel"), group = "school",
#              #target = "stan", control=list(cores = 4, seed = 123),
#              burnin = 4000, sample = 30, n.chains = 2)
# bFS <- do.call(rbind, blavInspect(bfit, "lvs"))
# do.call()

## -------------
## Main function
## -------------

##' Plausible-Values Imputation of Factor Scores Estimated from a lavaan Model
##'
##' Draw plausible values of factor scores estimated from a fitted
##' \code{\link[lavaan]{lavaan}} model, then treat them as multiple imputations
##' of missing data using \code{\link{runMI}}.
##'
##'
##' Because latent variables are unobserved, they can be considered as missing
##' data, which can be imputed using Monte Carlo methods. This may be of
##' interest to researchers with sample sizes too small to fit their complex
##' structural models. Fitting a factor model as a first step,
##' \code{\link[lavaan]{lavPredict}} provides factor-score estimates, which can
##' be treated as observed values in a path analysis (Step 2). However, the
##' resulting standard errors and test statistics could not be trusted because
##' the Step-2 analysis would not take into account the uncertainty about the
##' estimated factor scores.
Using the asymptotic sampling covariance matrix ##' of the factor scores provided by \code{\link[lavaan]{lavPredict}}, ##' \code{plausibleValues} draws a set of \code{nDraws} imputations from the ##' sampling distribution of each factor score, returning a list of data sets ##' that can be treated like multiple imputations of incomplete data. If the ##' data were already imputed to handle missing data, \code{plausibleValues} ##' also accepts an object of class \code{\linkS4class{lavaan.mi}}, and will ##' draw \code{nDraws} plausible values from each imputation. Step 2 would ##' then take into account uncertainty about both missing values and factor ##' scores. Bayesian methods can also be used to generate factor scores, as ##' available with the \pkg{blavaan} package, in which case plausible ##' values are simply saved parameters from the posterior distribution. See ##' Asparouhov and Muthen (2010) for further technical details and references. ##' ##' Each returned \code{data.frame} includes a \code{case.idx} column that ##' indicates the corresponding rows in the data set to which the model was ##' originally fitted (unless the user requests only Level-2 variables). This ##' can be used to merge the plausible values with the original observed data, ##' but users should note that including any new variables in a Step-2 model ##' might not accurately account for their relationship(s) with factor scores ##' because they were not accounted for in the Step-1 model from which factor ##' scores were estimated. ##' ##' If \code{object} is a multilevel \code{lavaan} model, users can request ##' plausible values for latent variables at particular levels of analysis by ##' setting the \code{\link[lavaan]{lavPredict}} argument \code{level=1} or ##' \code{level=2}. If the \code{level} argument is not passed via \dots, ##' then both levels are returned in a single merged data set per draw. For ##' multilevel models, each returned \code{data.frame} also includes a column ##' indicating to which cluster each row belongs (unless the user requests only ##' Level-2 variables). ##' ##' ##' @importFrom lavaan lavInspect lavPredict ##' ##' @param object A fitted model of class \code{\linkS4class{lavaan}}, ##' \code{\link[blavaan]{blavaan}}, or \code{\linkS4class{lavaan.mi}} ##' @param nDraws \code{integer} specifying the number of draws, analogous to ##' the number of imputed data sets. If \code{object} is of class ##' \code{\linkS4class{lavaan.mi}}, this will be the number of draws taken ##' \emph{per imputation}. Ignored if \code{object} is of class ##' \code{\link[blavaan]{blavaan}}, in which case the number of draws is the ##' number of MCMC samples from the posterior. ##' @param seed \code{integer} passed to \code{\link{set.seed}()}. Ignored if ##' \code{object} is of class \code{\link[blavaan]{blavaan}}, ##' @param omit.imps \code{character} vector specifying criteria for omitting ##' imputations when \code{object} is of class \code{\linkS4class{lavaan.mi}}. ##' Can include any of \code{c("no.conv", "no.se", "no.npd")}. ##' @param ... Optional arguments to pass to \code{\link[lavaan]{lavPredict}}. ##' \code{assemble} will be ignored because multiple groups are always ##' assembled into a single \code{data.frame} per draw. \code{type} will be ##' ignored because it is set internally to \code{type="lv"}. 
##' ##' @return A \code{list} of length \code{nDraws}, each of which is a ##' \code{data.frame} containing plausible values, which can be treated as ##' a \code{list} of imputed data sets to be passed to \code{\link{runMI}} ##' (see \bold{Examples}). If \code{object} is of class ##' \code{\linkS4class{lavaan.mi}}, the \code{list} will be of length ##' \code{nDraws*m}, where \code{m} is the number of imputations. ##' ##' @author Terrence D. Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' @references ##' Asparouhov, T. & Muthen, B. O. (2010). \emph{Plausible values for latent ##' variables using M}plus. Technical Report. Retrieved from ##' www.statmodel.com/download/Plausible.pdf ##' ##' @seealso \code{\link{runMI}}, \code{\linkS4class{lavaan.mi}} ##' ##' @examples ##' ##' ## example from ?cfa and ?lavPredict help pages ##' HS.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' fit1 <- cfa(HS.model, data = HolzingerSwineford1939) ##' fs1 <- plausibleValues(fit1, nDraws = 3, ##' ## lavPredict() can add only the modeled data ##' append.data = TRUE) ##' lapply(fs1, head) ##' ##' ## To merge factor scores to original data.frame (not just modeled data) ##' fs1 <- plausibleValues(fit1, nDraws = 3) ##' idx <- lavInspect(fit1, "case.idx") # row index for each case ##' if (is.list(idx)) idx <- do.call(c, idx) # for multigroup models ##' data(HolzingerSwineford1939) # copy data to workspace ##' HolzingerSwineford1939$case.idx <- idx # add row index as variable ##' ## loop over draws to merge original data with factor scores ##' for (i in seq_along(fs1)) { ##' fs1[[i]] <- merge(fs1[[i]], HolzingerSwineford1939, by = "case.idx") ##' } ##' lapply(fs1, head) ##' ##' ##' ## multiple-group analysis, in 2 steps ##' step1 <- cfa(HS.model, data = HolzingerSwineford1939, group = "school", ##' group.equal = c("loadings","intercepts")) ##' PV.list <- plausibleValues(step1) ##' ##' ## subsequent path analysis ##' path.model <- ' visual ~ c(t1, t2)*textual + c(s1, s2)*speed ' ##' \dontrun{ ##' step2 <- sem.mi(path.model, data = PV.list, group = "school") ##' ## test equivalence of both slopes across groups ##' lavTestWald.mi(step2, constraints = 't1 == t2 ; s1 == s2') ##' } ##' ##' ##' ## multilevel example from ?Demo.twolevel help page ##' model <- ' ##' level: 1 ##' fw =~ y1 + y2 + y3 ##' fw ~ x1 + x2 + x3 ##' level: 2 ##' fb =~ y1 + y2 + y3 ##' fb ~ w1 + w2 ##' ' ##' msem <- sem(model, data = Demo.twolevel, cluster = "cluster") ##' mlPVs <- plausibleValues(msem, nDraws = 3) # both levels by default ##' lapply(mlPVs, head, n = 10) ##' ## only Level 1 ##' mlPV1 <- plausibleValues(msem, nDraws = 3, level = 1) ##' lapply(mlPV1, head) ##' ## only Level 2 ##' mlPV2 <- plausibleValues(msem, nDraws = 3, level = 2) ##' lapply(mlPV2, head) ##' ##' @export plausibleValues <- function(object, nDraws = 20L, seed = 12345, omit.imps = c("no.conv","no.se"), ...) { if (class(object) == "lavaan") { ## generate vector of seeds set.seed(seed) seeds <- sample(100000:9999999, size = nDraws, replace = FALSE) PV <- lapply(seeds, plaus.lavaan, object = object, ...) } else if (class(object) == "lavaan.mi") { ## generate vector of seeds set.seed(seed) seeds <- sample(100000:9999999, size = nDraws, replace = FALSE) PV <- plaus.mi(object, seeds = seeds, omit.imps = omit.imps, ...) 
} else if (class(object) == "blavaan") { ## requireNamespace("blavaan") ## blavaan::blavInspect(object, "lvs") PV <- plaus.blavaan(object) } else stop("object's class not valid: ", class(object)) PV } ## ---------------- ## Hidden functions ## ---------------- ## draw 1 set of plausible values from a lavaan object ##' @importFrom lavaan lavInspect lavPredict lavNames plaus.lavaan <- function(seed = 1, object, ...) { stopifnot(inherits(object, "lavaan")) if (lavInspect(object, "categorical")) { stop("Plausible values not available (yet) for categorical data") } if (lavInspect(object, "options")$missing %in% c("ml", "ml.x")) { stop("Plausible values not available (yet) for missing data + fiml.\n", " Multiple imputations can be used via lavaan.mi()") } #FIXME? https://github.com/yrosseel/lavaan/issues/156 set.seed(seed) cluster <- lavInspect(object, "cluster") group <- lavInspect(object, "group") group.label <- lavInspect(object, "group.label") nG <- lavInspect(object, "ngroups") nL <- lavInspect(object, "nlevels") l.names <- o.names <- list() if (nG == 1L && nL == 1L) { ## single block l.names <- list(lavNames(object, "lv")) o.names <- list(lavNames(object, "ov")) } else if (nG == 1L && nL > 1L) { ## multilevel for (BB in 1:nL) { l.names <- c(l.names, list(lavNames(object, "lv", block = BB))) o.names <- c(o.names, list(lavNames(object, "ov", block = BB))) } } else if (nG > 1L && nL == 1L) { ## multigroup for (BB in 1:nG) { l.names <- c(l.names, list(lavNames(object, "lv", block = BB))) o.names <- c(o.names, list(lavNames(object, "ov", block = BB))) } } else { ## multilevel + multigroup for (BB in 1:(nG*nL)) { #FIXME: lavInspect(object, "nblocks") l.names <- c(l.names, list(lavNames(object, "lv", block = BB))) o.names <- c(o.names, list(lavNames(object, "ov", block = BB))) } } ## extract factor scores + covariance matrix fsArgs <- list(...) fsArgs$type <- "lv" fsArgs$assemble <- FALSE # assemble after drawing append.data <- fsArgs$append.data if (is.null(append.data)) append.data <- FALSE # default in lavPredict() only.L2 <- fsArgs$level == 2L if (length(only.L2) == 0L) only.L2 <- FALSE if (only.L2) fsArgs$append.data <- append.data <- FALSE #FIXME: how will Yves handle lavPredict(fit, append=T, level=2)? bothLevels <- nL > 1L && is.null(fsArgs$level) fsArgs$object <- object fsArgs$acov <- "standard" #FIXME: update if other options become available FS <- do.call(lavPredict, fsArgs) #FIXME: breaks when multigroup MLSEM: https://github.com/yrosseel/lavaan/issues/157 ## also draw Level 2, if multilevel and no specific level requested if (bothLevels) { fsArgs$level <- 2L fsArgs$append.data <- FALSE #FIXME: how will Yves handle lavPredict(fit, append=T, level=2)? FS2 <- do.call(lavPredict, fsArgs) } ## draw plausible values, if factor scores exist if (nG == 1L) { if (ncol(FS) == 0L) { PV <- FS } else { ACOV <- attr(FS, "acov")[[1]] v.idx <- if (only.L2) 2L else 1L PV <- apply(FS[ , l.names[[v.idx]], drop = FALSE], 1, function(mu) { MASS::mvrnorm(n = 1, mu = mu, Sigma = ACOV) }) if (is.null(dim(PV))) { PV <- as.matrix(PV) colnames(PV) <- l.names[[v.idx]] } else PV <- t(PV) if (append.data) { PV <- cbind(FS[ , o.names[[v.idx]], drop = FALSE], PV) } } ## add Level 2 if multilevel and no specific level requested if (bothLevels) { if (ncol(FS2) == 0L) { PV2 <- FS2 } else { ACOV2 <- attr(FS2, "acov")[[1]] #FIXME: how will Yves handle lavPredict(fit, append=T, level=2)? 
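        ## draw one vector of Level-2 plausible values per cluster: each row of
        ## FS2 provides the means, and ACOV2 is the asymptotic sampling
        ## covariance matrix of the Level-2 factor scores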
PV2 <- apply(FS2, 1, function(mu) { out <- MASS::mvrnorm(n = 1, mu = mu, Sigma = ACOV2) }) if (is.null(dim(PV2))) { PV2 <- as.matrix(PV2) colnames(PV2) <- l.names[[2]] } else PV2 <- t(PV2) } } } else { ACOV <- list() PV <- list() for (gg in 1:nG) { if (ncol(FS[[gg]]) == 0L) { PV[[gg]] <- FS[[gg]] } else { ACOV[[gg]] <- attr(FS, "acov")[[gg]] v.idx <- if (only.L2) (2L + (gg - 1L)*nL) else (1L + (gg - 1L)*nL) PV[[gg]] <- apply(FS[[gg]][ , l.names[[v.idx]], drop = FALSE], 1, function(mu) { MASS::mvrnorm(n = 1, mu = mu, Sigma = ACOV[[gg]]) }) if (is.null(dim(PV[[gg]]))) { PV[[gg]] <- as.matrix(PV[[gg]]) colnames(PV[[gg]]) <- l.names[[v.idx]] } else PV[[gg]] <- t(PV[[gg]]) } if (append.data) { PV[[gg]] <- cbind(FS[[gg]][ , o.names[[v.idx]], drop = FALSE], PV[[gg]]) } } ## add Level 2 if multilevel and no specific level requested if (bothLevels) { ACOV2 <- list() PV2 <- list() for (gg in 1:nG) { if (ncol(FS2[[gg]]) == 0L) { PV2[[gg]] <- FS2[[gg]] } else { ACOV2[[gg]] <- attr(FS2, "acov")[[gg]] #FIXME: how will Yves handle lavPredict(fit, append=T, level=2)? PV2[[gg]] <- apply(FS2[[gg]], 1, function(mu) { MASS::mvrnorm(n = 1, mu = mu, Sigma = ACOV2[[gg]]) }) if (is.null(dim(PV2[[gg]]))) { PV2[[gg]] <- as.matrix(PV2[[gg]]) colnames(PV2[[gg]]) <- colnames(FS2[[gg]]) } else PV2[[gg]] <- t(PV2[[gg]]) } } } } ## save as data.frame if (nG > 1L) { temp <- lapply(1:nG, function(gg) { dd <- data.frame(PV[[gg]]) ## add groups if multiple dd[ , group] <- group.label[gg] dd <- dd[ , c(group, setdiff(names(dd), group)), drop = FALSE ] ## attach row indices from original data for optional merging if (only.L2) { dd[ , cluster] <- lavInspect(object, "cluster.id")[[gg]] dd <- dd[ , c(cluster, setdiff(names(dd), cluster)), drop = FALSE ] } else { dd <- cbind(case.idx = lavInspect(object, "case.idx")[[gg]], dd) } ## attach cluster IDs, if multilevel and no level requested if (bothLevels) { dd[ , cluster] <- lavInspect(object, "cluster.label")[[gg]] d2 <- data.frame(PV2[[gg]]) d2[ , group] <- group.label[gg] d2[ , cluster] <- lavInspect(object, "cluster.id")[[gg]] dd <- merge(dd, d2, by = c(group, cluster), all = TRUE) } dd }) PV <- do.call(rbind, temp) } else { PV <- data.frame(PV) ## attach row indices from original data for optional merging if (only.L2) { PV[ , cluster] <- lavInspect(object, "cluster.id") PV <- PV[ , c(cluster, setdiff(names(PV), cluster)), drop = FALSE ] } else { PV <- cbind(case.idx = lavInspect(object, "case.idx"), PV) } ## attach cluster IDs, if multilevel and no level requested if (bothLevels) { PV[ , cluster] <- lavInspect(object, "cluster.label") PV2 <- data.frame(PV2) PV2[ , cluster] <- lavInspect(object, "cluster.id") PV <- merge(PV, PV2, by = cluster, all = TRUE) } } PV } ## draw plausible values from a lavaan.mi object ##' @importFrom lavaan lavInspect lavPredict plaus.mi <- function(object, seeds = 1:5, omit.imps = c("no.conv","no.se"), ...) { stopifnot(inherits(object, "lavaan.mi")) useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) useImps <- which(useImps) ## check if model has converged if (m == 0L) stop("No models converged. 
Score tests unavailable.") oldCall <- object@lavListCall if (!is.null(oldCall$parallel)) { if (oldCall$parallel == "snow") { oldCall$parallel <- "no" oldCall$ncpus <- 1L message("Unable to pass lavaan::lavPredict() arguments ", "when parallel='snow'. Switching to parallel='no'.", " Unless using Windows, parallel='multicore' should work.") } } ## call lavaanList() again to run lavTestScore() on each imputation oldCall$dataList <- object@DataList[useImps] oldCall$FUN <- function(obj) lapply(seeds, plaus.lavaan, object = obj, ...) FIT <- eval(as.call(oldCall)) ## check if there are any results noFS <- sapply(FIT@funList, is.null) if (all(noFS)) stop("No success drawing plausible values for any imputations.") do.call(c, FIT@funList) # concatenate lists } ## draw 1 set of plausible values from a blavaan object ##' @importFrom lavaan lavNames lavInspect plaus.blavaan <- function(object) { stopifnot(inherits(object, "blavaan")) requireNamespace("blavaan") if (!"package:blavaan" %in% search()) attachNamespace("blavaan") # cluster <- lavInspect(object, "cluster") group <- lavInspect(object, "group") group.label <- lavInspect(object, "group.label") nG <- lavInspect(object, "ngroups") # nL <- lavInspect(object, "nlevels") case.idx <- lavInspect(object, "case.idx") ## stack factor scores from each chain (one row per PV) FS <- do.call(rbind, blavaan::blavInspect(object, "lvs")) ## column names contain indices to store PVs in matrix eta.idx <- colnames(FS) ## N and latent variable names, to know dimensions of PV N <- lavInspect(object, "ntotal") etas <- lavNames(object, "lv") #FIXME: assumes same model in both groups PV <- list() ## loop over rows (draws), assign columns to eta matrix, save in PV list for (i in 1:nrow(FS)) { eta <- matrix(NA, nrow = N, ncol = length(etas), dimnames = list(NULL, etas)) for (j in eta.idx) eval(parse(text = paste(j, "<-", FS[i, j]) )) PV[[i]] <- data.frame(eta) ## add case indices, and groups (if applicable) if (nG == 1L) PV[[i]]$case.idx <- case.idx else { PV[[i]]$case.idx <- do.call(c, case.idx) PV[[i]][ , group] <- rep(group.label, times = lavInspect(object, "nobs")) } } PV } ## ------ ## Checks ## ------ # HS.model <- ' visual =~ x1 + x2 + x3 # textual =~ x4 + x5 + x6 # speed =~ x7 + x8 + x9 ' # # fit1 <- cfa(HS.model, data = HolzingerSwineford1939) # fs1 <- plausibleValues(fit1, nDraws = 3, append.data = T) # lapply(fs1, head) # # # # step1 <- cfa(HS.model, data = HolzingerSwineford1939, group = "school", # group.equal = c("loadings","intercepts")) # PV.list <- plausibleValues(step1, append.data = T) # lapply(PV.list[1:3], head) # # # model <- ' # level: 1 # fw =~ y1 + y2 + y3 # fw ~ x1 + x2 + x3 # level: 2 # fb =~ y1 + y2 + y3 # fb ~ w1 + w2 # ' # msem <- sem(model, data = Demo.twolevel, cluster = "cluster") # mlPVs <- plausibleValues(msem, nDraws = 3, append.data = T) # both levels by default # lapply(mlPVs, head, n = 10) # ## only Level 1 # mlPV1 <- plausibleValues(msem, nDraws = 3, level = 1, append.data = T) # lapply(mlPV1, head) # ## only Level 2 # mlPV2 <- plausibleValues(msem, nDraws = 3, level = 2, append.data = T) # lapply(mlPV2, head) # # # # data(Demo.twolevel) # Demo.twolevel$g <- ifelse(Demo.twolevel$cluster %% 2L, "foo", "bar") # arbitrary groups # table(Demo.twolevel$g) # model2 <- ' group: foo # level: within # fw =~ y1 + L2*y2 + L3*y3 # fw ~ x1 + x2 + x3 # level: between # fb =~ y1 + L2*y2 + L3*y3 # fb ~ w1 + w2 # # group: bar # # level: within # fw =~ y1 + L2*y2 + L3*y3 # fw ~ x1 + x2 + x3 # level: between # fb =~ y1 + L2*y2 + L3*y3 # fb ~ w1 + w2 # 
' # msem2 <- sem(model2, data = Demo.twolevel, cluster = "cluster", group = "g") # ml2PVs <- plausibleValues(msem2, nDraws = 3, append.data = T) # both levels by default # lapply(ml2PVs, head, n = 10) # ## only Level 1 # ml2PV1 <- plausibleValues(msem2, nDraws = 3, level = 1, append.data = T) # lapply(ml2PV1, head) # ## only Level 2 # ml2PV2 <- plausibleValues(msem2, nDraws = 3, level = 2, append.data = T) # lapply(ml2PV2, head) semTools/R/runMI-score.R0000644000176200001440000011072113532030031014536 0ustar liggesusers### Terrence D. Jorgensen & Yves Rosseel ### Last updated: 29 August 2019 ### Pooled score test (= Lagrange Multiplier test) for multiple imputations ### Borrowed source code from lavaan/R/lav_test_score.R ## this function can run two modes: ## MODE 1: 'add' ## add new parameters that are currently not included in de model ## (aka fixed to zero), but should be released ## MODE 2: 'release' (the default) ## release existing "==" constraints ##' Score Test for Multiple Imputations ##' ##' Score test (or "Lagrange multiplier" test) for lavaan models fitted to ##' multiple imputed data sets. Statistics for releasing one or more ##' fixed or constrained parameters in model can be calculated by pooling ##' the gradient and information matrices pooled across imputed data sets, ##' analogous to Li, Meng, Raghunathan, & Rubin's (1991) proposed Wald test, or ##' by pooling the complete-data score-test statistics across imputed data sets ##' (Li et al., 1991). ##' ##' @aliases lavTestScore.mi ##' @importFrom lavaan lavListInspect parTable ##' @importFrom stats cov pchisq pf ##' @importFrom methods getMethod ##' ##' @param object An object of class \code{\linkS4class{lavaan.mi}}. ##' @param add Either a \code{character} string (typically between single ##' quotes) or a parameter table containing additional (currently fixed-to-zero) ##' parameters for which the score test must be computed. ##' @param release Vector of \code{integer}s. The indices of the \emph{equality} ##' constraints that should be released. The indices correspond to the order of ##' the equality constraints as they appear in the parameter table. ##' @param test \code{character} indicating which pooling method to use. ##' \code{"D1"} indicates Li, Meng, Raghunathan, & Rubin's (1991) proposed Wald ##' test will be applied to the gradient and information, and those pooled ##' values will be used to calculate score-test statistics in the usual manner. ##' \code{"D2"} (default) indicates that complete-data score-test statistics ##' calculated from each imputed data set will be pooled across imputations, ##' as described in Li et al. (1991) and Enders (2010). ##' @param scale.W \code{logical}. If \code{FALSE}, the pooled ##' information matrix is calculated as the weighted sum of the ##' within-imputation and between-imputation components. Otherwise, the pooled ##' information is calculated by scaling the within-imputation component by the ##' average relative increase in variance (ARIV; see Enders, 2010, p. 235), ##' which is \emph{only} consistent when requesting the \emph{F} test (i.e., ##' \code{asymptotic = FALSE}. Ignored (irrelevant) if \code{test = "D2"}. ##' @param omit.imps \code{character} vector specifying criteria for omitting ##' imputations from pooled results. Can include any of ##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the ##' default setting, which excludes any imputations that did not ##' converge or for which standard errors could not be computed. 
The
##' last option (\code{"no.npd"}) would exclude any imputations which
##' yielded a nonpositive definite covariance matrix for observed or
##' latent variables, which would include any "improper solutions" such
##' as Heywood cases.
##' @param asymptotic \code{logical}. If \code{FALSE} (default when using
##' \code{add} to test adding fixed parameters to the model), the pooled test
##' will be returned as an \emph{F}-distributed variable with numerator
##' (\code{df1}) and denominator (\code{df2}) degrees of freedom.
##' If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its
##' \code{df1} on the assumption that its \code{df2} is sufficiently large
##' that the statistic will be asymptotically \eqn{\chi^2} distributed
##' with \code{df1}. When using the \code{release} argument, \code{asymptotic}
##' will be set to \code{TRUE} because (A)RIV can only be calculated for
##' \code{add}ed parameters.
##' @param univariate \code{logical}. If \code{TRUE}, compute the univariate
##' score statistics, one for each constraint.
##' @param cumulative \code{logical}. If \code{TRUE}, order the univariate score
##' statistics from large to small, and compute a series of multivariate
##' score statistics, each time including an additional constraint in the test.
##' @param epc \code{logical}. If \code{TRUE}, and we are releasing existing
##' constraints, compute the expected parameter changes for the existing (free)
##' parameters (and any specified with \code{add}), if all constraints
##' were released. For EPCs associated with a particular (1-\emph{df})
##' constraint, only specify one parameter in \code{add} or one constraint in
##' \code{release}.
##' @param standardized If \code{TRUE}, two extra columns (\code{sepc.lv} and
##' \code{sepc.all}) in the \code{$epc} table will contain standardized values
##' for the EPCs. See \code{\link{lavTestScore}}.
##' @param cov.std \code{logical}. See \code{\link{standardizedSolution}}.
##' @param verbose \code{logical}. Not used for now.
##' @param warn \code{logical}. If \code{TRUE}, print warnings if they occur.
##' @param information \code{character} indicating the type of information
##' matrix to use (check \code{\link{lavInspect}} for available options).
##' \code{"expected"} information is the default, which provides better
##' control of Type I errors.
##'
##' @return
##' A list containing at least one \code{data.frame}:
##' \itemize{
##' \item{\code{$test}: The total score test, with columns for the score
##' test statistic (\code{X2}), its degrees of freedom (\code{df}), its
##' \emph{p} value under the \eqn{\chi^2} distribution (\code{p.value}),
##' and if \code{asymptotic=FALSE}, the average relative increase in
##' variance (ARIV) used to calculate the denominator \emph{df} is also
##' returned as a missing-data diagnostic, along with the fraction of missing
##' information (FMI = ARIV / (1 + ARIV)).}
##' \item{\code{$uni}: Optional (if \code{univariate=TRUE}).
##' Each 1-\emph{df} score test, equivalent to modification indices. Also
##' includes EPCs if \code{epc=TRUE}, and RIV and FMI if
##' \code{asymptotic=FALSE}.}
##' \item{\code{$cumulative}: Optional (if \code{cumulative=TRUE}).
##' Cumulative score tests, with ARIV and FMI if \code{asymptotic=FALSE}.}
##' \item{\code{$epc}: Optional (if \code{epc=TRUE}). Parameter estimates,
##' expected parameter changes, and expected parameter values if ALL
##' the tested constraints were freed.}
##' }
##' See \code{\link[lavaan]{lavTestScore}} for details.
##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' Adapted from \pkg{lavaan} source code, written by ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' \code{test = "D1"} method proposed by ##' Maxwell Mansolf (University of California, Los Angeles; ##' \email{mamansolf@@gmail.com}) ##' ##' @references ##' Bentler, P. M., & Chou, C.-P. (1992). Some new covariance structure model ##' improvement statistics. \emph{Sociological Methods & Research, 21}(2), ##' 259--282. doi:10.1177/0049124192021002006 ##' ##' Enders, C. K. (2010). \emph{Applied missing data analysis}. ##' New York, NY: Guilford. ##' ##' Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). ##' Significance levels from repeated \emph{p}-values with multiply-imputed ##' data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from ##' https://www.jstor.org/stable/24303994 ##' ##' @seealso \code{\link[lavaan]{lavTestScore}} ##' ##' @examples ##' \dontrun{ ##' ## impose missing data for example ##' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), ##' "ageyr","agemo","school")] ##' set.seed(12345) ##' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) ##' age <- HSMiss$ageyr + HSMiss$agemo/12 ##' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ##' ##' ## impute missing data ##' library(Amelia) ##' set.seed(12345) ##' HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) ##' imps <- HS.amelia$imputations ##' ##' ## specify CFA model from lavaan's ?cfa help page ##' HS.model <- ' ##' speed =~ c(L1, L1)*x7 + c(L1, L1)*x8 + c(L1, L1)*x9 ##' ' ##' ##' out <- cfa.mi(HS.model, data = imps, group = "school", std.lv = TRUE) ##' ##' ## Mode 1: Score test for releasing equality constraints ##' ##' ## default test: Li et al.'s (1991) "D2" method ##' lavTestScore.mi(out, cumulative = TRUE) ##' ## Li et al.'s (1991) "D1" method ##' lavTestScore.mi(out, test = "D1") ##' ##' ## Mode 2: Score test for adding currently fixed-to-zero parameters ##' lavTestScore.mi(out, add = 'x7 ~~ x8 + x9') ##' ##' } ##' ##' @export lavTestScore.mi <- function(object, add = NULL, release = NULL, test = c("D2","D1"), scale.W = !asymptotic, omit.imps = c("no.conv","no.se"), asymptotic = is.null(add), # as F or chi-squared univariate = TRUE, cumulative = FALSE, epc = FALSE, standardized = epc, cov.std = epc, verbose = FALSE, warn = TRUE, information = "expected") { stopifnot(inherits(object, "lavaan.mi")) lavoptions <- object@Options useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) test <- toupper(test[1]) if (!test %in% c("D2","D1")) stop('Invalid choice of "test" argument.') ## check if model has converged if (m == 0L) stop("No models converged. 
Score tests unavailable.") # check for inequality constraints PT <- parTable(object) if (any(PT$op == ">" | PT$op == "<")) { stop("lavTestScore.mi() does not handle inequality constraints (yet)") } # check arguments if (cumulative) univariate <- TRUE if (sum(is.null(release), is.null(add)) == 0) { stop("`add' and `release' arguments cannot be used together.\n", "Fixed parameters can instead be labeled in the model syntax ", "and those labels can be constrained to fixed values, so that ", "the constraints can be tested using the `release' argument along ", "with other released constraints.") } oldCall <- object@lavListCall #oldCall$model <- parTable(object) # FIXME: necessary? if (test == "D2") { if (!is.null(oldCall$parallel)) { if (oldCall$parallel == "snow") { oldCall$parallel <- "no" oldCall$ncpus <- 1L if (warn) warning("Unable to pass lavaan::lavTestScore() arguments ", "when parallel='snow'. Switching to parallel='no'.", " Unless using Windows, parallel='multicore' works.") } } ## call lavaanList() again to run lavTestScore() on each imputation oldCall$FUN <- function(obj) { out <- try(lavaan::lavTestScore(obj, add = add, release = release, univariate = univariate, epc = epc, cumulative = cumulative, cov.std = cov.std, standardized = standardized, warn = FALSE, information = information), silent = TRUE) if (inherits(out, "try-error")) return(NULL) out } FIT <- eval(as.call(oldCall)) ## check if there are any results noScores <- sapply(FIT@funList, is.null) if (all(noScores)) stop("No success using lavTestScore() on any imputations.") ## template to fill in pooled values OUT <- FIT@funList[[ intersect(useImps, which(!noScores))[1] ]] ## at a minimum, pool the total score test chiList <- sapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$test$X2) chiPooled <- calculate.D2(chiList, DF = OUT$test$df, asymptotic) OUT$test$X2 <- chiPooled[1] if (!asymptotic) { names(OUT$test)[names(OUT$test) == "X2"] <- "F" names(OUT$test)[names(OUT$test) == "df"] <- "df1" OUT$test$df2 <- chiPooled[["df2"]] OUT$test$p.value <- NULL # so it appears after "df2" column } OUT$test$p.value <- chiPooled[["pvalue"]] OUT$test$ariv <- chiPooled[["ariv"]] OUT$test$fmi <- chiPooled[["fmi"]] ## univariate? if (univariate) { if (!asymptotic) { names(OUT$uni)[names(OUT$uni) == "X2"] <- "F" OUT$uni$p.value <- NULL # so it appears after "df2" column OUT$uni$df2 <- NA OUT$uni$p.value <- NA OUT$uni$riv <- NA OUT$uni$fmi <- NA if ("epc" %in% colnames(OUT$uni)) { OUT$uni$epc <- NULL # so it appears after "p.value" OUT$uni$epc <- NA # fill in below } } for (i in 1:nrow(OUT$uni)) { chiList <- sapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$uni$X2[i] ) chiPooled <- calculate.D2(chiList, DF = OUT$uni$df[i], asymptotic) if (!asymptotic) { OUT$uni$F[i] <- chiPooled[[1]] OUT$uni$df2[i] <- chiPooled[["df2"]] } else OUT$uni$X2[i] <- chiPooled[[1]] OUT$uni$p.value[i] <- chiPooled[["pvalue"]] OUT$uni$riv[i] <- chiPooled[["ariv"]] OUT$uni$fmi[i] <- chiPooled[["fmi"]] if ("epc" %in% colnames(OUT$uni)) { epcUniList <- sapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$uni$epc[i] ) OUT$uni$epc[i] <- mean(epcUniList) } } if (!asymptotic) names(OUT$uni)[names(OUT$uni) == "df"] <- "df1" } ## cumulative? 
if (cumulative) { if (!asymptotic) { names(OUT$cumulative)[names(OUT$cumulative) == "X2"] <- "F" OUT$cumulative$p.value <- NULL # so it appears after "df2" column OUT$cumulative$df2 <- NA OUT$cumulative$p.value <- NA OUT$cumulative$ariv <- NA OUT$cumulative$fmi <- NA } for (i in 1:nrow(OUT$cumulative)) { chiList <- sapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$cumulative$X2[i] ) chiPooled <- calculate.D2(chiList, DF = OUT$cumulative$df[i], asymptotic) if (!asymptotic) { OUT$cumulative$F[i] <- chiPooled[[1]] OUT$cumulative$df2[i] <- chiPooled[["df2"]] } else OUT$cumulative$X2[i] <- chiPooled[[1]] OUT$cumulative$p.value[i] <- chiPooled[["pvalue"]] OUT$cumulative$ariv[i] <- chiPooled[["ariv"]] OUT$cumulative$fmi[i] <- chiPooled[["fmi"]] } if (!asymptotic) names(OUT$cumulative)[names(OUT$cumulative) == "df"] <- "df1" } ## EPCs? if (epc) { estList <- lapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$epc$est) OUT$epc$est <- rowMeans(do.call(cbind, estList)) epcList <- lapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$epc$epc) OUT$epc$epc <- rowMeans(do.call(cbind, epcList)) OUT$epc$epv <- OUT$epc$est + OUT$epc$epc if (standardized) { sepcList <- lapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$epc$sepc.lv) OUT$epc$sepc.lv <- rowMeans(do.call(cbind, sepcList)) sepcList <- lapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$epc$sepc.all) OUT$epc$sepc.all <- rowMeans(do.call(cbind, sepcList)) if ("sepc.nox" %in% colnames(FIT@funList[intersect(useImps, which(!noScores))][[1]])) { sepcList <- lapply(FIT@funList[intersect(useImps, which(!noScores))], function(x) x$epc$sepc.nox) OUT$epc$sepc.nox <- rowMeans(do.call(cbind, sepcList)) } } } return(OUT) } # else test == "D1", making 'scale.W=' relevant ## number of free parameters (regardless of whether they are constrained) npar <- object@Model@nx.free ## sample size N <- lavListInspect(object, "ntotal") if (lavoptions$mimic == "EQS") N <- N - 1 # Mode 1: ADDING new parameters if (!is.null(add) && nchar(add) > 0L) { ## turn off SNOW cluster (can't past arguments) if (!is.null(oldCall$parallel)) { if (oldCall$parallel == "snow") { oldCall$parallel <- "no" oldCall$ncpus <- 1L if (warn) warning("Unable to pass lavaan::lavTestScore() arguments ", "when parallel='snow'. 
Switching to parallel='no'.", " Unless using Windows, parallel='multicore' works.") } } ## call lavaanList() to fit augmented model (do.fit = FALSE) oldCall$FUN <- function(obj) { ngroups <- lavaan::lavInspect(obj, "ngroups") nlevels <- obj@Data@nlevels #FIXME: lavListInspect(obj, "nlevels") ## -------------------------------------- ## borrowed code from lav_object_extend() ## -------------------------------------- ## select columns that should always be included below myCols <- c("lhs","op","rhs") if (ngroups > 1L) myCols <- c(myCols,"block","group") if (nlevels > 1L) myCols <- c(myCols,"block","level") myCols <- unique(myCols) # partable original model oldPT <- lavaan::parTable(obj)[c(myCols, "free","label","plabel")] # replace 'start' column, since lav_model will fill these in in GLIST oldPT$start <- lavaan::parameterEstimates(obj, remove.system.eq = FALSE, remove.def = FALSE, remove.eq = FALSE, remove.ineq = FALSE)$est # add new parameters, extend model # ADD <- lavaan::modindices(obj, standardized = FALSE)[myCols] if (is.list(add)) { stopifnot(!is.null(add$lhs), !is.null(add$op), !is.null(add$rhs)) ADD <- as.data.frame(add) } else if (is.character(add)) { ADD <- lavaan::lavaanify(add, ngroups = ngroups) ADD <- ADD[ , c(myCols, "user","label")] remove.idx <- which(ADD$user == 0) if (length(remove.idx) > 0L) { ADD <- ADD[-remove.idx,] } ADD$start <- rep( 0, nrow(ADD)) ADD$free <- rep( 1, nrow(ADD)) ADD$user <- rep(10, nrow(ADD)) } else stop("'add' must be lavaan model syntax or a parameter table.") # nR <- try(nrow(ADD), silent = TRUE) # if (class(nR) == "try-error" || is.null(nR)) return(list(gradient = NULL, # information = NULL)) # ADD$free <- rep(1L, nR) # ADD$user <- rep(10L, nR) # merge LIST <- lavaan::lav_partable_merge(oldPT, ADD, remove.duplicated = TRUE, warn = FALSE) # redo 'free' free.idx <- which(LIST$free > 0) LIST$free[free.idx] <- 1:length(free.idx) # adapt options lavoptions <- obj@Options if (any(LIST$op == "~1")) lavoptions$meanstructure <- TRUE lavoptions$do.fit <- FALSE obj2 <- lavaan::lavaan(LIST, slotOptions = lavoptions, slotSampleStats = obj@SampleStats, slotData = obj@Data, slotCache = obj@Cache, sloth1 = obj@h1) ## --------------------------------- list(gradient = lavaan::lavInspect(obj2, "gradient"), information = lavaan::lavInspect(obj2, paste("information", information, sep = ".")), #TODO: Max wants to calculate EPCs as averages across imputations. # Run lavTestScore(epc=TRUE) here? or be consistent... nadd = nrow(ADD), parTable = lavaan::parTable(obj2)) } FIT <- eval(as.call(oldCall)) ## obtain list of inverted Jacobians: within-impuation covariance matrices R.model <- object@Model@con.jac[,,drop = FALSE] nadd <- FIT@funList[[ useImps[1] ]]$nadd ## pool gradients and information matrices gradList <- lapply(FIT@funList[useImps], "[[", i = "gradient") infoList <- lapply(FIT@funList[useImps], "[[", i = "information") score <- colMeans(do.call(rbind, gradList)) # pooled point estimates B <- cov(do.call(rbind, gradList) * sqrt(N)) # between-imputation UNIT information W <- Reduce("+", infoList) / m # within-imputation UNIT information inv.W <- try(solve(W), silent = TRUE) if (inherits(inv.W, "try-error")) { if (warn && scale.W) warning("Could not invert W for total score test, ", "perhaps due to constraints on estimated ", "parameters. 
Generalized inverse used instead.\n", "If the model does not have equality constraints, ", "it may be safer to set `scale.W = FALSE'.") inv.W <- MASS::ginv(W) } ## relative increase in variance due to missing data ariv <- (1 + 1/m)/nadd * sum(diag(B %*% inv.W)) # ONLY relevant for scaling full INFO matrix if (scale.W) { information <- (1 + ariv) * W # Enders (2010, p. 235) eqs. 8.20-21 } else { ## less reliable, but constraints prevent inversion of W information <- W + B + (1/m)*B # Enders (2010, p. 235) eq. 8.19 } if (nrow(R.model) > 0L) { R.model <- cbind(R.model, matrix(0, nrow(R.model), ncol = nadd)) R.add <- cbind(matrix(0, nrow = nadd, ncol = npar), diag(nadd)) R <- rbind(R.model, R.add) Z <- cbind(rbind(information, R.model), rbind(t(R.model),matrix(0,nrow(R.model),nrow(R.model)))) Z.plus <- MASS::ginv(Z) J.inv <- Z.plus[ 1:nrow(information), 1:nrow(information) ] r.idx <- seq_len(nadd) + nrow(R.model) } else { R <- cbind(matrix(0, nrow = nadd, ncol = npar), diag(nadd)) J.inv <- MASS::ginv(information) r.idx <- seq_len(nadd) } PT <- FIT@funList[[ useImps[1] ]]$parTable if (is.null(PT$group)) PT$group <- PT$block # lhs/rhs lhs <- lavaan::lav_partable_labels(PT)[ PT$user == 10L ] op <- rep("==", nadd) rhs <- rep("0", nadd) Table <- data.frame(lhs = lhs, op = op, rhs = rhs) class(Table) <- c("lavaan.data.frame", "data.frame") } else { # MODE 2: releasing constraints if (!asymptotic) { message('The average relative increase in variance (ARIV) cannot be ', 'calculated for releasing estimated constraints, preventing the ', 'denominator degrees of freedom from being calculated for the F ', 'test, so the "asymptotic" argument was switched to TRUE.' ) asymptotic <- TRUE scale.W <- FALSE } if (is.character(release)) stop("not implemented yet") #FIXME: moved up to save time R <- object@Model@con.jac[,,drop = FALSE] if (nrow(R) == 0L) stop("No equality constraints found in the model.") ## use lavaanList() to get gradient/information from each imputation oldCall$FUN <- function(obj) { list(gradient = lavaan::lavInspect(obj, "gradient"), information = lavaan::lavInspect(obj, paste("information", information, sep = "."))) } FIT <- eval(as.call(oldCall)) ## pool gradients and information matrices gradList <- lapply(FIT@funList[useImps], "[[", i = "gradient") infoList <- lapply(FIT@funList[useImps], "[[", i = "information") score <- colMeans(do.call(rbind, gradList)) # pooled point estimates B <- cov(do.call(rbind, gradList) * sqrt(N)) # between-imputation UNIT information W <- Reduce("+", infoList) / m # within-imputation UNIT information inv.W <- try(solve(W), silent = TRUE) if (inherits(inv.W, "try-error")) { if (warn && scale.W) warning("Could not invert W for total score test, ", "perhaps due to constraints on estimated ", "parameters. Generalized inverse used instead.\n", "If the model does not have equality constraints, ", "it may be safer to set `scale.W = FALSE'.") inv.W <- MASS::ginv(W) } ## relative increase in variance due to missing data k <- length(release) if (k == 0) k <- nrow(R) ariv <- (1 + 1/m)/k * sum(diag(B %*% inv.W)) #FIXME: Invalid extrapolation! if (scale.W) { #TODO: This option is disabled, kept only to update with future research information <- (1 + ariv) * W # Enders (2010, p. 235) eqs. 8.20-21 } else { ## less reliable, but constraints prevent inversion of W information <- W + B + (1/m)*B # Enders (2010, p. 235) eq. 8.19 } if (is.null(release)) { # ALL constraints r.idx <- seq_len( nrow(R) ) J.inv <- MASS::ginv(information) #FIXME? 
Yves has this above if(is.null(release)) } else if (is.numeric(release)) { r.idx <- release if(max(r.idx) > nrow(R)) { stop("lavaan ERROR: maximum constraint number (", max(r.idx), ") is larger than number of constraints (", nrow(R), ")") } # neutralize the non-needed constraints R1 <- R[-r.idx, , drop = FALSE] Z1 <- cbind( rbind(information, R1), rbind(t(R1), matrix(0, nrow(R1), nrow(R1))) ) Z1.plus <- MASS::ginv(Z1) J.inv <- Z1.plus[ 1:nrow(information), 1:nrow(information) ] } else if (is.character(release)) { stop("not implemented yet") } # lhs/rhs eq.idx <- which(object@ParTable$op == "==") if (length(eq.idx) > 0L) { lhs <- object@ParTable$lhs[eq.idx][r.idx] op <- rep("==", length(r.idx)) rhs <- object@ParTable$rhs[eq.idx][r.idx] } Table <- data.frame(lhs = lhs, op = op, rhs = rhs) class(Table) <- c("lavaan.data.frame", "data.frame") } if (lavoptions$se == "standard") { stat <- as.numeric(N * score %*% J.inv %*% score) } else { # generalized score test if (warn) warning("se is not `standard'. Robust test not implemented yet. ", "Falling back to ordinary score test.") # NOTE!!! # we can NOT use VCOV here, because it reflects the constraints, # and the whole point is to test for these constraints... stat <- as.numeric(N * score %*% J.inv %*% score) } # compute df, taking into account that some of the constraints may # be needed to identify the model (and hence information is singular) # information.plus <- information + crossprod(R) #df <- qr(R[r.idx,,drop = FALSE])$rank + # ( qr(information)$rank - qr(information.plus)$rank ) DF <- nrow( R[r.idx, , drop = FALSE] ) if (asymptotic) { TEST <- data.frame(test = "score", X2 = stat, df = DF, p.value = pchisq(stat, df = DF, lower.tail = FALSE)) } else { ## calculate denominator DF for F statistic myDims <- 1:nadd + npar #TODO: not available in release mode, unless calculating Jacobian of constraints, like Wald test? ARIV <- (1 + 1/m)/nadd * sum(diag(B[myDims, myDims, drop = FALSE] %*% inv.W[myDims, myDims, drop = FALSE])) a <- DF*(m - 1) if (a > 4) { df2 <- 4 + (a - 4) * (1 + (1 - 2/a)*(1 / ARIV))^2 # Enders (eq. 8.24) } else { df2 <- a*(1 + 1/DF) * (1 + 1/ARIV)^2 / 2 # Enders (eq. 8.25) } TEST <- data.frame(test = "score", "F" = stat / DF, df1 = DF, df2 = df2, p.value = pf(stat / DF, df1 = DF, df2 = df2, lower.tail = FALSE), ariv = ARIV, fmi = ARIV / (1 + ARIV)) } class(TEST) <- c("lavaan.data.frame", "data.frame") attr(TEST, "header") <- "total score test:" OUT <- list(test = TEST) if (univariate) { TS <- numeric( nrow(R) ) EPC.uni <- numeric( nrow(R) ) #FIXME: to add univariate EPCs for added parameters for (r in r.idx) { R1 <- R[-r, , drop = FALSE] Z1 <- cbind( rbind(information, R1), rbind(t(R1), matrix(0, nrow(R1), nrow(R1))) ) Z1.plus <- MASS::ginv(Z1) Z1.plus1 <- Z1.plus[ 1:nrow(information), 1:nrow(information) ] TS[r] <- as.numeric(N * t(score) %*% Z1.plus1 %*% score) ## FIXME: experimentally add univariate EPCs for added parameters, as would accompany modification indices if (epc && !is.null(add)) EPC.uni[r] <- 1 * utils::tail(as.numeric(score %*% Z1.plus1), n = nrow(R))[r] } Table2 <- Table DF <- rep(1, length(r.idx)) if (asymptotic) { Table2$X2 <- TS[r.idx] Table2$df <- DF Table2$p.value <- pchisq(Table2$X2, df = DF, lower.tail = FALSE) } else { Table2$F <- TS[r.idx] / DF Table2$df1 <- DF ## calculate denominator DF for F statistic using RIV per 1-df test (Enders eq. 
8.10) myDims <- 1:nadd + npar RIVs <- diag((1 + 1/m) * B[myDims, myDims, drop = FALSE]) / diag(W[myDims, myDims, drop = FALSE]) Table2$df2 <- sapply(RIVs, function(riv) { DF1 <- 1L # Univariate tests a <- DF1*(m - 1) DF2 <- if (a > 4) { 4 + (a - 4) * (1 + (1 - 2/a)*(1 / riv))^2 # Enders (eq. 8.24) } else a*(1 + 1/DF1) * (1 + 1/riv)^2 / 2 # Enders (eq. 8.25) DF2 }) Table2$p.value <- pf(Table2$F, df1 = DF, df2 = Table2$df2, lower.tail = FALSE) Table2$riv <- RIVs Table2$fmi <- RIVs / (1 + RIVs) } ## add univariate EPCs, equivalent to modindices() output if (epc && !is.null(add)) Table2$epc <- EPC.uni[r.idx] attr(Table2, "header") <- "univariate score tests:" OUT$uni <- Table2 } if (cumulative) { TS.order <- sort.int(TS, index.return = TRUE, decreasing = TRUE)$ix TS <- numeric( length(r.idx) ) if (!asymptotic) ARIVs <- numeric( length(r.idx) ) for (r in 1:length(r.idx)) { rcumul.idx <- TS.order[1:r] R1 <- R[-rcumul.idx, , drop = FALSE] Z1 <- cbind( rbind(information, R1), rbind(t(R1), matrix(0, nrow(R1), nrow(R1))) ) Z1.plus <- MASS::ginv(Z1) Z1.plus1 <- Z1.plus[ 1:nrow(information), 1:nrow(information) ] TS[r] <- as.numeric(N * t(score) %*% Z1.plus1 %*% score) if (!asymptotic) { myDims <- rcumul.idx + npar ARIVs[r] <- (1 + 1/m)/length(myDims) * sum(diag(B[myDims, myDims, drop = FALSE] %*% inv.W[myDims, myDims, drop = FALSE])) } } Table3 <- Table[TS.order,] DF <- seq_len( length(TS) ) if (asymptotic) { Table3$X2 <- TS Table3$df <- DF Table3$p.value <- pchisq(Table3$X2, df = DF, lower.tail = FALSE) } else { Table3$F <- TS / DF Table3$df1 <- DF ## calculate denominator DF for F statistic Table3$df2 <- mapply(FUN = function(DF1, ariv) { a <- DF1*(m - 1) DF2 <- if (a > 4) { 4 + (a - 4) * (1 + (1 - 2/a)*(1 / ariv))^2 # Enders (eq. 8.24) } else a*(1 + 1/DF1) * (1 + 1/ariv)^2 / 2 # Enders (eq. 8.25) DF2 }, DF1 = DF, ariv = ARIVs) Table3$p.value = pf(Table3$F, df1 = DF, df2 = Table3$df2, lower.tail = FALSE) Table3$riv <- ARIVs Table3$fmi <- ARIVs / (1 + ARIVs) } attr(Table3, "header") <- "cumulative score tests:" OUT$cumulative <- Table3 } if (epc) { ngroups <- lavaan::lavInspect(object, "ngroups") nlevels <- object@Data@nlevels #FIXME: lavListInspect(object, "nlevels") ################# source code Yves commented out. ################# Calculates 1 EPC-vector per constraint. ################# Better to call lavTestScore() multiple times? Ugh... 
# EPC <- vector("list", length = length(r.idx)) # for (i in 1:length(r.idx)) { # r <- r.idx[i] # R1 <- R[-r,,drop = FALSE] # Z1 <- cbind( rbind(information, R1), # rbind(t(R1), matrix(0,nrow(R1),nrow(R1))) ) # Z1.plus <- MASS::ginv(Z1) # Z1.plus1 <- Z1.plus[ 1:nrow(information), 1:nrow(information) ] # EPC[[i]] <- -1 * as.numeric(score %*% Z1.plus1) # } # OUT$EPC <- EPC # EPCs when freeing all constraints together (total test) R1 <- R[-r.idx, , drop = FALSE] Z1 <- cbind( rbind(information, R1), rbind(t(R1), matrix(0, nrow(R1), nrow(R1))) ) Z1.plus <- MASS::ginv(Z1) Z1.plus1 <- Z1.plus[ 1:nrow(information), 1:nrow(information) ] EPC.all <- 1 * as.numeric(score %*% Z1.plus1) # create epc table for the 'free' parameters myCoefs <- getMethod("coef","lavaan.mi")(object, omit.imps = omit.imps) myCols <- c("lhs","op","rhs","user") if (ngroups > 1L) myCols <- c(myCols, "block","group") if (nlevels > 1L) myCols <- c(myCols, "block","level") myCols <- c(unique(myCols), "free","exo","label","plabel") LIST <- if (!is.null(add) && nchar(add) > 0L) { PT[ , myCols] } else parTable(object)[ , myCols] nonpar.idx <- which(LIST$op %in% c("==", ":=", "<", ">")) if (length(nonpar.idx) > 0L) LIST <- LIST[ -nonpar.idx , ] LIST$est[ LIST$free > 0 & LIST$user != 10 ] <- myCoefs LIST$est[ LIST$user == 10L ] <- 0 LIST$epc <- rep(as.numeric(NA), length(LIST$lhs)) LIST$epc[ LIST$free > 0 ] <- EPC.all LIST$epv <- LIST$est + LIST$epc #TODO: add SEPCs if (standardized) { EPC <- LIST$epc if (cov.std) { # replace epc values for variances by est values var.idx <- which(LIST$op == "~~" & LIST$lhs == LIST$rhs & LIST$exo == 0L) EPC[ var.idx ] <- LIST$est[ var.idx ] } # two problems: # - EPC of variances can be negative, and that is perfectly legal # - EPC (of variances) can be tiny (near-zero), and we should # not divide by tiny variables small.idx <- which(LIST$op == "~~" & LIST$lhs == LIST$rhs & abs(EPC) < sqrt( .Machine$double.eps ) ) if (length(small.idx) > 0L) EPC[ small.idx ] <- as.numeric(NA) # get the sign EPC.sign <- sign(LIST$epc) ## pooled estimates for standardizedSolution() pooledest <- getMethod("coef", "lavaan.mi")(object, omit.imps = omit.imps) ## update @Model@GLIST for standardizedSolution(..., GLIST=) object@Model <- lavaan::lav_model_set_parameters(object@Model, x = pooledest) LIST$sepc.lv <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.lv", cov.std = cov.std, partable = LIST, GLIST = object@Model@GLIST, est = abs(EPC))$est.std LIST$sepc.all <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.all", cov.std = cov.std, partable = LIST, GLIST = object@Model@GLIST, est = abs(EPC))$est.std fixed.x <- lavListInspect(object, "options")$fixed.x && length(lavNames(object, "ov.x")) if (fixed.x) { LIST$sepc.nox <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.nox", cov.std = cov.std, partable = LIST, GLIST = object@Model@GLIST, est = abs(EPC))$est.std } if (length(small.idx) > 0L) { LIST$sepc.lv[small.idx] <- 0 LIST$sepc.all[small.idx] <- 0 if (fixed.x) LIST$sepc.nox[small.idx] <- 0 } } LIST$free[ LIST$user == 10L ] <- 0 LIST$user <- NULL LIST$exo <- NULL DF <- if (asymptotic) OUT$test$df else OUT$test$df1 attr(LIST, "header") <- paste0("expected parameter changes (epc) and ", "expected parameter values (epv)", if (DF < 2) ":" else { " if ALL constraints in 'add' or 'release' were freed:" }) OUT$epc <- LIST } OUT } semTools/R/zzz.R0000644000176200001440000000106513432435606013250 0ustar liggesusers.onAttach <- function(libname, pkgname) { 
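  ## read this package's version from its DESCRIPTION file, then print a
  ## startup banner identifying the version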
version <- read.dcf(file = system.file("DESCRIPTION", package = pkgname), fields = "Version") packageStartupMessage(" ") packageStartupMessage("###############################################################################") packageStartupMessage("This is ", paste(pkgname, version)) packageStartupMessage("All users of R (or SEM) are invited to submit functions or ideas for functions.") packageStartupMessage("###############################################################################") } semTools/R/measurementInvariance.R0000644000176200001440000003002113432435606016732 0ustar liggesusers### Sunthud Pornprasertmanit, Yves Rosseel, and Terrence D. Jorgensen ### Last updated: 1 September 2018 ##' Measurement Invariance Tests ##' ##' Testing measurement invariance across groups using a typical sequence of ##' model comparison tests. ##' ##' If \code{strict = FALSE}, the following four models are tested in order: ##' \enumerate{ ##' \item Model 1: configural invariance. The same factor structure ##' is imposed on all groups. ##' \item Model 2: weak invariance. The factor loadings are constrained to ##' be equal across groups. ##' \item Model 3: strong invariance. The factor loadings and intercepts ##' are constrained to be equal across groups. ##' \item Model 4: The factor loadings, intercepts and means are constrained ##' to be equal across groups. ##' } ##' ##' Each time a more restricted model is fitted, a \eqn{\Delta\chi^2} test is ##' reported, comparing the current model with the previous one, and comparing ##' the current model to the baseline model (Model 1). In addition, the ##' difference in CFI is also reported (\eqn{\Delta}CFI). ##' ##' If \code{strict = TRUE}, the following five models are tested in order: ##' \enumerate{ ##' \item Model 1: configural invariance. The same factor structure ##' is imposed on all groups. ##' \item Model 2: weak invariance. The factor loadings are constrained to be ##' equal across groups. ##' \item Model 3: strong invariance. The factor loadings and intercepts are ##' constrained to be equal across groups. ##' \item Model 4: strict invariance. The factor loadings, intercepts and ##' residual variances are constrained to be equal across groups. ##' \item Model 5: The factor loadings, intercepts, residual variances and means ##' are constrained to be equal across groups. ##' } ##' ##' Note that if the \eqn{\chi^2} test statistic is scaled (e.g., a Satorra-Bentler ##' or Yuan-Bentler test statistic), a special version of the \eqn{\Delta\chi^2} ##' test is used as described in \url{http://www.statmodel.com/chidiff.shtml} ##' ##' @importFrom lavaan parTable ##' ##' @param ... The same arguments as for any lavaan model. See ##' \code{\link{cfa}} for more information. ##' @param std.lv If \code{TRUE}, the fixed-factor method of scale ##' identification is used. If \code{FALSE}, the first variable for each factor ##' is used as marker variable. ##' @param strict If \code{TRUE}, the sequence requires `strict' invariance. ##' See details for more information. ##' @param quiet If \code{FALSE} (default), a summary is printed out containing ##' an overview of the different models that are fitted, together with some ##' model comparison tests. If \code{TRUE}, no summary is printed. ##' @param fit.measures Fit measures used to calculate the differences between ##' nested models. ##' @param baseline.model custom baseline model passed to ##' \code{\link[lavaan]{fitMeasures}} ##' @param method The method used to calculate likelihood ratio test. 
See ##' \code{\link[lavaan]{lavTestLRT}} for available options ##' ##' @return Invisibly, all model fits in the sequence are returned as a list. ##' ##' @author Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) ##' ##' @references ##' Vandenberg, R. J., and Lance, C. E. (2000). A review and synthesis of the ##' measurement invariance literature: Suggestions, practices, and ##' recommendations for organizational research. \emph{Organizational ##' Research Methods, 3,} 4--70. ##' ##' @examples ##' ##' HW.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' measurementInvariance(model = HW.model, data = HolzingerSwineford1939, ##' group = "school", fit.measures = c("cfi","aic")) ##' ##' @name measurementInvariance-deprecated ##' @usage ##' measurementInvariance(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, ##' fit.measures = "default", baseline.model = NULL, ##' method = "satorra.bentler.2001") ##' @seealso \code{\link{semTools-deprecated}} ##' @keywords internal NULL ##' @rdname semTools-deprecated ##' @section Previous measurement-invariance functions: ##' The \code{measurementInvariance}, \code{measurementInvarianceCat}, and ##' \code{longInvariance} functions will no longer be supported. Instead, use ##' the \code{\link{measEq.syntax}} function, which is much more flexible and ##' supports a wider range of data (e.g., any mixture of \code{numeric} and ##' \code{ordered} indicators, any combination of multiple groups and repeated ##' measures, models fit to multiple imputations with \code{\link{runMI}}). ##' ##' @export measurementInvariance <- function(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "satorra.bentler.2001") { .Deprecated(msg = c("The measurementInvariance function is deprecated, and", " it will cease to be included in future versions of ", "semTools. See help('semTools-deprecated) for details.")) lavaancfa <- function(...) { lavaan::cfa(...) } ## check for a group.equal argument in ... dotdotdot <- list(...) if (is.null(dotdotdot$model)) stop('all lavaan() and lavOptions() arguments must', ' named, including the "model=" argument.') if (!is.null(dotdotdot$group.equal)) stop("lavaan ERROR: group.equal argument should not be used") ## and a model if (names(dotdotdot)[1] == "") names(dotdotdot)[1] <- "model" res <- list() ## base-line model: configural invariance configural <- dotdotdot configural$group.equal <- "" template <- try(do.call(lavaancfa, configural), silent = TRUE) if (class(template) == "try-error") stop('Configural model did not converge.') pttemplate <- parTable(template) varnames <- unique(pttemplate$rhs[pttemplate$op == "=~"]) facnames <- unique(pttemplate$lhs[(pttemplate$op == "=~") & (pttemplate$rhs %in% varnames)]) ngroups <- max(pttemplate$group) if (ngroups <= 1) stop("Well, the number of groups is 1. 
Measurement", " invariance across 'groups' cannot be done.") if (std.lv) { for (i in facnames) { pttemplate <- fixParTable(pttemplate, i, "~~", i, 1:ngroups, 1) } fixloadings <- which(pttemplate$op == "=~" & pttemplate$free == 0) for (i in fixloadings) { pttemplate <- freeParTable(pttemplate, pttemplate$lhs[i], "=~", pttemplate$rhs[i], pttemplate$group[i]) } dotdotdot$model <- pttemplate res$fit.configural <- try(do.call(lavaancfa, dotdotdot), silent = TRUE) } else { res$fit.configural <- template } ## fix loadings across groups if (std.lv) { findloadings <- which(pttemplate$op == "=~" & pttemplate$free != 0 & pttemplate$group == 1) for (i in findloadings) { pttemplate <- constrainParTable(pttemplate, pttemplate$lhs[i], "=~", pttemplate$rhs[i], 1:ngroups) } for (i in facnames) { pttemplate <- freeParTable(pttemplate, i, "~~", i, 2:ngroups) } dotdotdot$model <- pttemplate res$fit.loadings <- try(do.call(lavaancfa, dotdotdot), silent = TRUE) } else { loadings <- dotdotdot loadings$group.equal <- c("loadings") res$fit.loadings <- try(do.call(lavaancfa, loadings), silent = TRUE) } ## fix loadings + intercepts across groups if (std.lv) { findintcepts <- which(pttemplate$op == "~1" & pttemplate$lhs %in% varnames & pttemplate$free != 0 & pttemplate$group == 1) for (i in findintcepts) { pttemplate <- constrainParTable(pttemplate, pttemplate$lhs[i], "~1", "", 1:ngroups) } for (i in facnames) { pttemplate <- freeParTable(pttemplate, i, "~1", "", 2:ngroups) } dotdotdot$model <- pttemplate res$fit.intercepts <- try(do.call(lavaancfa, dotdotdot), silent = TRUE) } else { intercepts <- dotdotdot intercepts$group.equal <- c("loadings", "intercepts") res$fit.intercepts <- try(do.call(lavaancfa, intercepts), silent = TRUE) } if (strict) { if (std.lv) { findresiduals <- which(pttemplate$op == "~~" & pttemplate$lhs %in% varnames & pttemplate$rhs == pttemplate$lhs & pttemplate$free != 0 & pttemplate$group == 1) for (i in findresiduals) { pttemplate <- constrainParTable(pttemplate, pttemplate$lhs[i], "~~", pttemplate$rhs[i], 1:ngroups) } dotdotdot$model <- pttemplate res$fit.residuals <- try(do.call(lavaancfa, dotdotdot), silent = TRUE) for (i in facnames) { pttemplate <- fixParTable(pttemplate, i, "~1", "", 1:ngroups, 0) } dotdotdot$model <- pttemplate res$fit.means <- try(do.call(lavaancfa, dotdotdot), silent = TRUE) } else { # fix loadings + intercepts + residuals residuals <- dotdotdot residuals$group.equal <- c("loadings", "intercepts", "residuals") res$fit.residuals <- try(do.call(lavaancfa, residuals), silent = TRUE) # fix loadings + residuals + intercepts + means means <- dotdotdot means$group.equal <- c("loadings", "intercepts", "residuals", "means") res$fit.means <- try(do.call(lavaancfa, means), silent = TRUE) } } else { if (std.lv) { for (i in facnames) { pttemplate <- fixParTable(pttemplate, i, "~1", "", 1:ngroups, 0) } dotdotdot$model <- pttemplate res$fit.means <- try(do.call(lavaancfa, dotdotdot), silent = TRUE) } else { # fix loadings + intercepts + means means <- dotdotdot means$group.equal <- c("loadings", "intercepts", "means") res$fit.means <- try(do.call(lavaancfa, means), silent = TRUE) } } if (!quiet) printInvarianceResult(res, fit.measures, baseline.model, method) invisible(res) } ## ---------------- ## Hidden Functions ## ---------------- ##' @importFrom lavaan lavInspect ##' @importMethodsFrom lavaan fitMeasures printInvarianceResult <- function(FIT, fit.measures, baseline.model, method) { ## check whether models converged NAMES <- names(FIT) nonconv <- which(sapply(FIT, class) == 
"try-error") if (length(nonconv)) { message('The following model(s) did not converge: \n', paste(NAMES[nonconv], sep = "\n")) FIT <- FIT[-nonconv] NAMES <- NAMES[-nonconv] } names(FIT) <- NULL ## compare models lavaanLavTestLRT <- function(...) lavaan::lavTestLRT(...) TABLE <- do.call(lavaanLavTestLRT, c(FIT, list(model.names = NAMES, method = method))) if (length(fit.measures) == 1L && fit.measures == "default") { ## scaled test statistic? if (length(lavInspect(FIT[[1]], "test")) > 1L) { if (lavInspect(FIT[[1]], "test")[[2]]$test %in% c("satorra.bentler", "yuan.bentler")) { fit.measures <- c("cfi.robust", "rmsea.robust") } else fit.measures <- c("cfi.scaled", "rmsea.scaled") } else { fit.measures <- c("cfi", "rmsea") } } ## add some fit measures if (length(fit.measures)) { FM <- lapply(FIT, fitMeasures, fit.measures = fit.measures, baseline.model = baseline.model) FM.table1 <- sapply(fit.measures, function(x) sapply(FM, "[[", x)) if (length(FM) == 1L) { FM.table1 <- rbind( rep(as.numeric(NA), length(fit.measures)), FM.table1) } if (length(FM) > 1L) { FM.table2 <- rbind(as.numeric(NA), abs(apply(FM.table1, 2, diff))) colnames(FM.table2) <- paste(colnames(FM.table2), ".delta", sep = "") FM.TABLE <- as.data.frame(cbind(FM.table1, FM.table2)) } else { FM.TABLE <- as.data.frame(FM.table1) } rownames(FM.TABLE) <- rownames(TABLE) class(FM.TABLE) <- c("lavaan.data.frame", "data.frame") } cat("\n") cat("Measurement invariance models:\n\n") cat(paste(paste("Model", seq_along(FIT), ":", NAMES), collapse = "\n")) cat("\n\n") print(TABLE) if (length(fit.measures)) { cat("\n\n") cat("Fit measures:\n\n") print(FM.TABLE) cat("\n") return(list(anova = TABLE, fitMeasures = FM.TABLE)) } TABLE } semTools/R/mvrnonnorm.R0000644000176200001440000001526213432435606014632 0ustar liggesusers### Yves Rosseel, Sunthud Pornprasertmanit, & Terrence D. Jorgensen ### Last updated: 9 March 2018 #' Generate Non-normal Data using Vale and Maurelli (1983) method #' #' Generate Non-normal Data using Vale and Maurelli (1983) method. The function #' is designed to be as similar as the popular \code{mvrnorm} function in the #' \code{MASS} package. The codes are copied from \code{mvrnorm} function in #' the \code{MASS} package for argument checking and \code{lavaan} package for #' data generation using Vale and Maurelli (1983) method. #' #' #' @importFrom stats cov2cor #' #' @param n Sample size #' @param mu A mean vector. If elements are named, those will be used as #' variable names in the returned data matrix. #' @param Sigma A positive-definite symmetric matrix specifying the covariance #' matrix of the variables. If rows or columns are named (and \code{mu} is #' unnamed), those will be used as variable names in the returned data matrix. #' @param skewness A vector of skewness of the variables #' @param kurtosis A vector of excessive kurtosis of the variables #' @param empirical If \code{TRUE}, \code{mu} and \code{Sigma} specify the #' empirical rather than population mean and covariance matrix #' @return A data matrix #' @author The original function is the \code{\link[lavaan]{simulateData}} #' function written by Yves Rosseel in the \code{lavaan} package. The function #' is adjusted for a convenient usage by Sunthud Pornprasertmanit #' (\email{psunthud@@gmail.com}). Terrence D. Jorgensen added the feature to #' retain variable names from \code{mu} or \code{Sigma}. #' @references Vale, C. D. & Maurelli, V. A. (1983). Simulating multivariate #' nonormal distributions. \emph{Psychometrika, 48}(3), 465--471. 
#' doi:10.1007/BF02293687 #' @examples #' #' set.seed(123) #' mvrnonnorm(20, c(1, 2), matrix(c(10, 2, 2, 5), 2, 2), #' skewness = c(5, 2), kurtosis = c(3, 3)) #' ## again, with variable names specified in mu #' set.seed(123) #' mvrnonnorm(20, c(a = 1, b = 2), matrix(c(10, 2, 2, 5), 2, 2), #' skewness = c(5, 2), kurtosis = c(3, 3)) #' #' @export mvrnonnorm <- function(n, mu, Sigma, skewness = NULL, kurtosis = NULL, empirical = FALSE) { ## number of variables p <- length(mu) if (!all(dim(Sigma) == c(p, p))) stop("incompatible arguments") ## save variable names, if they exist varnames <- names(mu) if (is.null(varnames)) varnames <- rownames(Sigma) if (is.null(varnames)) varnames <- colnames(Sigma) ## check for NPD eS <- eigen(Sigma, symmetric = TRUE) ev <- eS$values if (!all(ev >= -1e-06 * abs(ev[1L]))) stop("'Sigma' is not positive definite") ## simulate X <- NULL if (is.null(skewness) && is.null(kurtosis)) { X <- MASS::mvrnorm(n = n, mu = mu, Sigma = Sigma, empirical = empirical) } else { if (empirical) warning(c("The empirical argument does not work when the ", "Vale and Maurelli's method is used.")) if (is.null(skewness)) skewness <- rep(0, p) if (is.null(kurtosis)) kurtosis <- rep(0, p) Z <- ValeMaurelli1983copied(n = n, COR = cov2cor(Sigma), skewness = skewness, kurtosis = kurtosis) TMP <- scale(Z, center = FALSE, scale = 1/sqrt(diag(Sigma)))[ , , drop = FALSE] X <- sweep(TMP, MARGIN = 2, STATS = mu, FUN = "+") } colnames(X) <- varnames X } ## ---------------- ## Hidden Functions ## ---------------- ## Copied from lavaan package #' @importFrom stats nlminb ValeMaurelli1983copied <- function(n = 100L, COR, skewness, kurtosis, debug = FALSE) { fleishman1978_abcd <- function(skewness, kurtosis) { system.function <- function(x, skewness, kurtosis) { b.=x[1L]; c.=x[2L]; d.=x[3L] eq1 <- b.^2 + 6*b.*d. + 2*c.^2 + 15*d.^2 - 1 eq2 <- 2*c.*(b.^2 + 24*b.*d. + 105*d.^2 + 2) - skewness eq3 <- 24*(b.*d. + c.^2*(1 + b.^2 + 28*b.*d.) + d.^2*(12 + 48*b.*d. + 141*c.^2 + 225*d.^2)) - kurtosis eq <- c(eq1,eq2,eq3) sum(eq^2) ## SS } out <- nlminb(start = c(1, 0, 0), objective = system.function, scale = 10, control = list(trace = 0), skewness = skewness, kurtosis = kurtosis) if(out$convergence != 0) warning("no convergence") b. <- out$par[1L]; c. <- out$par[2L]; d. <- out$par[3L]; a. <- -c. c(a.,b.,c.,d.) 
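    ## Known special case (sanity check): for skewness = 0 and kurtosis = 0,
    ## the solution is a = 0, b = 1, c = 0, d = 0, i.e., the identity
    ## transform of a standard normal variate.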
} getICOV <- function(b1, c1, d1, b2, c2, d2, R) { objectiveFunction <- function(x, b1, c1, d1, b2, c2, d2, R) { rho=x[1L] eq <- rho*(b1*b2 + 3*b1*d2 + 3*d1*b2 + 9*d1*d2) + rho^2*(2*c1*c2) + rho^3*(6*d1*d2) - R eq^2 } #gradientFunction <- function(x, bcd1, bcd2, R) { # #} out <- nlminb(start=R, objective=objectiveFunction, scale=10, control=list(trace=0), b1=b1, c1=c1, d1=d1, b2=b2, c2=c2, d2=d2, R=R) if(out$convergence != 0) warning("no convergence") rho <- out$par[1L] rho } # number of variables nvar <- ncol(COR) # check skewness if(is.null(skewness)) { SK <- rep(0, nvar) } else if(length(skewness) == nvar) { SK <- skewness } else if(length(skewness) == 1L) { SK <- rep(skewness, nvar) } else { stop("skewness has wrong length") } if(is.null(kurtosis)) { KU <- rep(0, nvar) } else if(length(kurtosis) == nvar) { KU <- kurtosis } else if(length(kurtosis) == 1L) { KU <- rep(kurtosis, nvar) } else { stop("kurtosis has wrong length") } # create Fleishman table FTable <- matrix(0, nvar, 4L) for(i in 1:nvar) { FTable[i,] <- fleishman1978_abcd(skewness=SK[i], kurtosis=KU[i]) } # compute intermediate correlations between all pairs ICOR <- diag(nvar) for(j in 1:(nvar-1L)) { for(i in (j+1):nvar) { if(COR[i,j] == 0) next ICOR[i,j] <- ICOR[j,i] <- getICOV(FTable[i,2], FTable[i,3], FTable[i,4], FTable[j,2], FTable[j,3], FTable[j,4], R=COR[i,j]) } } if(debug) { cat("\nOriginal correlations (for Vale-Maurelli):\n") print(COR) cat("\nIntermediate correlations (for Vale-Maurelli):\n") print(ICOR) cat("\nEigen values ICOR:\n") print( eigen(ICOR)$values ) } # generate Z ## FIXME: replace by rmvnorm once we use that package X <- Z <- MASS::mvrnorm(n=n, mu=rep(0,nvar), Sigma=ICOR) # transform Z using Fleishman constants for(i in 1:nvar) { X[,i] <- FTable[i,1L] + FTable[i,2L]*Z[,i] + FTable[i,3L]*Z[,i]^2 + FTable[i,4L]*Z[,i]^3 } X } semTools/R/dataDiagnosis.R0000644000176200001440000002353113432435606015167 0ustar liggesusers### Sunthud Pornprasertmanit ### Last updated: 2 April 2017 ### Higher-order moments. Initial version from the simsem package. #' Finding skewness #' #' Finding skewness (\eqn{g_{1}}) of an object #' #' The skewness computed is \eqn{g_{1}}. The parameter skewness \eqn{\gamma_{2}} #' formula is #' #' \deqn{\gamma_{2} = \frac{\mu_{3}}{\mu^{3/2}_{2}},} #' #' where \eqn{\mu_{i}} denotes the \eqn{i} order central moment. #' #' The excessive kurtosis formula for sample statistic \eqn{g_{2}} is #' #' \deqn{g_{2} = \frac{k_{3}}{k^{2}_{2}},} #' #' where \eqn{k_{i}} are the \eqn{i} order \emph{k}-statistic. #' #' The standard error of the skewness is #' #' \deqn{Var(\hat{g}_2) = \frac{6}{N}} #' #' where \eqn{N} is the sample size. #' #' #' @importFrom stats pnorm #' #' @param object A vector used to find a skewness #' @param population \code{TRUE} to compute the parameter formula. \code{FALSE} #' to compute the sample statistic formula. #' @return A value of a skewness with a test statistic if the population is #' specified as \code{FALSE} #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @seealso \itemize{ #' \item \code{\link{kurtosis}} Find the univariate excessive kurtosis #' of a variable #' \item \code{\link{mardiaSkew}} Find Mardia's multivariate skewness #' of a set of variables #' \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate #' kurtosis of a set of variables #' } #' @references Weisstein, Eric W. (n.d.). \emph{Skewness}. 
Retrived from #' \emph{MathWorld}--A Wolfram Web Resource: #' \url{http://mathworld.wolfram.com/Skewness.html} #' @examples #' #' skew(1:5) #' #' @export skew <- function(object, population = FALSE) { if(any(is.na(object))) { object <- object[!is.na(object)] warning("Missing observations are removed from a vector.") } if(population) { return(centralMoment(object, 3)/(centralMoment(object, 2)^(3/2))) } else { est <- kStat(object, 3)/(kStat(object, 2)^(3/2)) se <- sqrt(6/length(object)) z <- est/se p <- (1 - pnorm(abs(z)))*2 return(c("skew (g1)"=est, se=se, z=z, p=p)) } } #' Finding excessive kurtosis #' #' Finding excessive kurtosis (\eqn{g_{2}}) of an object #' #' The excessive kurtosis computed is \eqn{g_{2}}. The parameter excessive #' kurtosis \eqn{\gamma_{2}} formula is #' #' \deqn{\gamma_{2} = \frac{\mu_{4}}{\mu^{2}_{2}} - 3,} #' #' where \eqn{\mu_{i}} denotes the \eqn{i} order central moment. #' #' The excessive kurtosis formula for sample statistic \eqn{g_{2}} is #' #' \deqn{g_{2} = \frac{k_{4}}{k^{2}_{2}},} #' #' where \eqn{k_{i}} are the \eqn{i} order \emph{k}-statistic. #' #' The standard error of the excessive kurtosis is #' #' \deqn{Var(\hat{g}_{2}) = \frac{24}{N}} #' #' where \eqn{N} is the sample size. #' #' #' @importFrom stats pnorm #' #' @param object A vector used to find a excessive kurtosis #' @param population \code{TRUE} to compute the parameter formula. \code{FALSE} #' to compute the sample statistic formula. #' @return A value of an excessive kurtosis with a test statistic if the #' population is specified as \code{FALSE} #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @seealso \itemize{ #' \item \code{\link{skew}} Find the univariate skewness of a variable #' \item \code{\link{mardiaSkew}} Find the Mardia's multivariate #' skewness of a set of variables #' \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate kurtosis #' of a set of variables #' } #' @references Weisstein, Eric W. (n.d.). \emph{Kurtosis.} Retrived from #' \emph{MathWorld}--A Wolfram Web Resource: #' \url{http://mathworld.wolfram.com/Kurtosis.html} #' @examples #' #' kurtosis(1:5) #' #' @export kurtosis <- function(object, population = FALSE) { if(any(is.na(object))) { object <- object[!is.na(object)] warning("Missing observations are removed from a vector.") } if(population) { return((centralMoment(object, 4)/(centralMoment(object, 2)^2)) - 3) } else { est <- kStat(object, 4)/(kStat(object, 2)^(2)) se <- sqrt(24/length(object)) z <- est/se p <- (1 - pnorm(abs(z)))*2 return(c("Excess Kur (g2)"=est, se=se, z=z, p=p)) } } #' Finding Mardia's multivariate skewness #' #' Finding Mardia's multivariate skewness of multiple variables #' #' The Mardia's multivariate skewness formula (Mardia, 1970) is #' \deqn{ b_{1, d} = \frac{1}{n^2}\sum^n_{i=1}\sum^n_{j=1}\left[ #' \left(\bold{X}_i - \bold{\bar{X}} \right)^{'} \bold{S}^{-1} #' \left(\bold{X}_j - \bold{\bar{X}} \right) \right]^3, } #' where \eqn{d} is the number of variables, \eqn{X} is the target dataset #' with multiple variables, \eqn{n} is the sample size, \eqn{\bold{S}} is #' the sample covariance matrix of the target dataset, and \eqn{\bold{\bar{X}}} #' is the mean vectors of the target dataset binded in \eqn{n} rows. #' When the population multivariate skewness is normal, the #' \eqn{\frac{n}{6}b_{1,d}} is asymptotically distributed as \eqn{\chi^2} #' distribution with \eqn{d(d + 1)(d + 2)/6} degrees of freedom. 
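#'
#' For example, with \eqn{d = 3} variables, the test statistic is referred
#' to a \eqn{\chi^2} distribution with \eqn{3(3 + 1)(3 + 2)/6 = 10}
#' degrees of freedom.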
#' #' #' @importFrom stats cov pchisq #' #' @param dat The target matrix or data frame with multiple variables #' @param use Missing data handling method from the \code{\link[stats]{cov}} #' function. #' @return A value of a Mardia's multivariate skewness with a test statistic #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @seealso \itemize{ #' \item \code{\link{skew}} Find the univariate skewness of a variable #' \item \code{\link{kurtosis}} Find the univariate excessive #' kurtosis of a variable #' \item \code{\link{mardiaKurtosis}} Find the Mardia's multivariate #' kurtosis of a set of variables #' } #' @references Mardia, K. V. (1970). Measures of multivariate skewness and #' kurtosis with applications. \emph{Biometrika, 57}(3), 519-530. #' doi:10.2307/2334770 #' @examples #' #' library(lavaan) #' mardiaSkew(HolzingerSwineford1939[ , paste0("x", 1:9)]) #' #' @export mardiaSkew <- function(dat, use = "everything") { centeredDat <- scale(dat, center=TRUE, scale=FALSE) invS <- solve(cov(dat, use = use)) FUN <- function(vec1, vec2, invS) { as.numeric(t(as.matrix(vec1)) %*% invS %*% as.matrix(vec2)) } FUN2 <- function(vec1, listVec2, invS) { sapply(listVec2, FUN, vec1=vec1, invS=invS) } indivTerm <- sapply(as.list(data.frame(t(centeredDat))), FUN2, listVec2=as.list(data.frame(t(centeredDat))), invS=invS) b1d <- sum(indivTerm^3)/(nrow(dat)^2) d <- ncol(dat) chi <- nrow(dat) * b1d / 6 df <- d * (d + 1) * (d + 2) / 6 p <- pchisq(chi, df = df, lower.tail = FALSE) return(c(b1d = b1d, chi = chi, df=df, p=p)) } #' Finding Mardia's multivariate kurtosis #' #' Finding Mardia's multivariate kurtosis of multiple variables #' #' The Mardia's multivariate kurtosis formula (Mardia, 1970) is #' \deqn{ b_{2, d} = \frac{1}{n}\sum^n_{i=1}\left[ \left(\bold{X}_i - #' \bold{\bar{X}} \right)^{'} \bold{S}^{-1} \left(\bold{X}_i - #' \bold{\bar{X}} \right) \right]^2, } #' where \eqn{d} is the number of variables, \eqn{X} is the target #' dataset with multiple variables, \eqn{n} is the sample size, \eqn{\bold{S}} #' is the sample covariance matrix of the target dataset, and #' \eqn{\bold{\bar{X}}} is the mean vectors of the target dataset binded in #' \eqn{n} rows. When the population multivariate kurtosis is normal, the #' \eqn{b_{2,d}} is asymptotically distributed as normal distribution with the #' mean of \eqn{d(d + 2)} and variance of \eqn{8d(d + 2)/n}. #' #' #' @importFrom stats cov pnorm #' #' @param dat The target matrix or data frame with multiple variables #' @param use Missing data handling method from the \code{\link[stats]{cov}} #' function. #' @return A value of a Mardia's multivariate kurtosis with a test statistic #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @seealso \itemize{ #' \item \code{\link{skew}} Find the univariate skewness of a variable #' \item \code{\link{kurtosis}} Find the univariate excessive kurtosis #' of a variable #' \item \code{\link{mardiaSkew}} Find the Mardia's multivariate skewness #' of a set of variables #' } #' @references Mardia, K. V. (1970). Measures of multivariate skewness and #' kurtosis with applications. \emph{Biometrika, 57}(3), 519-530. 
#' doi:10.2307/2334770 #' @examples #' #' library(lavaan) #' mardiaKurtosis(HolzingerSwineford1939[ , paste0("x", 1:9)]) #' #' @export mardiaKurtosis <- function(dat, use = "everything") { centeredDat <- scale(dat, center=TRUE, scale=FALSE) invS <- solve(cov(dat, use = use)) FUN <- function(vec, invS) { as.numeric(t(as.matrix(vec)) %*% invS %*% as.matrix(vec)) } indivTerm <- sapply(as.list(data.frame(t(centeredDat))), FUN, invS=invS) b2d <- sum(indivTerm^2)/nrow(dat) d <- ncol(dat) m <- d * (d + 2) v <- 8 * d * (d + 2) / nrow(dat) z <- (b2d - m)/sqrt(v) p <- pnorm(-abs(z)) * 2 return(c(b2d = b2d, z = z, p=p)) } ## ---------------- ## Hidden Functions ## ---------------- ## centralMoment ## Calculate central moments of a variable ## Arguments: ## x: vector of a variable ## ord: order of the moment ## weight: weight variable centralMoment <- function(x, ord) { if(ord < 2) stop("Central moment can be calculated for order 2 or more in an integer.") wm <- mean(x) result <- sum((x - wm)^(ord))/length(x) return(result) } # Example # centralMoment(1:5, 2) # kStat # Calculate the k-statistic (i.e., unbiased estimator of a cumulant) of a variable # Arguments: # x: vector of a variable # ord: order of the k-statistics kStat <- function(x, ord) { # Formula from mathworld wolfram n <- length(x) if(ord == 1) { return(mean(x)) } else if (ord == 2) { return(centralMoment(x, 2) * n / (n - 1)) } else if (ord == 3) { return(centralMoment(x, 3) * n^2 / ((n - 1) * (n - 2))) } else if (ord == 4) { num1 <- n^2 num2 <- (n + 1) * centralMoment(x, 4) num3 <- 3 * (n - 1) * centralMoment(x, 2)^2 denom <- (n - 1) * (n - 2) * (n - 3) return((num1 * (num2 - num3))/denom) } else { stop("Order can be 1, 2, 3, or 4 only.") } } # Example # kStat(1:5, 4) semTools/R/measurementInvarianceCat.R0000644000176200001440000002734213432435606017376 0ustar liggesusers### Sunthud Pornprasertmanit, Yves Rosseel, & Terrence D. Jorgensen ### Last updated: 1 September 2018 ### automate measurement invariance tests for categorical indicators ##' Measurement Invariance Tests for Categorical Items ##' ##' Testing measurement invariance across groups using a typical sequence of ##' model comparison tests. ##' ##' Theta parameterization is used to represent SEM for categorical items. That ##' is, residual variances are modeled instead of the total variance of ##' underlying normal variate for each item. Five models can be tested based on ##' different constraints across groups. ##' \enumerate{ ##' \item Model 1: configural invariance. The same factor structure is imposed ##' on all groups. ##' \item Model 2: weak invariance. The factor loadings are constrained to be ##' equal across groups. ##' \item Model 3: strong invariance. The factor loadings and thresholds are ##' constrained to be equal across groups. ##' \item Model 4: strict invariance. The factor loadings, thresholds and ##' residual variances are constrained to be equal across groups. ##' For categorical variables, all residual variances are fixed as 1. ##' \item Model 5: The factor loadings, threshoulds, residual variances and ##' means are constrained to be equal across groups. ##' } ##' ##' However, if all items have two items (dichotomous), scalar invariance and ##' weak invariance cannot be separated because thresholds need to be equal ##' across groups for scale identification. Users can specify \code{strict} ##' option to include the strict invariance model for the invariance testing. 
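##' For example, if every item is dichotomous, each item contributes a
##' single threshold that is already constrained for identification, so the
##' separate thresholds model is skipped and \code{fit.thresholds} is
##' omitted from the returned list.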
##' See the further details of scale identification and different ##' parameterization in Millsap and Yun-Tein (2004). ##' ##' @importFrom lavaan lavInspect parTable ##' ##' @param ... The same arguments as for any lavaan model. See ##' \code{\link{cfa}} for more information. ##' @param std.lv If \code{TRUE}, the fixed-factor method of scale ##' identification is used. If \code{FALSE}, the first variable for each ##' factor is used as marker variable. ##' @param strict If \code{TRUE}, the sequence requires `strict' invariance. ##' See details for more information. ##' @param quiet If \code{FALSE} (default), a summary is printed out containing ##' an overview of the different models that are fitted, together with some ##' model comparison tests. If \code{TRUE}, no summary is printed. ##' @param fit.measures Fit measures used to calculate the differences between ##' nested models. ##' @param baseline.model custom baseline model passed to ##' \code{\link[lavaan]{fitMeasures}} ##' @param method The method used to calculate likelihood ratio test. See ##' \code{\link[lavaan]{lavTestLRT}} for available options ##' ##' @return Invisibly, all model fits in the sequence are returned as a list. ##' ##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@gmail.com}) ##' ##' @references Millsap, R. E., & Yun-Tein, J. (2004). Assessing factorial ##' invariance in ordered-categorical measures. \emph{Multivariate Behavioral ##' Research, 39}(3), 479--515. doi:10.1207/S15327906MBR3903_4 ##' ##' @examples ##' ##' \dontrun{ ##' syntax <- ' f1 =~ u1 + u2 + u3 + u4' ##' ##' measurementInvarianceCat(model = syntax, data = datCat, group = "g", ##' parameterization = "theta", estimator = "wlsmv", ##' ordered = c("u1", "u2", "u3", "u4")) ##' } ##' ##' @name measurementInvarianceCat-deprecated ##' @usage ##' measurementInvarianceCat(..., std.lv = FALSE, strict = FALSE, ##' quiet = FALSE, fit.measures = "default", ##' baseline.model = NULL, method = "default") ##' @seealso \code{\link{semTools-deprecated}} ##' @keywords internal NULL ##' @rdname semTools-deprecated ##' ##' @export measurementInvarianceCat <- function(..., std.lv = FALSE, strict = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "default") { .Deprecated(msg = c("The measurementInvarianceCat function is deprecated, and", " it will cease to be included in future versions of ", "semTools. See help('semTools-deprecated) for details.")) List <- list(...) if (is.null(List$model)) stop('all lavaan() and lavOptions() arguments must ', 'named, including the "model=" argument.') lavaancfa <- function(...) { lavaan::cfa(...) } lavaanlavaan <- function(...) { lavaan::lavaan(...) 
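    ## wrapper calls lavaan::lavaan() through its namespace, so do.call()
    ## below works even when lavaan is not attached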
} if (!is.null(List$parameterization) && tolower(List$parameterization) != "theta") warning("The parameterization is set to 'theta' by default.") List$parameterization <- "theta" # Find the number of groups if (is.null(List$group)) stop("Please specify the group variable") # Get the lavaan parameter table template <- do.call(lavaancfa, c(List, do.fit = FALSE)) lavaanParTable <- parTable(template) # Find the number of groups ngroups <- max(lavaanParTable$group) # Check whether all variables are categorical sampstat <- lavInspect(template, "samp")[[1]] meanname <- names(sampstat$mean) thname <- names(sampstat$th) if (any(is.na(charmatch(meanname, thname)))) stop("Some variables in your model are not identified as categorical.") varList <- lavaanParTable$rhs[lavaanParTable$op == "=~"] facName <- lavaanParTable$lhs[(lavaanParTable$op == "=~") & (lavaanParTable$rhs %in% varList)] if (length(unique(sapply(facName, function(x) length(x)))) > 1) stop("The numbers of variables in each element are not equal.") varList <- unique(varList) facName <- unique(facName) # Check whether the factor configuration is the same across gorups groupParTable <- split(lavaanParTable, lavaanParTable$group) group1pt <- groupParTable[[1]] groupParTable <- lapply(groupParTable, "[", c("lhs", "op", "rhs")) if (!multipleAllEqualList(lapply(groupParTable, function(x) sapply(x, "[", x$op == "=~")))) stop("Factor configuration is not the same across groups") # Extract the number of thresholds numThreshold <- table(sapply(group1pt, "[", group1pt$op == "|")[,"lhs"]) # Find the indicators of each factor group1facload <- sapply(group1pt, "[", group1pt$op == "=~") factorRep <- split(group1facload[,"rhs"], group1facload[,"lhs"]) # Find marker variables marker <- rep(NA, length(factorRep)) numThresholdMarker <- rep(NA, length(factorRep)) for (i in seq_along(factorRep)) { temp <- sapply(group1pt, "[", group1pt$rhs %in% factorRep[[i]] & group1pt$op == "=~" & group1pt$lhs == names(factorRep)[i]) marker[i] <- temp[!is.na(temp[,"ustart"]), "rhs"] numThresholdMarker[i] <- numThreshold[marker[i]] } numThresholdFactorRep <- lapply(factorRep, function(x) numThreshold[x]) constraintSecondThreshold <- unlist(lapply(numThresholdFactorRep, function(x) names(which(x > 1)[1]))) constraintSecondThreshold <- constraintSecondThreshold[!is.na(constraintSecondThreshold)] # Find the marker variable of each facto for (i in names(numThreshold)) { lavaanParTable <- constrainParTable(lavaanParTable, i, "|", "t1", 1:ngroups) } if (length(constraintSecondThreshold) > 0) { for (i in constraintSecondThreshold) { lavaanParTable <- constrainParTable(lavaanParTable, i, "|", "t2", 1:ngroups) } } # Group 1 for (i in facName) { lavaanParTable <- fixParTable(lavaanParTable, i, "~1", "", 1, 0) # Fix factor means as 0 if (std.lv) { lavaanParTable <- fixParTable(lavaanParTable, i, "~~", i, 1, 1) } else { lavaanParTable <- freeParTable(lavaanParTable, i, "~~", i, 1, NA) # Free factor variances } # Assuming that all factor covariances are freeParTable } for (i in varList) { lavaanParTable <- fixParTable(lavaanParTable, i, "~~", i, 1, 1) } # Other groups for (k in 2:ngroups) { for (i in facName) { lavaanParTable <- freeParTable(lavaanParTable, i, "~1", "", k, NA) if (std.lv) { lavaanParTable <- fixParTable(lavaanParTable, i, "~~", i, k, 1) } else { lavaanParTable <- freeParTable(lavaanParTable, i, "~~", i, k, NA) } } for (i in varList) { lavaanParTable <- freeParTable(lavaanParTable, i, "~~", i, k, NA) } # Fix the indicator variances of marker variables with two categories 
as 1 for (i in seq_along(marker)) { if (numThresholdMarker[i] == 1) lavaanParTable <- fixParTable(lavaanParTable, marker[i], "~~", marker[i], k, 1) } } if (std.lv) { for (i in seq_along(factorRep)) { lavaanParTable <- freeParTable(lavaanParTable, names(factorRep)[i], "=~", marker[i], 1:ngroups, NA) } } # Fit configural invariance ListConfigural <- List ListConfigural$model <- lavaanParTable fitConfigural <- try(do.call(lavaanlavaan, ListConfigural), silent = TRUE) # Create the parameter table for metric invariance ptMetric <- lavaanParTable for (i in seq_along(factorRep)) { varwithin <- factorRep[[i]] if (!std.lv) { varwithin <- setdiff(varwithin, marker[i]) } for (j in seq_along(varwithin)) { ptMetric <- constrainParTable(ptMetric, names(factorRep)[i], "=~", varwithin[j], 1:ngroups) } } if (std.lv) { for (k in 2:ngroups) { for (i in facName) { ptMetric <- freeParTable(ptMetric, i, "~~", i, k, NA) } } } ListMetric <- List ListMetric$model <- ptMetric fitMetric <- try(do.call(lavaanlavaan, ListMetric), silent = TRUE) ptMeans <- ptStrict <- ptMetric nonMarker <- setdiff(names(numThreshold), marker) nonDichoMarker <- numThreshold[which(numThreshold[nonMarker] > 1)] scalar <- length(nonDichoMarker) > 0 if (scalar) { ptScalar <- ptMetric for (i in seq_along(numThreshold)) { thresholdName <- paste0("t", 1:numThreshold[i]) for(j in seq_along(thresholdName)) { ptScalar <- constrainParTable(ptScalar, names(numThreshold)[i], "|", thresholdName[j], 1:ngroups) } } ListScalar <- List ListScalar$model <- ptScalar fitScalar <- try(do.call(lavaanlavaan, ListScalar), silent = TRUE) ptMeans <- ptStrict <- ptScalar } else fitScalar <- NULL fitStrict <- NULL # Create the parameter table for strict invariance if specified if (strict) { if (scalar) ptStrict <- ptScalar for (k in 2:ngroups) { # Constrain measurement error variances for (i in varList) { ptStrict <- fixParTable(ptStrict, i, "~~", i, k, 1) } } ListStrict <- List ListStrict$model <- ptStrict fitStrict <- try(do.call(lavaanlavaan, ListStrict), silent = TRUE) ptMeans <- ptStrict } # Create the parameter table for mean equality # Constrain factor means to be equal for (k in 2:ngroups) { ptMeans <- fixParTable(ptMeans, facName, "~1", "", k, ustart = 0) } ListMeans <- List ListMeans$model <- ptMeans fitMeans <- try(do.call(lavaanlavaan, ListMeans), silent = TRUE) FIT <- invisible(list(fit.configural = fitConfigural, fit.loadings = fitMetric, fit.thresholds = fitScalar, fit.residuals = fitStrict, fit.means = fitMeans)) FIT <- FIT[!sapply(FIT, is.null)] if (!quiet) { printInvarianceResult(FIT, fit.measures, baseline.model, method) } invisible(FIT) } ## ---------------- ## Hidden Functions ## ---------------- multipleAllEqual <- function(...) { obj <- list(...) multipleAllEqualList(obj) } multipleAllEqualList <- function(obj) { for (i in 2:length(obj)) { for (j in 1:(i - 1)) { temp <- isTRUE(all.equal(obj[[i]], obj[[j]])) if (!temp) return(FALSE) } } return(TRUE) } multipleAnyEqual <- function(...) { obj <- list(...) multipleAnyEqualList(obj) } multipleAnyEqualList <- function(obj) { for (i in 2:length(obj)) { for (j in 1:(i - 1)) { temp <- isTRUE(all.equal(obj[[i]], obj[[j]])) if (temp) return(TRUE) } } return(FALSE) } semTools/R/missingBootstrap.R0000644000176200001440000010157713477656613016007 0ustar liggesusers### Terrence D. 
Jorgensen ### Last updated: 11 June 2019 ### Savalei & Yuan's (2009) model-based bootstrap for missing data ## ---------------------------- ## "BootMiss" Class and Methods ## ---------------------------- ##' Class For the Results of Bollen-Stine Bootstrap with Incomplete Data ##' ##' This class contains the results of Bollen-Stine bootstrap with missing data. ##' ##' ##' @name BootMiss-class ##' @aliases BootMiss-class show,BootMiss-method summary,BootMiss-method ##' hist,BootMiss-method ##' @docType class ##' @section Objects from the Class: Objects can be created via the ##' \code{\link{bsBootMiss}} function. ##' @slot time A list containing 2 \code{difftime} objects (\code{transform} ##' and \code{fit}), indicating the time elapsed for data transformation and ##' for fitting the model to bootstrap data sets, respectively. ##' @slot transData Transformed data ##' @slot bootDist The vector of \eqn{chi^2} values from bootstrap data sets ##' fitted by the target model ##' @slot origChi The \eqn{chi^2} value from the original data set ##' @slot df The degree of freedom of the model ##' @slot bootP The \emph{p} value comparing the original \eqn{chi^2} with the ##' bootstrap distribution ##' @author Terrence D. Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' @seealso \code{\link{bsBootMiss}} ##' @examples ##' ##' # See the example from the bsBootMiss function ##' setClass("BootMiss", representation(time = "list", transData = "data.frame", bootDist = "vector", origChi = "numeric", df = "numeric", bootP = "numeric")) ##' @rdname BootMiss-class ##' @aliases show,BootMiss-method ##' @importFrom stats pchisq ##' @export setMethod("show", "BootMiss", function(object) { cat("Chi-Squared = ", object@origChi, "\nDegrees of Freedom = ", object@df, "\nTheoretical p value = ", pchisq(object@origChi, object@df, lower.tail = FALSE), "\n i.e., pchisq(", object@origChi, ", df = ", object@df, ", lower.tail = FALSE)\n", "\nBootstrapped p value = ", object@bootP, "\n\n", sep = "") invisible(object) }) ##' @rdname BootMiss-class ##' @aliases summary,BootMiss-method ##' @importFrom stats var ##' @export setMethod("summary", "BootMiss", function(object) { cat("Time elapsed to transform the data:\n") print(object@time$transform) cat("\nTime elapsed to fit the model to", length(object@bootDist), "bootstrapped samples:\n") print(object@time$fit) cat("\nMean of Theoretical Distribution = DF =", object@df, "\nVariance of Theoretical Distribution = 2*DF =", 2*object@df, "\n\nMean of Bootstrap Distribution =", mean(object@bootDist), "\nVariance of Bootstrap Distribution =", var(object@bootDist), "\n\n") show(object) invisible(object) }) ##' @rdname BootMiss-class ##' @aliases hist,BootMiss-method ##' @importFrom stats qchisq dchisq quantile ##' @param object,x object of class \code{BootMiss} ##' @param ... Additional arguments to pass to \code{\link[graphics]{hist}} ##' @param alpha alpha level used to draw confidence limits ##' @param nd number of digits to display ##' @param printLegend \code{logical}. If \code{TRUE} (default), a legend will ##' be printed with the histogram ##' @param legendArgs \code{list} of arguments passed to the ##' \code{\link[graphics]{legend}} function. The default argument is a list ##' placing the legend at the top-left of the figure. ##' @return The \code{hist} method returns a list of \code{length == 2}, ##' containing the arguments for the call to \code{hist} and the arguments ##' to the call for \code{legend}, respectively. 
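##' As a sketch of one possible use (with a hypothetical object \code{out}),
##' the histogram itself can be redrawn later from the returned arguments:
##' \code{H <- hist(out); do.call(hist, H$hist)}.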
##' @export setMethod("hist", "BootMiss", function(x, ..., alpha = .05, nd = 2, printLegend = TRUE, legendArgs = list(x = "topleft")) { ChiSq <- x@origChi DF <- x@df bootDist <- x@bootDist bCrit <- round(quantile(bootDist, probs = 1 - alpha), nd) theoDist <- dchisq(seq(.1, max(c(ChiSq, bootDist)), by = .1), df = DF) Crit <- round(qchisq(p = alpha, df = DF, lower.tail = FALSE), nd) Lim <- c(0, max(c(ChiSq, bootDist, Crit))) if (ChiSq > Lim[2]) Lim[2] <- ChiSq histArgs <- list(...) histArgs$x <- bootDist histArgs$freq <- FALSE if (is.null(histArgs$col)) histArgs$col <- "grey69" if (is.null(histArgs$xlim)) histArgs$xlim <- Lim if (is.null(histArgs$main)) histArgs$main <- expression("Model-Based Bootstrap Distribution of" ~ chi^2) if (is.null(histArgs$ylab)) histArgs$ylab <- "Probability Density" if (is.null(histArgs$xlab)) histArgs$xlab <- expression(chi^2) if (printLegend) { if (nd < length(strsplit(as.character(1 / alpha), "")[[1]]) - 1) { warning(paste0("The number of digits argument (nd = ", nd , ") is too low to display your p value at the", " same precision as your requested alpha level (alpha = ", alpha, ")")) } if (x@bootP < (1 / 10^nd)) { pVal <- paste(c("< .", rep(0, nd - 1),"1"), collapse = "") } else { pVal <- paste("=", round(x@bootP, nd)) } if (is.null(legendArgs$box.lty)) legendArgs$box.lty <- 0 if (is.null(legendArgs$lty)) legendArgs$lty <- c(1, 2, 2, 1, 0, 0) if (is.null(legendArgs$lwd)) legendArgs$lwd <- c(2, 2, 2, 3, 0, 0) #if (is.null(legendArgs$cex)) legendArgs$cex <- c(1.1, 1, 1, 1, 1, 1) if (is.null(legendArgs$col)) legendArgs$col <- c("black","black","grey69","red","", "") legendArgs$legend <- c(bquote(chi[.(paste("df =", DF))]^2), bquote(Critical ~ chi[alpha ~ .(paste(" =", alpha))]^2 == .(Crit)), bquote(Bootstrap~Critical~chi[alpha ~ .(paste(" =", alpha))]^2 == .(bCrit)), expression(Observed ~ chi^2), bquote(.("")), bquote(Bootstrap ~ italic(p) ~~ .(pVal))) } H <- do.call(hist, c(histArgs["x"], plot = FALSE)) histArgs$ylim <- c(0, max(H$density, theoDist)) suppressWarnings({ do.call(hist, histArgs) lines(x = seq(.1, max(c(ChiSq, bootDist)), by = .1), y = theoDist, lwd = 2) abline(v = Crit, col = "black", lwd = 2, lty = 2) abline(v = bCrit, col = "grey69", lwd = 2, lty = 2) abline(v = ChiSq, col = "red", lwd = 3) if (printLegend) do.call(legend, legendArgs) }) ## return arguments to create histogram (and optionally, legend) invisible(list(hist = histArgs, legend = legendArgs)) }) ## -------------------- ## Constructor Function ## -------------------- ##' Bollen-Stine Bootstrap with the Existence of Missing Data ##' ##' Implement the Bollen and Stine's (1992) Bootstrap when missing observations ##' exist. The implemented method is proposed by Savalei and Yuan (2009). This ##' can be used in two ways. The first and easiest option is to fit the model to ##' incomplete data in \code{lavaan} using the FIML estimator, then pass that ##' \code{lavaan} object to \code{bsBootMiss}. ##' ##' The second is designed for users of other software packages (e.g., LISREL, ##' EQS, Amos, or Mplus). Users can import their data, \eqn{\chi^2} value, and ##' model-implied moments from another package, and they have the option of ##' saving (or writing to a file) either the transformed data or bootstrapped ##' samples of that data, which can be analyzed in other programs. In order to ##' analyze the bootstrapped samples and return a \emph{p} value, users of other ##' programs must still specify their model using lavaan syntax. 
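##'
##' For instance (a hypothetical two-step workflow), a user of another SEM
##' program could call \code{bsBootMiss(..., writeBootData = TRUE)} to save
##' the stacked bootstrap samples to a text file, analyze each sample in
##' that program, and compare the resulting test statistics to the original
##' \eqn{\chi^2}.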
##' ##' ##' @importFrom lavaan lavInspect parTable ##' @param x A target \code{lavaan} object used in the Bollen-Stine bootstrap ##' @param transformation The transformation methods in Savalei and Yuan (2009). ##' There are three methods in the article, but only the first two are currently ##' implemented here. Use \code{transformation = 1} when there are few missing ##' data patterns, each of which has a large size, such as in a ##' planned-missing-data design. Use \code{transformation = 2} when there are ##' more missing data patterns. The currently unavailable ##' \code{transformation = 3} would be used when several missing data patterns ##' have n = 1. ##' @param nBoot The number of bootstrap samples. ##' @param model Optional. The target model if \code{x} is not provided. ##' @param rawData Optional. The target raw data set if \code{x} is not ##' provided. ##' @param Sigma Optional. The model-implied covariance matrix if \code{x} is ##' not provided. ##' @param Mu Optional. The model-implied mean vector if \code{x} is not ##' provided. ##' @param group Optional character string specifying the name of the grouping ##' variable in \code{rawData} if \code{x} is not provided. ##' @param ChiSquared Optional. The model's \eqn{\chi^2} test statistic if ##' \code{x} is not provided. ##' @param EMcov Optional, if \code{x} is not provided. The EM (or Two-Stage ML) ##' estimated covariance matrix used to speed up Transformation 2 algorithm. ##' @param transDataOnly Logical. If \code{TRUE}, the result will provide the ##' transformed data only. ##' @param writeTransData Logical. If \code{TRUE}, the transformed data set is ##' written to a text file, \code{transDataOnly} is set to \code{TRUE}, and the ##' transformed data is returned invisibly. ##' @param bootSamplesOnly Logical. If \code{TRUE}, the result will provide ##' bootstrap data sets only. ##' @param writeBootData Logical. If \code{TRUE}, the stacked bootstrap data ##' sets are written to a text file, \code{bootSamplesOnly} is set to ##' \code{TRUE}, and the list of bootstrap data sets are returned invisibly. ##' @param writeArgs Optional \code{list}. If \code{writeBootData = TRUE} or ##' \code{writeBootData = TRUE}, user can pass arguments to the ##' \code{\link[utils]{write.table}} function as a list. Some default values ##' are provided: \code{file} = "bootstrappedSamples.dat", \code{row.names} = ##' \code{FALSE}, and \code{na} = "-999", but the user can override all of these ##' by providing other values for those arguments in the \code{writeArgs} list. ##' @param seed The seed number used in randomly drawing bootstrap samples. ##' @param suppressWarn Logical. If \code{TRUE}, warnings from \code{lavaan} ##' function will be suppressed when fitting the model to each bootstrap sample. ##' @param showProgress Logical. Indicating whether to display a progress bar ##' while fitting models to bootstrap samples. ##' @param \dots The additional arguments in the \code{\link[lavaan]{lavaan}} ##' function. See also \code{\link[lavaan]{lavOptions}} ##' @return As a default, this function returns a \code{\linkS4class{BootMiss}} ##' object containing the results of the bootstrap samples. Use \code{show}, ##' \code{summary}, or \code{hist} to examine the results. Optionally, the ##' transformed data set is returned if \code{transDataOnly = TRUE}. Optionally, ##' the bootstrap data sets are returned if \code{bootSamplesOnly = TRUE}. ##' @author Terrence D. 
Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' Syntax for transformations borrowed from http://www2.psych.ubc.ca/~vsavalei/ ##' @seealso \code{\linkS4class{BootMiss}} ##' @references ##' ##' Bollen, K. A., & Stine, R. A. (1992). Bootstrapping goodness-of-fit measures ##' in structural equation models. \emph{Sociological Methods & ##' Research, 21}(2), 205--229. doi:10.1177/0049124192021002004 ##' ##' Savalei, V., & Yuan, K.-H. (2009). On the model-based bootstrap with missing ##' data: Obtaining a p-value for a test of exact fit. \emph{Multivariate ##' Behavioral Research, 44}(6), 741--763. doi:10.1080/00273170903333590 ##' @examples ##' ##' \dontrun{ ##' dat1 <- HolzingerSwineford1939 ##' dat1$x5 <- ifelse(dat1$x1 <= quantile(dat1$x1, .3), NA, dat1$x5) ##' dat1$x9 <- ifelse(is.na(dat1$x5), NA, dat1$x9) ##' ##' targetModel <- " ##' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ##' " ##' targetFit <- sem(targetModel, dat1, meanstructure = TRUE, std.lv = TRUE, ##' missing = "fiml", group = "school") ##' summary(targetFit, fit = TRUE, standardized = TRUE) ##' ##' # The number of bootstrap samples should be much higher. ##' temp <- bsBootMiss(targetFit, transformation = 1, nBoot = 10, seed = 31415) ##' ##' temp ##' summary(temp) ##' hist(temp) ##' hist(temp, printLegend = FALSE) # suppress the legend ##' ## user can specify alpha level (default: alpha = 0.05), and the number of ##' ## digits to display (default: nd = 2). Pass other arguments to hist(...), ##' ## or a list of arguments to legend() via "legendArgs" ##' hist(temp, alpha = .01, nd = 3, xlab = "something else", breaks = 25, ##' legendArgs = list("bottomleft", box.lty = 2)) ##' } ##' ##' @export bsBootMiss <- function(x, transformation = 2, nBoot = 500, model, rawData, Sigma, Mu, group, ChiSquared, EMcov, writeTransData = FALSE, transDataOnly = FALSE, writeBootData = FALSE, bootSamplesOnly = FALSE, writeArgs, seed = NULL, suppressWarn = TRUE, showProgress = TRUE, ...) { if(writeTransData) transDataOnly <- TRUE if(writeBootData) bootSamplesOnly <- TRUE check.nBoot <- (!is.numeric(nBoot) | nBoot < 1L) & !transDataOnly if (check.nBoot) stop("The \"nBoot\" argument must be a positive integer.") ## Which transformation? if (!(transformation %in% 1:2)) stop("User must specify transformation 1 or 2. Consult Savalei & Yuan (2009) for advice. Transformation 3 is not currently available.") if (transformation == 2) SavaleiYuan <- trans2 #if (transformation == 3) SavaleiYuan <- trans3 ###################### ## Data Preparation ## ###################### ## If a lavaan object is supplied, the extracted values for rawData, Sigma, Mu, ## EMcov, and EMmeans will override any user-supplied arguments. 
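  ## (For a single-group fit, lavInspect() returns unlisted moments and data,
  ##  so they are wrapped in one-element lists below to allow uniform
  ##  handling of the single- and multiple-group cases.)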
if (hasArg(x)) { if (!lavInspect(x, "options")$meanstructure) stop('You must fit the lavaan model with the argument "meanstructure=TRUE".') nG <- lavInspect(x, "ngroups") if (nG == 1L) { rawData <- list(as.data.frame(lavInspect(x, "data"))) } else rawData <- lapply(lavInspect(x, "data"), as.data.frame) for (g in seq_along(rawData)) { colnames(rawData[[g]]) <- lavaan::lavNames(x) checkAllMissing <- apply(rawData[[g]], 1, function(x) all(is.na(x))) if (any(checkAllMissing)) rawData[[g]] <- rawData[[g]][!checkAllMissing, ] } ChiSquared <- lavInspect(x, "fit")[c("chisq", "chisq.scaled")] ChiSquared <- ifelse(is.na(ChiSquared[2]), ChiSquared[1], ChiSquared[2]) group <- lavInspect(x, "group") if (length(group) == 0) group <- "group" group.label <- lavInspect(x, "group.label") if (length(group.label) == 0) group.label <- 1 Sigma <- lavInspect(x, "cov.ov") Mu <- lavInspect(x, "mean.ov") EMcov <- lavInspect(x, "sampstat")$cov if (nG == 1L) { Sigma <- list(Sigma) Mu <- list(Mu) EMcov <- list(EMcov) } } else { ## If no lavaan object is supplied, check that required arguments are. suppliedData <- c(hasArg(rawData), hasArg(Sigma), hasArg(Mu)) if (!all(suppliedData)) { stop("Without a lavaan fitted object, user must supply raw data and", " model-implied moments.") } if (!hasArg(model) & !(transDataOnly | bootSamplesOnly)) { stop("Without model syntax or fitted lavaan object, user can only call", " this function to save transformed data or bootstrapped samples.") } if (!hasArg(ChiSquared) & !(transDataOnly | bootSamplesOnly)) { stop("Without a fitted lavaan object or ChiSquared argument, user can", " only call this function to save transformed data, bootstrapped", " samples, or bootstrapped chi-squared values.") } if (!any(c(transDataOnly, bootSamplesOnly))) { if (!is.numeric(ChiSquared)) stop("The \"ChiSquared\" argument must be numeric.") } ## If user supplies one-group data & moments, convert to lists. if (class(rawData) == "data.frame") rawData <- list(rawData) if (class(rawData) != "list") { stop("The \"rawData\" argument must be a data.frame or list of data frames.") } else { if (!all(sapply(rawData, is.data.frame))) stop("Every element of \"rawData\" must be a data.frame") } if (class(Sigma) == "matrix") Sigma <- list(Sigma) if (is.numeric(Mu)) Mu <- list(Mu) ## check whether EMcov was supplied for starting values in Trans2/Trans3 if (!hasArg(EMcov)) { EMcov <- vector("list", length(Sigma)) } else { if (class(EMcov) == "matrix") EMcov <- list(EMcov) ## check EMcov is symmetric and dimensions match Sigma for (g in seq_along(EMcov)) { if (!isSymmetric(EMcov[[g]])) stop("EMcov in group ", g, " not symmetric.") unequalDim <- !all(dim(EMcov[[g]]) == dim(Sigma[[g]])) if (unequalDim) stop("Unequal dimensions in Sigma and EMcov.") } } ## Check the number of groups by the size of the lists. unequalGroups <- !all(length(rawData) == c(length(Sigma), length(Mu))) if (unequalGroups) stop("Unequal number of groups in rawData, Sigma, Mu. For multiple-group models, rawData must be a list of data frames, NOT a single data frame with a \"group\" column.") nG <- length(Sigma) ## In each group, check Sigma is symmetric and dimensions match rawData and Mu. for (g in seq_along(rawData)) { if (!isSymmetric(Sigma[[g]])) stop("Sigma in group ", g, " not symmetric.") unequalDim <- !all(ncol(rawData[[g]]) == c(nrow(Sigma[[g]]), length(Mu[[g]]))) if (unequalDim) stop("Unequal dimensions in rawData, Sigma, Mu.") } ## Check for names of group levels. If NULL, assign arbitrary ones. 
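  ## (e.g., an unnamed list of two data frames yields group.label = c("g1", "g2"))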
if (!hasArg(group)) group <- "group" if (!is.character(group)) stop("The \"group\" argument must be a character string.") if (is.null(names(rawData))) { group.label <- paste0("g", seq_along(rawData)) } else { group.label <- names(rawData) } } ## save a copy as myTransDat, whose elements will be replaced iteratively by ## group and by missing data pattern within group. myTransDat <- rawData names(myTransDat) <- group.label output <- list() ######################### ## Data Transformation ## ######################### for (g in seq_along(group.label)) { if (transformation == 1) { ## get missing data patterns R <- ifelse(is.na(rawData[[g]]), 1, 0) rowMissPatt <- apply(R, 1, function(x) paste(x, collapse = "")) patt <- unique(rowMissPatt) myRows <- lapply(patt, function(x) which(rowMissPatt == x)) ## for each pattern, apply transformation tStart <- Sys.time() transDatList <- lapply(patt, trans1, rowMissPatt = rowMissPatt, dat = rawData[[g]], Sigma = Sigma[[g]], Mu = Mu[[g]]) output$timeTrans <- Sys.time() - tStart for (i in seq_along(patt)) myTransDat[[g]][myRows[[i]], ] <- transDatList[[i]] } else { tStart <- Sys.time() myTransDat[[g]] <- SavaleiYuan(dat = rawData[[g]], Sigma = Sigma[[g]], Mu = Mu[[g]], EMcov = EMcov[[g]]) output$timeTrans <- Sys.time() - tStart } } ## option to end function here if (transDataOnly) { for (g in seq_along(myTransDat)) myTransDat[[g]][ , group] <- group.label[g] ## option to write transformed data to a file if (writeTransData) { ## Set a few options, if the user didn't. if (!hasArg(writeArgs)) writeArgs <- list(file = "transformedData.dat", row.names = FALSE, na = "-999") if (!exists("file", where = writeArgs)) writeTransArgs$file <- "transformedData.dat" if (!exists("row.names", where = writeArgs)) writeArgs$row.names <- FALSE if (!exists("na", where = writeArgs)) writeArgs$na <- "-999" ## add grouping variable and bind together into one data frame for (g in seq_along(myTransDat)) myTransDat[[g]][ , group] <- group.label[g] writeArgs$x <- do.call("rbind", myTransDat) ## write to file, print details to screen do.call("write.table", writeArgs) cat("Transformed data was written to file \"", writeArgs$file, "\" in:\n\n", getwd(), "\n\nunless path specified by user in 'file' argument.\n", sep = "") return(invisible(writeArgs$x)) } return(do.call("rbind", myTransDat)) } ############################################# ## Bootstrap distribution of fit statistic ## ############################################# ## draw bootstrap samples if (!is.null(seed)) set.seed(seed) bootSamples <- lapply(1:nBoot, function(x) getBootSample(myTransDat, group, group.label)) ## option to write bootstrapped samples to file(s) if (writeBootData) { ## Set a few options, if the user didn't. 
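    ## (e.g., a hypothetical writeArgs = list(file = "myBoot.dat", sep = "\t")
    ##  overrides the file name while keeping the row.names and na defaults)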
if (!hasArg(writeArgs)) writeArgs <- list(file = "bootstrappedSamples.dat", row.names = FALSE, na = "-999") if (!exists("file", where = writeArgs)) writeArgs$file <- "bootstrappedSamples.dat" if (!exists("row.names", where = writeArgs)) writeArgs$row.names <- FALSE if (!exists("na", where = writeArgs)) writeArgs$na <- "-999" ## add indicator for bootstrapped sample, bind together into one data frame for (b in seq_along(bootSamples)) bootSamples[[b]]$bootSample <- b writeArgs$x <- do.call("rbind", bootSamples) ## write to file, print details to screen do.call("write.table", writeArgs) cat("Bootstrapped samples written to file \"", writeArgs$file, "\" in:\n\n", getwd(), "\n\nunless path specified by user in 'file' argument.\n", sep = "") return(invisible(bootSamples)) } ## option to end function here if (bootSamplesOnly) return(bootSamples) ## check for lavaan arguments in (...) lavaanArgs <- list(...) lavaanArgs$group <- group ## fit model to bootstrap samples, save distribution of chi-squared test stat if (hasArg(x)) { ## grab defaults from lavaan object "x" lavaanArgs$slotParTable <- as.list(parTable(x)) lavaanArgs$slotModel <- x@Model lavaanArgs$slotOptions <- lavInspect(x, "options") } else { lavaanArgs$model <- model lavaanArgs$missing <- "fiml" ## set defaults that will be necessary for many models to run, that will ## probably not be specified explicitly or included in lavaan syntax lavaanArgs$meanstructure <- TRUE if (!exists("auto.var", where = lavaanArgs)) lavaanArgs$auto.var <- TRUE if (!exists("auto.cov.y", where = lavaanArgs)) lavaanArgs$auto.cov.y <- TRUE if (!exists("auto.cov.lv.x", where = lavaanArgs)) lavaanArgs$auto.cov.lv.x <- TRUE } ## run bootstrap fits if (showProgress) { mypb <- utils::txtProgressBar(min = 1, max = nBoot, initial = 1, char = "=", width = 50, style = 3, file = "") bootFits <- numeric() tStart <- Sys.time() for (j in 1:nBoot) { bootFits[j] <- fitBootSample(bootSamples[[j]], args = lavaanArgs, suppress = suppressWarn) utils::setTxtProgressBar(mypb, j) } close(mypb) output$timeFit <- Sys.time() - tStart } else { tStart <- Sys.time() bootFits <- sapply(bootSamples, fitBootSample, args = lavaanArgs, suppress = suppressWarn) output$timeFit <- Sys.time() - tStart } ## stack groups, save transformed data and distribution in output object for (g in seq_along(myTransDat)) myTransDat[[g]][ , group] <- group.label[g] output$Transformed.Data <- do.call("rbind", myTransDat) output$Bootstrapped.Distribution <- bootFits output$Original.ChiSquared <- ChiSquared if (hasArg(x)) { output$Degrees.Freedom <- lavInspect(x, "fit")["df"] } else { convSamp <- which(!is.na(bootFits))[1] lavaanArgs$data <- bootSamples[[convSamp]] lavaanlavaan <- function(...) { lavaan::lavaan(...)
} output$Degrees.Freedom <- lavInspect(do.call(lavaanlavaan, lavaanArgs), "fit")["df"] } ## calculate bootstrapped p-value output$Bootstrapped.p.Value <- mean(bootFits >= ChiSquared, na.rm = TRUE) ## print warning if any models didn't converge if (any(is.na(bootFits))) { nonConvMessage <- paste("Model did not converge for the following bootstrapped samples", paste(which(is.na(bootFits)), collapse = "\t"), sep = ":\n") warning(nonConvMessage) } finalResult <- new("BootMiss", time = list(transform = output$timeTrans, fit = output$timeFit), transData = output$Transformed.Data, bootDist = output$Bootstrapped.Distribution, origChi = output$Original.ChiSquared, df = output$Degrees.Freedom, bootP = output$Bootstrapped.p.Value) finalResult } ## ---------------- ## Hidden Functions ## ---------------- ## Function to execute Transformation 1 on a single missing-data pattern trans1 <- function(MDpattern, rowMissPatt, dat, Sigma, Mu) { myRows <- which(rowMissPatt == MDpattern) X <- apply(dat[myRows, ], 2, scale, scale = FALSE) observed <- !is.na(X[1, ]) Xreduced <- X[ , observed] Mreduced <- as.numeric(Mu[observed]) SigmaChol <- chol(Sigma[observed, observed]) S <- t(Xreduced) %*% Xreduced / nrow(X) Areduced <- t(SigmaChol) %*% t(solve(chol(S))) Yreduced <- t(Areduced %*% t(Xreduced) + Mreduced) Y <- replace(X, !is.na(X), Yreduced) Y } ## Function to execute Transformation 2 on a single group trans2 <- function(dat, Sigma, Mu, EMcov) { ## Computing Function of A (eq. 12), whose root is desired eq12 <- function(A) { ga <- rep(0, pStar) for (j in 1:J) { Tj <- Mjs[[j]] %*% A %*% Hjs[[j]] %*% A %*% Mjs[[j]] - Mjs[[j]] ga <- ga + Njs[j] * Dupinv %*% c(Tj) # same as vech(Tj) } ga } ## Computing Derivative of Function of A (eq. 13) eq13 <- function(A) { deriv12 <- matrix(0, nrow = pStar, ncol = pStar) for (j in 1:J) { Tj1 <- Mjs[[j]] %*% A %*% Hjs[[j]] deriv12 <- deriv12 + 2*Njs[j]*Dupinv %*% kronecker(Tj1, Mjs[[j]]) %*% Dup } deriv12 } ## get missing data patterns R <- ifelse(is.na(dat), 1, 0) rowMissPatt <- apply(R, 1, function(x) paste(x, collapse = "")) MDpattern <- unique(rowMissPatt) ## sample size within each MD pattern Njs <- sapply(MDpattern, function(patt) sum(rowMissPatt == patt)) J <- length(MDpattern) # number of MD patterns p <- ncol(dat) # number of variables in model pStar <- p*(p + 1) / 2 # number of nonredundant covariance elements ## create empty lists for each MD pattern Xjs <- vector("list", J) Hjs <- vector("list", J) Mjs <- vector("list", J) ## Create Duplication Matrix and its inverse (Magnus & Neudecker, 1999) Dup <- lavaan::lav_matrix_duplication(p) Dupinv <- solve(t(Dup) %*% Dup) %*% t(Dup) ## step through each MD pattern, populate Hjs and Mjs for (j in 1:J) { Xjs[[j]] <- apply(dat[rowMissPatt == MDpattern[j], ], 2, scale, scale = FALSE) if (!is.matrix(Xjs[[j]])) Xjs[[j]] <- t(Xjs[[j]]) observed <- !is.na(Xjs[[j]][1, ]) Sj <- t(Xjs[[j]]) %*% Xjs[[j]] / Njs[j] Hjs[[j]] <- replace(Sj, is.na(Sj), 0) Mjs[[j]] <- replace(Sj, !is.na(Sj), solve(Sigma[observed, observed])) Mjs[[j]] <- replace(Mjs[[j]], is.na(Mjs[[j]]), 0) } ## Compute starting Values for A if (is.null(EMcov)) { A <- diag(p) } else { EMeig <- eigen(EMcov) EMrti <- EMeig$vectors %*% diag(1 / sqrt(EMeig$values)) %*% t(EMeig$vectors) Sigeig <- eigen(Sigma) Sigrt <- Sigeig$vectors %*% diag(sqrt(Sigeig$values)) %*% t(Sigeig$vectors) B <- Sigrt %*% EMrti A <- .5*(B + t(B)) } ## Newton Algorithm for finding root (eq. 
14) crit <- .1 a <- c(A) fA <- eq12(A) while (crit > 1e-11) { dvecF <- eq13(A) a <- a - Dup %*% solve(dvecF) %*% fA A <- matrix(a, ncol = p) fA <- eq12(A) crit <- max(abs(fA)) } ## Transform dataset X to dataset Y Yjs <- Xjs for (j in 1:J) { observed <- !is.na(Xjs[[j]][1, ]) XjReduced <- Xjs[[j]][ , observed, drop = FALSE] Aj <- A[observed, observed, drop = FALSE] Mj <- as.numeric(Mu[observed]) Yj <- t(Aj %*% t(XjReduced) + Mj) Yjs[[j]] <- replace(Yjs[[j]], !is.na(Yjs[[j]]), Yj) } Y <- as.data.frame(do.call("rbind", Yjs)) colnames(Y) <- colnames(dat) Y } ## Function to execute Transformation 3 on a single group -- TRANSFORMATION DOES NOT RETURN CHI-SQ = 0 trans3 <- function(dat, Sigma, Mu, EMcov) { # Computing Saturated Means as a Function of A (eq. B1 in Appendix B) mut <- function(A) { M <- matrix(0, ncol = 1, nrow = p) for (j in 1:J) { M <- M + Njs[[j]] * Mjs[[j]] %*% A %*% Ybarjs[[j]] } Mjtoti %*% M } # Computing Function of A (eq. 18) whose root is desired eq18 <- function(A) { ga <- rep(0, pStar) mutilda <- mut(A) for (j in 1:J) { Tj <- Mjs[[j]] %*% A %*% Hjs[[j]] %*% A %*% Mjs[[j]] - Mjs[[j]] dif <- A %*% Ybarjs[[j]] - mutilda middle <- dif %*% t(dif) Tjnew <- Tj + Mjs[[j]] %*% middle %*% Mjs[[j]] ga <- ga + Njs[j] * Dupinv %*% c(Tjnew) } ga } # Computing Derivative of Function eq. 18 deriv18 <- function(A) { d18 <- matrix(0, nrow = pStar, ncol = pStar) for (j in 1:J) { Tj1 <- Mjs[[j]] %*% A %*% Hjs[[j]] mutilda <- mut(A) dif <- A %*% Ybarjs[[j]] - mutilda Tj2 <- Mjs[[j]] %*% dif %*% t(Ybarjs[[j]]) Tj3 <- kronecker(Mjs[[j]] %*% dif, Mjs[[j]]) %*% Mjtoti %*% Tj3add d18 <- d18 + 2*Njs[j]*Dupinv %*% ((kronecker((Tj1 + Tj2), Mjs[[j]])) - Tj3) %*% Dup } d18 } ## get missing data patterns R <- ifelse(is.na(dat), 1, 0) rowMissPatt <- apply(R, 1, function(x) paste(x, collapse = "")) MDpattern <- unique(rowMissPatt) ## sample size within each MD pattern Njs <- sapply(MDpattern, function(patt) sum(rowMissPatt == patt)) J <- length(MDpattern) # number of MD patterns p <- ncol(dat) # number of variables in model pStar <- p*(p + 1) / 2 # number of nonredundant covariance elements ## create empty lists for each MD pattern Xjs <- vector("list", J) Ybarjs <- vector("list", J) Hjs <- vector("list", J) Mjs <- vector("list", J) Mjtot <- matrix(0, ncol = p, nrow = p) Tj3add <- matrix(0, nrow = p, ncol = p * p) ## Create Duplication Matrix and its inverse (Magnus & Neudecker, 1999) Dup <- lavaan::lav_matrix_duplication(p) # as in trans2 (duplicationMatrix() is no longer exported by lavaan) Dupinv <- solve(t(Dup) %*% Dup) %*% t(Dup) ## step through each MD pattern, populate Hjs and Mjs for (j in 1:J) { Xjs[[j]] <- apply(dat[rowMissPatt == MDpattern[j], ], 2, scale, scale = FALSE) if (!is.matrix(Xjs[[j]])) Xjs[[j]] <- t(Xjs[[j]]) observed <- !is.na(Xjs[[j]][1, ]) pj <- p - sum(observed) means <- colMeans(dat[rowMissPatt == MDpattern[j], ]) Ybarjs[[j]] <- replace(means, is.na(means), 0) Sj <- t(Xjs[[j]]) %*% Xjs[[j]] / Njs[j] Hjs[[j]] <- replace(Sj, is.na(Sj), 0) Mjs[[j]] <- replace(Sj, !is.na(Sj), solve(Sigma[observed, observed])) Mjs[[j]] <- replace(Mjs[[j]], is.na(Mjs[[j]]), 0) Mjtot <- Mjtot + Njs[[j]] * Mjs[[j]] Tj3add <- Tj3add + Njs[[j]] * kronecker(t(Ybarjs[[j]]), Mjs[[j]]) } Mjtoti <- solve(Mjtot) ## Compute starting Values for A if (is.null(EMcov)) { A <- diag(p) } else { EMeig <- eigen(EMcov) EMrti <- EMeig$vectors %*% diag(1 / sqrt(EMeig$values)) %*% t(EMeig$vectors) Sigeig <- eigen(Sigma) Sigrt <- Sigeig$vectors %*% diag(sqrt(Sigeig$values)) %*% t(Sigeig$vectors) B <- Sigrt %*% EMrti A <- .5*(B + t(B)) } ## Newton Algorithm for finding root (eq.
14) crit <- .1 a <- c(A) fA <- eq18(A) while (crit > 1e-11) { dvecF <- deriv18(A) a <- a - Dup %*% solve(dvecF) %*% fA A <- matrix(a, ncol = p) fA <- eq18(A) crit <- max(abs(fA)) } ## Transform dataset X to dataset Y (Z in the paper, eqs. 15-16) Yjs <- Xjs for (j in 1:J) { observed <- !is.na(Xjs[[j]][1, ]) XjReduced <- Xjs[[j]][ , observed, drop = FALSE] Aj <- A[observed, observed, drop = FALSE] Mj <- as.numeric((Mu - mut(A))[observed]) Yj <- t(Aj %*% t(XjReduced) + Mj) Yjs[[j]] <- replace(Yjs[[j]], !is.na(Yjs[[j]]), Yj) } Y <- as.data.frame(do.call("rbind", Yjs)) colnames(Y) <- colnames(dat) Y } ## Get a single bootstrapped sample from the transformed data. If there are ## multiple groups, bootstrapping occurs independently within each group, and ## a single data frame is returned. A new column is added to indicate group ## membership, which will be ignored in a single-group analysis. getBootSample <- function(groupDat, group, group.label) { bootSamp <- list() for (g in seq_along(groupDat)) { dat <- groupDat[[g]] dat[ , group] <- group.label[g] bootSamp[[g]] <- dat[sample(1:nrow(dat), nrow(dat), replace = TRUE), ] } do.call("rbind", bootSamp) } ## fit the model to a single bootstrapped sample and return chi-squared ##' @importFrom lavaan lavInspect fitBootSample <- function(dat, args, suppress) { args$data <- dat lavaanlavaan <- function(...) { lavaan::lavaan(...) } if (suppress) { fit <- suppressWarnings(do.call(lavaanlavaan, args)) } else { fit <- do.call(lavaanlavaan, args) } if (!exists("fit")) return(c(chisq = NA)) if (lavInspect(fit, "converged")) { chisq <- lavInspect(fit, "fit")[c("chisq", "chisq.scaled")] } else { chisq <- NA } if (is.na(chisq[2])) return(chisq[1]) else return(chisq[2]) } semTools/R/semTools.R0000644000176200001440000000622713432435606014225 0ustar liggesusers### Terrence D. Jorgensen ### Last updated: 1 September 2018 ### package documentation, along with convenience documentation (e.g., imports) ##' semTools: Useful Tools for Structural Equation Modeling ##' ##' The \pkg{semTools} package provides many miscellaneous functions that are ##' useful for statistical analysis involving SEM in R. Many functions extend ##' the funtionality of the \pkg{lavaan} package. Some sets of functions in ##' \pkg{semTools} correspond to the same theme. We call such a collection of ##' functions a \emph{suite}. 
Our suites include: ##' \itemize{ ##' \item{Model Fit Evaluation: ##' \code{\link{moreFitIndices}}, ##' \code{\link{nullRMSEA}}, ##' \code{\link{singleParamTest}}, ##' \code{\link{miPowerFit}}, and ##' \code{\link{chisqSmallN}}} ##' \item{Measurement Invariance: ##' \code{\link{measEq.syntax}}, ##' \code{\link{partialInvariance}}, ##' \code{\link{partialInvarianceCat}}, and ##' \code{\link{permuteMeasEq}}} ##' \item{Power Analysis: ##' \code{\link{SSpower}}, ##' \code{\link{findRMSEApower}}, ##' \code{\link{plotRMSEApower}}, ##' \code{\link{plotRMSEAdist}}, ##' \code{\link{findRMSEAsamplesize}}, ##' \code{\link{findRMSEApowernested}}, ##' \code{\link{plotRMSEApowernested}}, and ##' \code{\link{findRMSEAsamplesizenested}}} ##' \item{Missing Data Analysis: ##' \code{\link{auxiliary}}, ##' \code{\link{runMI}}, ##' \code{\link{twostage}}, ##' \code{\link{fmi}}, ##' \code{\link{bsBootMiss}}, ##' \code{\link{quark}}, and ##' \code{\link{combinequark}}} ##' \item{Latent Interactions: ##' \code{\link{indProd}}, ##' \code{\link{orthogonalize}}, ##' \code{\link{probe2WayMC}}, ##' \code{\link{probe3WayMC}}, ##' \code{\link{probe2WayRC}}, ##' \code{\link{probe3WayRC}}, and ##' \code{\link{plotProbe}}} ##' \item{Exploratory Factor Analysis (EFA): ##' \code{\link{efa.ekc}}, ##' \code{\link{efaUnrotate}}, ##' \code{\link{orthRotate}}, ##' \code{\link{oblqRotate}}, and ##' \code{\link{funRotate}}} ##' \item{Reliability Estimation: ##' \code{\link{reliability}}, ##' \code{\link{reliabilityL2}}, and ##' \code{\link{maximalRelia}}} ##' \item{Parceling: ##' \code{\link{parcelAllocation}}, ##' \code{\link{PAVranking}}, and ##' \code{\link{poolMAlloc}}} ##' \item{Non-Normality: ##' \code{\link{skew}}, ##' \code{\link{kurtosis}}, ##' \code{\link{mardiaSkew}}, ##' \code{\link{mardiaKurtosis}}, and ##' \code{\link{mvrnonnorm}}} ##' } ##' All users of R (or SEM) are invited to submit functions or ideas for ##' functions by contacting the maintainer, Terrence Jorgensen ##' (\email{TJorgensen314@gmail.com}). Contributors are encouraged to use ##' \code{Roxygen} comments to document their contributed code, which is ##' consistent with the rest of \pkg{semTools}. Read the vignette from the ##' \pkg{roxygen2} package for details: ##' \code{vignette("rd", package = "roxygen2")} ##' ##' @docType package ##' @name semTools NULL ##' @importFrom methods setClass setMethod getMethod show is new slot as hasArg NULL ##' @importFrom graphics hist plot par abline lines legend NULL ##' @importFrom stats nobs residuals resid fitted fitted.values coef anova vcov NULL semTools/R/quark.R0000644000176200001440000004623113432435606013542 0ustar liggesusers### Steven R. Chesnut, Danny Squire, Terrence D. Jorgensen ### Last updated: 26 June 2018 #' Quark #' #' The \code{quark} function provides researchers with the ability to calculate #' and include component scores calculated by taking into account the variance #' in the original dataset and all of the interaction and polynomial effects of #' the data in the dataset. #' #' The \code{quark} function calculates these component scores by first filling #' in the data via means of multiple imputation methods and then expanding the #' dataset by aggregating the non-overlapping interaction effects between #' variables by calculating the mean of the interactions and polynomial #' effects. The multiple imputation methods include one of iterative sampling #' and group mean substitution and multiple imputation using a polytomous #' regression algorithm (mice). 
During the expansion process, the dataset is #' expanded to three times its normal size (in width). The first third of the #' dataset contains all of the original data post imputation, the second third #' contains the means of the polynomial effects (squares and cubes), and the #' final third contains the means of the non-overlapping interaction effects. A #' full principal componenent analysis is conducted and the individual #' components are retained. The subsequent \code{\link{combinequark}} function #' provides researchers the control in determining how many components to #' extract and retain. The function returns the dataset as submitted (with #' missing values) and the component scores as requested for a more accurate #' multiple imputation in subsequent steps. #' #' @param data The data frame is a required component for \code{quark}. In #' order for \code{quark} to process a data frame, it must not contain any #' factors or text-based variables. All variables must be in numeric format. #' Identifiers and dates can be left in the data; however, they will need to be #' identified under the \code{id} argument. #' @param id Identifiers and dates within the dataset will need to be #' acknowledged as \code{quark} cannot process these. By acknowledging the #' identifiers and dates as a vector of column numbers or variable names, #' \code{quark} will remove them from the data temporarily to complete its main #' processes. Among many potential issues of not acknowledging identifiers and #' dates are issues involved with imputation, product and polynomial effects, #' and principal component analysis. #' @param order Order is an optional argument provided by quark that can be #' used when the imputation procedures in mice fail. Under some circumstances, #' mice cannot calculate missing values due to issues with extreme missingness. #' Should an error present itself stating a failure due to not having any #' columns selected, set the argument \code{order = 2} in order to reorder the #' imputation method procedure. Otherwise, use the default \code{order = 1}. #' @param silent If \code{FALSE}, the details of the \code{quark} process are #' printed. #' @param \dots additional arguments to pass to \code{\link[mice]{mice}}. #' @return The output value from using the quark function is a list. It will #' return a list with 7 components. #' \item{ID Columns}{Is a vector of the identifier columns entered when #' running quark.} #' \item{ID Variables}{Is a subset of the dataset that contains the identifiers #' as acknowledged when running quark.} #' \item{Used Data}{Is a matrix / dataframe of the data provided by user as #' the basis for quark to process.} #' \item{Imputed Data}{Is a matrix / dataframe of the data after the multiple #' method imputation process.} #' \item{Big Matrix}{Is the expanded product and polynomial matrix.} #' \item{Principal Components}{Is the entire dataframe of principal components #' for the dataset. This dataset will have the same number of rows of the big #' matrix, but will have 1 less column (as is the case with principal #' component analyses).} #' \item{Percent Variance Explained}{Is a vector of the percent variance #' explained with each column of principal components.} #' @author Steven R. Chesnut (University of Southern Mississippi; #' \email{Steven.Chesnut@@usm.edu}) #' #' Danny Squire (Texas Tech University) #' #' Terrence D. Jorgensen (University of Amsterdam) #' #' The PCA code is copied and modified from the \code{FactoMineR} package. 
#' @seealso \code{\link{combinequark}} #' @references Howard, W. J., Rhemtulla, M., & Little, T. D. (2015). Using #' Principal Components as Auxiliary Variables in Missing Data Estimation. #' \emph{Multivariate Behavioral Research, 50}(3), 285--299. #' doi:10.1080/00273171.2014.999267 #' @examples #' #' set.seed(123321) #' #' dat <- HolzingerSwineford1939[,7:15] #' misspat <- matrix(runif(nrow(dat) * 9) < 0.3, nrow(dat)) #' dat[misspat] <- NA #' dat <- cbind(HolzingerSwineford1939[,1:3], dat) #' \dontrun{ #' quark.list <- quark(data = dat, id = c(1, 2)) #' #' final.data <- combinequark(quark = quark.list, percent = 80) #' #' ## Example to rerun quark after imputation failure: #' quark.list <- quark(data = dat, id = c(1, 2), order = 2) #' } #' #' @export quark <- function(data, id, order = 1, silent = FALSE, ...){ if(!is.data.frame(data) && !is.matrix(data)) { stop("Inappropriate data file provided.") } if(!silent) cat("Data Check Passed.\n") if(is.character(id)) id <- match(id, colnames(data)) for(i in 1:length(id)){ if(id[i] > ncol(data) || id[i] < 1){ stop("At least one of the IDs is out of bounds.") } } if(!silent) cat("ID Check Passed.\n") if(!(order %in% 1:2)) stop("Currently, the order argument can take either 1 or 2.") final.collect <- list() final.collect$ID_Columns <- id final.collect$ID_Vars <- data[,id] final.collect$Used_Data <- data[,-c(id)] ##FIXME 26-June-2018: Terrence had to add a logical check for whether mice ## is installed, otherwise won't pass CRAN checks. checkMice <- requireNamespace("mice") if (!checkMice) { message('The quark function requires the "mice" package to be installed.') return(invisible(NULL)) } final.collect$Imputed_Data <- imputequark(data = final.collect$Used_Data, order = order, silent = silent, ...) final.collect$Big_Data_Matrix <- bigquark(data = final.collect$Imputed_Data, silent = silent) cmp <- compquark(data = final.collect$Big_Data_Matrix, silent = silent) final.collect$Prin_Components <- cmp[[1]] final.collect$Prin_Components_Prcnt <- cmp[[2]] return(final.collect) } #' Combine the results from the quark function #' #' This function builds upon the \code{\link{quark}} function to provide a #' final dataset comprised of the original dataset provided to #' \code{\link{quark}} and enough principal components to be able to account #' for a certain level of variance in the data. #' #' #' @param quark Provide the \code{\link{quark}} object that was returned. It #' should be a list of objects. Make sure to include it in its entirety. #' @param percent Provide a percentage of variance that you would like to have #' explained. That many components (columns) will be extracted and kept with #' the output dataset. Enter this variable as a number WITHOUT a percentage #' sign. #' @return The output of this function is the original dataset used in quark #' combined with enough principal component scores to be able to account for #' the amount of variance that was requested. #' @author Steven R. 
Chesnut (University of Southern Mississippi #' \email{Steven.Chesnut@@usm.edu}) #' @seealso \code{\link{quark}} #' @examples #' #' set.seed(123321) #' dat <- HolzingerSwineford1939[,7:15] #' misspat <- matrix(runif(nrow(dat) * 9) < 0.3, nrow(dat)) #' dat[misspat] <- NA #' dat <- cbind(HolzingerSwineford1939[,1:3], dat) #' #' quark.list <- quark(data = dat, id = c(1, 2)) #' #' final.data <- combinequark(quark = quark.list, percent = 80) #' #' @export combinequark <- function(quark, percent) { data <- cbind(quark$ID_Vars, quark$Used_Data) pct <- quark$Prin_Components_Prcnt comp <- quark$Prin_Components for (i in 1:length(pct)) { if(pct[i] >= percent) { num <- i break } } return(cbind(data, comp[,1:num])) } ## ---------------- ## Hidden Functions ## ---------------- imputequark <- function(data, order, silent = FALSE, ...){ if (order == 1){ data <- aImp(data = data, silent = silent, ...) data <- gImp(data = data, silent = silent) } else if(order == 2) { data <- gImp(data = data, silent = silent) if (length(which(is.na(data > 0)))) { data <- aImp(data = data, silent = silent, ...) } } return(data) } #' @importFrom stats cor gImp <- function(data, silent = FALSE) { imputed_data <- data num_adds <- vector(length = ncol(data)) # number of columns combined into one for averaging. data.cor <- cor(data, use = "pairwise", method = "pearson") class(data.cor) <- c("lavaan.matrix.symmetric","matrix") if (!silent) print(data.cor) #populate multiple matrices that can then be utilized to determine if one column should enhance another based upon #the correlations they share... if (!silent) cat("Imputing Column... \n") for (a in 1:ncol(data)) { temp_mat <- matrix(ncol = ncol(data), nrow = nrow(data)) list <- unique(sort(data[,a])) if (length(list) > 1 && length(list) <= 10) { for (b in 1:nrow(data)) { for (c in 1:length(list)) { if (data[b, a] == list[c] && !is.na(data[b,a])) { temp_mat[b,] <- round(colMeans(subset(data, data[ , a] == list[c]), na.rm = TRUE), digits = 1) } else if (is.na(data[b,a])) { for (p in 1:ncol(data)) temp_mat[b,p] <- data[b,p] } } } # Here I need to determine if the other columns are correlated enough with # the reference to ensure accuracy of predictions temp_cor <- data.cor[,a] # if (countNA(temp_cor)==0) { for (i in 1:length(temp_cor)) { if (i != a) { if (abs(temp_cor[i]) >= .5 && !is.na(temp_cor[i])) { # Using a moderate effect size, column a, will inform other columns. for (x in 1:nrow(imputed_data)){ imputed_data[x,i] <- sum(imputed_data[x,i], temp_mat[x,a], na.rm = TRUE) } num_adds[i] <- num_adds[i] + 1 } } } #} if (!silent) cat("\t", colnames(data)[a]) } } if (!silent) cat("\n") imputed_data <- cleanMat(m1 = data, m2 = imputed_data, impact = num_adds) imputed_data <- fixData(imputed_data) return(imputed_data) } cleanMat <- function(m1, m2, impact) { #Impact is the number of influences on each column... #We need to clean up and then try to determine what final values should be... #Go through each of the cells... new_mat <- m2 for (a in 1:ncol(m1)) { for (b in 1:nrow(m1)) { if (!is.na(m1[b,a])) { new_mat[b,a] <- m1[b,a] } else if (is.na(m1[b,a])) { new_mat[b,a] <- new_mat[b,a] / impact[a] } } } return(new_mat) } fixData <- function(data) { for (a in 1:ncol(data)) { for (b in 1:nrow(data)) { data[b,a] <- round(data[b,a], digits = 1) } } return(data) } aImp <- function(data, silent = FALSE, ...) { miceArgs <- list(...) 
miceArgs$data <- data miceArgs$maxit <- 1 miceArgs$m <- 1 miceArgs$printFlag <- !silent requireNamespace("mice") if (!("package:mice" %in% search())) attachNamespace("mice") if (!silent) cat("Starting Algorithm Imputation...\n") impData <- mice::complete(do.call("mice", miceArgs)) if (!silent) cat("Ending Algorithm Imputation...\n") return(impData) } bigquark <- function(data, silent = FALSE) { if (!silent) cat("Calculating Polynomial Effects.\n") poly <- ((data^2)+(data^3))/2 if (!silent) cat("Creating Matrix for Interaction Effects.\n") prod <- matrix(ncol=(ncol(data)-1),nrow=nrow(data)) if (!silent) cat("Calculating Interaction Effects...0%..") for (i in 1:nrow(data)) { if (!silent) printpct(percent = i/nrow(data)) for (j in 1:(ncol(data)-1)) { prod[i,j] <- mean(as.numeric(data[i,j])*as.numeric(data[i,(j+1):ncol(data)])) } } if (!silent) cat("\n") data <- cbind(data,poly,prod) return(data) } compquark <- function(data, silent = FALSE) { if (!silent) cat("Calculating values for the PCA\n") pcam <- pcaquark(data, ncp = ncol(data)) cmp <- list() cmp$pca <- pcam$ind$coord cmp$var <- pcam$eig[,3] colnames(cmp$pca) <- c(paste0("AuxVar",1:ncol(cmp$pca))) return(cmp) } printpct <- function(percent) { if (round(percent, digits = 10) == 0) cat("0%..") if (round(percent, digits = 10) == .10) cat("10%..") if (round(percent, digits = 10) == .20) cat("20%..") if (round(percent, digits = 10) == .30) cat("30%..") if (round(percent, digits = 10) == .40) cat("40%..") if (round(percent, digits = 10) == .50) cat("50%..") if (round(percent, digits = 10) == .60) cat("60%..") if (round(percent, digits = 10) == .70) cat("70%..") if (round(percent, digits = 10) == .80) cat("80%..") if (round(percent, digits = 10) == .90) cat("90%..") if (round(percent, digits = 10) == 1) cat("100%..") } # This function is modified from the FactoMineR package.
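## A sketch of what pcaquark() below does (assuming uniform weights, as it is
## called from compquark()): it centers and scales the expanded data matrix,
## obtains a weighted SVD from svd.triplet.quark(), and returns eigenvalues
## with (cumulative) percentages of variance in res$eig, variable loadings in
## res$var, and individual component scores in res$ind$coord -- the scores
## and cumulative percentages are what compquark() extracts above.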
pcaquark <- function(X, ncp = 5) { moy.p <- function(V, poids) res <- sum(V * poids)/sum(poids) ec <- function(V, poids) res <- sqrt(sum(V^2 * poids)/sum(poids)) X <- as.data.frame(X) if (any(is.na(X))) { warning("Missing values are imputed by the mean of the variable: you should use the imputePCA function of the missMDA package") X[is.na(X)] <- matrix(apply(X,2,mean,na.rm=TRUE),ncol=ncol(X),nrow=nrow(X),byrow=TRUE)[is.na(X)] } if (is.null(rownames(X))) rownames(X) <- 1:nrow(X) if (is.null(colnames(X))) colnames(X) <- paste("V", 1:ncol(X), sep = "") colnames(X)[colnames(X) == ""] <- paste("V", 1:sum(colnames(X)==""),sep="") rownames(X)[is.null(rownames(X))] <- paste("row",1:sum(rownames(X)==""),sep="") Xtot <- X if (any(!sapply(X, is.numeric))) { auxi <- NULL for (j in 1:ncol(X)) if (!is.numeric(X[, j])) auxi <- c(auxi, colnames(X)[j]) stop(paste("\nThe following variables are not quantitative: ", auxi)) } ncp <- min(ncp, nrow(X) - 1, ncol(X)) row.w <- rep(1, nrow(X)) row.w.init <- row.w row.w <- row.w/sum(row.w) col.w <- rep(1, ncol(X)) centre <- apply(X, 2, moy.p, row.w) X <- as.matrix(sweep(as.matrix(X), 2, centre, FUN = "-")) ecart.type <- apply(X, 2, ec, row.w) ecart.type[ecart.type <= 1e-16] <- 1 X <- sweep(as.matrix(X), 2, ecart.type, FUN = "/") dist2.ind <- apply(sweep(X,2,sqrt(col.w),FUN="*")^2,1,sum) dist2.var <- apply(sweep(X,1,sqrt(row.w),FUN="*")^2,2,sum) tmp <- svd.triplet.quark(X, row.w = row.w, col.w = col.w, ncp = ncp) eig <- tmp$vs^2 vp <- as.data.frame(matrix(NA, length(eig), 3)) rownames(vp) <- paste("comp", 1:length(eig)) colnames(vp) <- c("eigenvalue","percentage of variance", "cumulative percentage of variance") vp[, "eigenvalue"] <- eig vp[, "percentage of variance"] <- (eig/sum(eig)) * 100 vp[, "cumulative percentage of variance"] <- cumsum(vp[, "percentage of variance"]) V <- tmp$V U <- tmp$U eig <- eig[1:ncp] coord.ind <- sweep(as.matrix(U), 2, sqrt(eig), FUN = "*") coord.var <- sweep(as.matrix(V), 2, sqrt(eig), FUN = "*") contrib.var <- sweep(as.matrix(coord.var^2), 2, eig, "/") contrib.var <- sweep(as.matrix(contrib.var), 1, col.w, "*") dist2 <- dist2.var cor.var <- sweep(as.matrix(coord.var), 1, sqrt(dist2), FUN = "/") cos2.var <- cor.var^2 rownames(coord.var) <- rownames(cos2.var) <- rownames(cor.var) <- rownames(contrib.var) <- colnames(X) colnames(coord.var) <- colnames(cos2.var) <- colnames(cor.var) <- colnames(contrib.var) <- paste("Dim", c(1:ncol(V)), sep = ".") res.var <- list(coord = coord.var[, 1:ncp], cor = cor.var[, 1:ncp], cos2 = cos2.var[, 1:ncp], contrib = contrib.var[, 1:ncp] * 100) dist2 <- dist2.ind cos2.ind <- sweep(as.matrix(coord.ind^2), 1, dist2, FUN = "/") contrib.ind <- sweep(as.matrix(coord.ind^2), 1, row.w/sum(row.w), FUN = "*") contrib.ind <- sweep(as.matrix(contrib.ind), 2, eig, FUN = "/") rownames(coord.ind) <- rownames(cos2.ind) <- rownames(contrib.ind) <- names(dist2) <- rownames(X) colnames(coord.ind) <- colnames(cos2.ind) <- colnames(contrib.ind) <- paste("Dim", c(1:ncol(U)), sep = ".") res.ind <- list(coord = coord.ind[, 1:ncp], cos2 = cos2.ind[, 1:ncp], contrib = contrib.ind[, 1:ncp] * 100, dist = sqrt(dist2)) res <- list(eig = vp, var = res.var, ind = res.ind, svd = tmp) class(res) <- c("PCA", "list") return(res) } # This function is modified from the FactoMineR package.
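## svd.triplet.quark() below computes a weighted SVD: rows and columns of X
## are first multiplied by the square roots of row.w and col.w, svd() is
## attempted (retrying on t(X) if that fails), and as a last resort the
## decomposition is recovered from an eigendecomposition of the crossproduct
## matrix. Columns of U and V belonging to nearly null singular values
## (< 1e-15) are rescaled, and the weights are divided back out of U and V
## before returning.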
svd.triplet.quark <- function (X, row.w = NULL, col.w = NULL, ncp = Inf) { tryCatch.W.E <- function(expr) { ## function proposed by Maechler W <- NULL w.handler <- function(w) { # warning handler W <<- w invokeRestart("muffleWarning") } list(value = withCallingHandlers(tryCatch(expr, error = function(e) e), warning = w.handler), warning = W) } ncp <- min(ncp,nrow(X)-1,ncol(X)) row.w <- row.w / sum(row.w) X <- sweep(X, 2, sqrt(col.w), FUN = "*") X <- sweep(X, 1, sqrt(row.w), FUN = "*") if (ncol(X) < nrow(X)) { svd.usuelle <- tryCatch.W.E(svd(X, nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "message") { svd.usuelle <- tryCatch.W.E(svd(t(X), nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "d") { aux <- svd.usuelle$u svd.usuelle$u <- svd.usuelle$v svd.usuelle$v <- aux } else { bb <- eigen(t(X) %*% X, symmetric = TRUE) svd.usuelle <- vector(mode = "list", length = 3) svd.usuelle$d <- bb$values ## recover singular values as square roots of crossproduct eigenvalues svd.usuelle$d[svd.usuelle$d < 0] <- 0 svd.usuelle$d <- sqrt(svd.usuelle$d) svd.usuelle$v <- bb$vec[,1:ncp] svd.usuelle$u <- sweep(X %*% svd.usuelle$v, 2, svd.usuelle$d[1:ncp], FUN = "/") } } U <- svd.usuelle$u V <- svd.usuelle$v if (ncp > 1) { mult <- sign(apply(V, 2, sum)) mult[mult == 0] <- 1 U <- sweep(U, 2, mult, FUN = "*") V <- sweep(V, 2, mult, FUN = "*") } U <- sweep(as.matrix(U), 1, sqrt(row.w), FUN = "/") V <- sweep(as.matrix(V), 1, sqrt(col.w), FUN = "/") } else { svd.usuelle <- tryCatch.W.E(svd(t(X), nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "message") { svd.usuelle <- tryCatch.W.E(svd(X, nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "d") { aux <- svd.usuelle$u svd.usuelle$u <- svd.usuelle$v svd.usuelle$v <- aux } else { bb <- eigen(X%*%t(X),symmetric=TRUE) svd.usuelle <- vector(mode = "list", length = 3) svd.usuelle$d <- bb$values ## recover singular values as square roots of crossproduct eigenvalues svd.usuelle$d[svd.usuelle$d < 0] <- 0 svd.usuelle$d <- sqrt(svd.usuelle$d) svd.usuelle$v <- bb$vec[,1:ncp] svd.usuelle$u <- sweep(t(X) %*% svd.usuelle$v, 2, svd.usuelle$d[1:ncp], FUN = "/") } } U <- svd.usuelle$v V <- svd.usuelle$u mult <- sign(apply(V, 2, sum)) mult[mult == 0] <- 1 V <- sweep(V, 2, mult, FUN = "*") U <- sweep(U, 2, mult, FUN = "*") U <- sweep(U, 1, sqrt(row.w), FUN = "/") V <- sweep(V, 1, sqrt(col.w), FUN = "/") } vs <- svd.usuelle$d[1:min(ncol(X), nrow(X) - 1)] num <- which(vs[1:ncp] < 1e-15) if (length(num)==1) { U[,num] <- U[,num] * vs[num] V[,num] <- V[,num] * vs[num] } if (length(num) > 1) { U[,num] <- sweep(U[,num], 2, vs[num], FUN = "*") V[,num] <- sweep(V[,num], 2, vs[num], FUN = "*") } res <- list(vs = vs, U = U, V = V) return(res) } semTools/R/aa_semTools-deprecated.R0000644000176200001440000000101613432435606016753 0ustar liggesusers### Terrence D. Jorgensen ### Last updated 25 August 2018 ### automatically create documentation for "deprecated" help page #' @title Deprecated functions in package \pkg{semTools}. #' @description The functions listed below are deprecated and will be defunct #' in the near future. When possible, alternative functions with similar #' functionality are also mentioned. Help pages for deprecated functions are #' available at \code{help("semTools-deprecated")}. #' @name semTools-deprecated #' @keywords internal NULL semTools/R/powerAnalysisNested.R0000644000176200001440000002007213432435606016415 0ustar liggesusers### Sunthud Pornprasertmanit, Bell Clinton, Pavel Panko ### Last updated: 9 March 2018 #' Find power given a sample size in nested model comparison #' #' Find the statistical power to reject the null pair of RMSEA values when samples are generated under the alternative pair of RMSEA values, given a sample size.
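#'
#' Following MacCallum, Browne, and Cai (2006), as implemented in the
#' function body below: each pair of RMSEA values is converted to a
#' difference in minimized discrepancy-function values, e.g.,
#' \eqn{F_{0A} - F_{0B} = (df_A \times rmsea0A^2 - df_B \times rmsea0B^2)/G}
#' for \eqn{G} groups. The critical value is the \eqn{1 - \alpha} quantile
#' of a noncentral chi-squared distribution with \eqn{df_A - df_B} degrees
#' of freedom and noncentrality \eqn{(N - 1)(F_{0A} - F_{0B})}, and power is
#' the probability that a chi-squared variate with noncentrality
#' \eqn{(N - 1)(F_{1A} - F_{1B})} exceeds that critical value.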
#' #' #' @importFrom stats qchisq pchisq #' #' @param rmsea0A The \eqn{H_0} baseline RMSEA #' @param rmsea0B The \eqn{H_0} alternative RMSEA (trivial misfit) #' @param rmsea1A The \eqn{H_1} baseline RMSEA #' @param rmsea1B The \eqn{H_1} alternative RMSEA (target misfit to be rejected) #' @param dfA degrees of freedom of the more-restricted model #' @param dfB degrees of freedom of the less-restricted model #' @param n Sample size #' @param alpha The alpha level #' @param group The number of groups in calculating RMSEA #' @author Bell Clinton #' #' Pavel Panko (Texas Tech University; \email{pavel.panko@@ttu.edu}) #' #' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @seealso \itemize{ #' \item \code{\link{plotRMSEApowernested}} to plot the statistical power for #' nested model comparison based on population RMSEA given the sample size #' \item \code{\link{findRMSEAsamplesizenested}} to find the minimum sample #' size for a given statistical power in nested model comparison based on #' population RMSEA #' } #' @references #' MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing #' differences between nested covariance structure models: Power analysis and #' null hypotheses. \emph{Psychological Methods, 11}(1), 19--35. #' doi:10.1037/1082-989X.11.1.19 #' @examples #' #' findRMSEApowernested(rmsea0A = 0.06, rmsea0B = 0.05, rmsea1A = 0.08, #' rmsea1B = 0.05, dfA = 22, dfB = 20, n = 200, #' alpha = 0.05, group = 1) #' #' @export findRMSEApowernested <- function(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, n, alpha = 0.05, group = 1) { if(is.null(rmsea0A)) rmsea0A <- 0 if(is.null(rmsea0B)) rmsea0B <- 0 if(is.null(rmsea1B)) rmsea1B <- rmsea0B if(dfA <= dfB) stop("The degrees of freedom of the more-restricted model (dfA) should be greater than the degrees of freedom of the less-restricted model (dfB)") if(rmsea0A < rmsea0B) stop("In the null-hypothesis models, the RMSEA of the more-restricted model (rmsea0A) should be higher than or equal to the RMSEA of the less-restricted model (rmsea0B).") if(rmsea1A < rmsea1B) stop("In the alternative-hypothesis models, the RMSEA of the more-restricted model (rmsea1A) should be higher than or equal to the RMSEA of the less-restricted model (rmsea1B).") ddiff <- dfA-dfB f0a <- (dfA*rmsea0A^2)/group f0b <- (dfB*rmsea0B^2)/group f1a <- (dfA*rmsea1A^2)/group f1b <- (dfB*rmsea1B^2)/group ncp0 <- (n-1)*(f0a-f0b) ncp1 <- (n-1)*(f1a-f1b) cval <- qchisq(1-alpha,ddiff,ncp0) Power <- 1-pchisq(cval,ddiff,ncp1) Power } #' Find sample size given a power in nested model comparison #' #' Find the sample size at which the power to reject samples generated from the alternative pair of RMSEA values is just over the specified power. #' #' #' @param rmsea0A The \eqn{H_0} baseline RMSEA #' @param rmsea0B The \eqn{H_0} alternative RMSEA (trivial misfit) #' @param rmsea1A The \eqn{H_1} baseline RMSEA #' @param rmsea1B The \eqn{H_1} alternative RMSEA (target misfit to be rejected) #' @param dfA degrees of freedom of the more-restricted model. #' @param dfB degrees of freedom of the less-restricted model. #' @param power The desired statistical power. #' @param alpha The alpha level. #' @param group The number of groups in calculating RMSEA.
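#' @details The minimum sample size is found by a simple grid search (see
#'   the function body below): power is computed via
#'   \code{findRMSEApowernested} for total sample sizes \eqn{n = 5, 6,
#'   \ldots, 500}, extending upward in blocks of 500 until the requested
#'   power is exceeded, and the smallest such \eqn{n} (divided by the number
#'   of groups) is returned.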
#' @author Bell Clinton #' #' Pavel Panko (Texas Tech University; \email{pavel.panko@@ttu.edu}) #' #' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' #' @seealso \itemize{ #' \item \code{\link{plotRMSEApowernested}} to plot the statistical power for #' nested model comparison based on population RMSEA given the sample size #' \item \code{\link{findRMSEApowernested}} to find the power for a given #' sample size in nested model comparison based on population RMSEA #' } #' @references #' MacCallum, R. C., Browne, M. W., & Cai, L. (2006). Testing #' differences between nested covariance structure models: Power analysis and #' null hypotheses. \emph{Psychological Methods, 11}(1), 19-35. #' doi:10.1037/1082-989X.11.1.19 #' @examples #' #' findRMSEAsamplesizenested(rmsea0A = 0, rmsea0B = 0, rmsea1A = 0.06, #' rmsea1B = 0.05, dfA = 22, dfB = 20, power = 0.80, #' alpha = .05, group = 1) #' #' @export findRMSEAsamplesizenested <- function(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, power = 0.80, alpha = .05, group = 1) { if(is.null(rmsea0A)) rmsea0A <- 0 if(is.null(rmsea0B)) rmsea0B <- 0 if(is.null(rmsea1B)) rmsea1B <- rmsea0B if(dfA <= dfB) stop("The degree of freedom of the more-restricted model (dfA) should be greater than the degree of freedom of the less-restricted model (dfB)") if(rmsea0A < rmsea0B) stop("In the null-hypothesis models, the RMSEA of the more-restricted model (rmsea0A) should have a higher than or equal to the RMSEA of the less-restricted model (rmsea0B).") if(rmsea1A < rmsea1B) stop("In the alternative-hypothesis models, the RMSEA of the more-restricted model (rmsea1A) should have a higher than or equal to the RMSEA of the less-restricted model (rmsea1B).") n <- 5:500 pow <- findRMSEApowernested(rmsea0A, rmsea0B, rmsea1A, rmsea1B, dfA, dfB, n, alpha, group = group) if(all(pow > power)) { return("Sample Size <= 5") } else if (all(power > pow)) { repeat { n <- n + 500 pow <- findRMSEApowernested(rmsea0A, rmsea0B, rmsea1A, rmsea1B, dfA, dfB, n, alpha, group = group) if(any(pow > power)) { index <- which(pow > power)[1] return(n[index]/group) } } } else { index <- which(pow > power)[1] return(n[index]/group) } } #' Plot power of nested model RMSEA #' #' Plot power of nested model RMSEA over a range of possible sample sizes. #' #' #' @param rmsea0A The \eqn{H_0} baseline RMSEA #' @param rmsea0B The \eqn{H_0} alternative RMSEA (trivial misfit) #' @param rmsea1A The \eqn{H_1} baseline RMSEA #' @param rmsea1B The \eqn{H_1} alternative RMSEA (target misfit to be rejected) #' @param dfA degree of freedom of the more-restricted model #' @param dfB degree of freedom of the less-restricted model #' @param nlow Lower bound of sample size #' @param nhigh Upper bound of sample size #' @param steps Step size #' @param alpha The alpha level #' @param group The number of group in calculating RMSEA #' @param \dots The additional arguments for the plot function. #' @author Bell Clinton #' #' Pavel Panko (Texas Tech University; \email{pavel.panko@@ttu.edu}) #' #' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' #' @seealso \itemize{ #' \item \code{\link{findRMSEApowernested}} to find the power for a given #' sample size in nested model comparison based on population RMSEA #' \item \code{\link{findRMSEAsamplesizenested}} to find the minium sample #' size for a given statistical power in nested model comparison based on #' population RMSEA #' } #' @references #' MacCallum, R. C., Browne, M. W., & Cai, L. (2006). 
Testing #' differences between nested covariance structure models: Power analysis and #' null hypotheses. \emph{Psychological Methods, 11}(1), 19-35. #' doi:10.1037/1082-989X.11.1.19 #' @examples #' #' plotRMSEApowernested(rmsea0A = 0, rmsea0B = 0, rmsea1A = 0.06, #' rmsea1B = 0.05, dfA = 22, dfB = 20, nlow = 50, #' nhigh = 500, steps = 1, alpha = .05, group = 1) #' #' @export plotRMSEApowernested <- function(rmsea0A = NULL, rmsea0B = NULL, rmsea1A, rmsea1B = NULL, dfA, dfB, nlow, nhigh, steps = 1, alpha = .05, group = 1, ...){ nseq <- seq(nlow,nhigh, by=steps) pow1 <- findRMSEApowernested(rmsea0A = rmsea0A, rmsea0B = rmsea0B, rmsea1A = rmsea1A, rmsea1B = rmsea1B, dfA = dfA, dfB = dfB, n = nseq, alpha = alpha, group = group) plot(nseq, pow1, xlab="Sample Size", ylab="Power", main="Compute Power for Nested RMSEA", type="l", ...) } semTools/R/PAVranking.R0000644000176200001440000017077313433602240014417 0ustar liggesusers### Terrence D. Jorgensen ### Last updated: 21 February 2019 ##' Parcel-Allocation Variability in Model Ranking ##' ##' This function quantifies and assesses the consequences of parcel-allocation ##' variability for model ranking of structural equation models (SEMs) that ##' differ in their structural specification but share the same parcel-level ##' measurement specification (see Sterba & Rights, 2016). This function calls ##' \code{\link{parcelAllocation}}---which can be used with only one SEM in ##' isolation---to fit two (assumed) nested models to each of a specified number ##' of random item-to-parcel allocations. Output includes summary information ##' about the distribution of model selection results (including plots) and the ##' distribution of results for each model individually, across allocations ##' within-sample. Note that this function can be used when selecting among more ##' than two competing structural models as well (see instructions below ##' involving the \code{seed} argument). ##' ##' This is based on a SAS macro \code{ParcelAlloc} (Sterba & MacCallum, 2010). ##' The \code{PAVranking} function produces results discussed in Sterba and ##' Rights (2016) relevant to the assessment of parcel-allocation variability in ##' model selection and model ranking. Specifically, the \code{PAVranking} ##' function first calls \code{\link{parcelAllocation}} to generate a given ##' number (\code{nAlloc}) of item-to-parcel allocations, fitting both specified ##' models to each allocation, and providing summaryies of PAV for each model. ##' Additionally, \code{PAVranking} provides the following new summaries: ##' ##' \itemize{ ##' \item{PAV in model selection index values and model ranking between ##' Models \code{model0} and \code{model1}.} ##' \item{The proportion of allocations that converged and the proportion of ##' proper solutions (results are summarized for allocations with both ##' converged and proper allocations only).} ##' } ##' ##' For further details on the benefits of the random allocation of items to ##' parcels, see Sterba (2011) and Sterba and MacCallum (2010). ##' ##' To test whether nested models have equivalent fit, results can be pooled ##' across allocations using the same methods available for pooling results ##' across multiple imputations of missing data (see \bold{Examples}). ##' ##' \emph{Note}: This function requires the \code{lavaan} package. Missing data ##' must be coded as \code{NA}. 
If the function returns \code{"Error in ##' plot.new() : figure margins too large"}, the user may need to increase ##' size of the plot window (e.g., in RStudio) and rerun the function. ##' ##' ##' @importFrom stats sd ##' @importFrom lavaan parTable lavListInspect lavaanList ##' @importFrom graphics hist ##' ##' @param model0,model1 \code{\link[lavaan]{lavaan}} model syntax specifying ##' nested models (\code{model0} within \code{model1}) to be fitted ##' to the same parceled data. Note that there can be a mixture of ##' items and parcels (even within the same factor), in case certain items ##' should never be parceled. Can be a character string or parameter table. ##' Also see \code{\link[lavaan]{lavaanify}} for more details. ##' @param data A \code{data.frame} containing all observed variables appearing ##' in the \code{model}, as well as those in the \code{item.syntax} used to ##' create parcels. If the data have missing values, multiple imputation ##' before parceling is recommended: submit a stacked data set (with a variable ##' for the imputation number, so they can be separateed later) and set ##' \code{do.fit = FALSE} to return the list of \code{data.frame}s (one per ##' allocation), each of which is a stacked, imputed data set with parcels. ##' @param parcel.names \code{character} vector containing names of all parcels ##' appearing as indicators in \code{model}. ##' @param item.syntax \link[lavaan]{lavaan} model syntax specifying the model ##' that would be fit to all of the unparceled items, including items that ##' should be randomly allocated to parcels appearing in \code{model}. ##' @param nAlloc The number of random items-to-parcels allocations to generate. ##' @param fun \code{character} string indicating the name of the ##' \code{\link[lavaan]{lavaan}} function used to fit \code{model} to ##' \code{data}. Can only take the values \code{"lavaan"}, \code{"sem"}, ##' \code{"cfa"}, or \code{"growth"}. ##' @param alpha Alpha level used as criterion for significance. ##' @param bic.crit Criterion for assessing evidence in favor of one model ##' over another. See Rafferty (1995) for guidelines (default is "very ##' strong evidence" in favor of the model with lower BIC). ##' @param fit.measures \code{character} vector containing names of fit measures ##' to request from each fitted \code{\link[lavaan]{lavaan}} model. See the ##' output of \code{\link[lavaan]{fitMeasures}} for a list of available measures. ##' @param \dots Additional arguments to be passed to ##' \code{\link[lavaan]{lavaanList}}. See also \code{\link[lavaan]{lavOptions}} ##' @param show.progress If \code{TRUE}, show a \code{\link[utils]{txtProgressBar}} ##' indicating how fast each model-fitting iterates over allocations. ##' @param iseed (Optional) Random seed used for parceling items. When the same ##' random seed is specified and the program is re-run, the same allocations ##' will be generated. The seed argument can be used to assess parcel-allocation ##' variability in model ranking when considering more than two models. For each ##' pair of models under comparison, the program should be rerun using the same ##' random seed. Doing so ensures that multiple model comparisons will employ ##' the same set of parcel datasets. \emph{Note}: When using \pkg{parallel} ##' options, you must first type \code{RNGkind("L'Ecuyer-CMRG")} into the R ##' Console, so that the seed will be controlled across cores. 
##' @param warn Whether to print warnings when fitting models to each allocation ##' ##' @return ##' \item{model0.results}{Results returned by \code{\link{parcelAllocation}} ##' for \code{model0} (see the \bold{Value} section).} ##' \item{model1.results}{Results returned by \code{\link{parcelAllocation}} ##' for \code{model1} (see the \bold{Value} section).} ##' \item{model0.v.model1}{A \code{list} of model-comparison results, including ##' the following: \itemize{ ##' \item{\code{LRT_Summary:}}{ The average likelihood ratio test across ##' allocations, as well as the \emph{SD}, minimum, maximum, range, and the ##' proportion of allocations for which the test was significant.} ##' \item{\code{Fit_Index_Differences:}}{ Differences in fit indices, organized ##' by what proportion favored each model and among those, what the average ##' difference was.} ##' \item{\code{Favored_by_BIC:}}{ The proportion of allocations in which each ##' model met the criterion (\code{bic.crit}) for a substantial difference ##' in fit.} ##' \item{\code{Convergence_Summary:}}{ The proportion of allocations in which ##' each model (and both models) converged on a solution.} ##' } Histograms are also printed to the current plot-output device.} ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\link{parcelAllocation}} for fitting a single model, ##' \code{\link{poolMAlloc}} for choosing the number of allocations ##' ##' @references ##' Raftery, A. E. (1995). Bayesian model selection in social ##' research. \emph{Sociological Methodology, 25}, 111--163. doi:10.2307/271063 ##' ##' Sterba, S. K. (2011). Implications of parcel-allocation variability for ##' comparing fit of item-solutions and parcel-solutions. \emph{Structural ##' Equation Modeling: A Multidisciplinary Journal, 18}(4), 554--577. ##' doi:10.1080/10705511.2011.607073 ##' ##' Sterba, S. K., & MacCallum, R. C. (2010). Variability in parameter estimates ##' and model fit across repeated allocations of items to parcels. ##' \emph{Multivariate Behavioral Research, 45}(2), 322--358. ##' doi:10.1080/00273171003680302 ##' ##' Sterba, S. K., & Rights, J. D. (2016). Accounting for parcel-allocation ##' variability in practice: Combining sources of uncertainty and choosing the ##' number of allocations. \emph{Multivariate Behavioral Research, 51}(2--3), ##' 296--313. doi:10.1080/00273171.2016.1144502 ##' ##' Sterba, S. K., & Rights, J. D. (2017). Effects of parceling on model ##' selection: Parcel-allocation variability in model ranking. ##' \emph{Psychological Methods, 22}(1), 47--68. 
doi:10.1037/met0000067 ##' ##' @examples ##' ##' ## Specify the item-level model (if NO parcels were created) ##' ## This must apply to BOTH competing models ##' ##' item.syntax <- c(paste0("f1 =~ f1item", 1:9), ##' paste0("f2 =~ f2item", 1:9)) ##' cat(item.syntax, sep = "\n") ##' ## Below, we reduce the size of this same model by ##' ## applying different parceling schemes ##' ##' ## Specify a 2-factor CFA with correlated factors, using 3-indicator parcels ##' mod1 <- ' ##' f1 =~ par1 + par2 + par3 ##' f2 =~ par4 + par5 + par6 ##' ' ##' ## Specify a more restricted model with orthogonal factors ##' mod0 <- ' ##' f1 =~ par1 + par2 + par3 ##' f2 =~ par4 + par5 + par6 ##' f1 ~~ 0*f2 ##' ' ##' ## names of parcels (must apply to BOTH models) ##' (parcel.names <- paste0("par", 1:6)) ##' ##' \dontrun{ ##' ## override default random-number generator to use parallel options ##' RNGkind("L'Ecuyer-CMRG") ##' ##' PAVranking(model0 = mod0, model1 = mod1, data = simParcel, nAlloc = 100, ##' parcel.names = parcel.names, item.syntax = item.syntax, ##' std.lv = TRUE, # any addition lavaan arguments ##' parallel = "snow") # parallel options ##' ##' ##' ##' ## POOL RESULTS by treating parcel allocations as multiple imputations. ##' ## Details provided in Sterba & Rights (2016); see ?poolMAlloc. ##' ##' ## save list of data sets instead of fitting model yet ##' dataList <- parcelAllocation(mod.parcels, data = simParcel, nAlloc = 100, ##' parcel.names = parcel.names, ##' item.syntax = item.syntax, ##' do.fit = FALSE) ##' ## now fit each model to each data set ##' fit0 <- cfa.mi(mod0, data = dataList, std.lv = TRUE) ##' fit1 <- cfa.mi(mod1, data = dataList, std.lv = TRUE) ##' anova(fit0, fit1) # pooled test statistic comparing models ##' class?lavaan.mi # find more methods for pooling results ##' } ##' ##' @export PAVranking <- function(model0, model1, data, parcel.names, item.syntax, nAlloc = 100, fun = "sem", alpha = .05, bic.crit = 10, fit.measures = c("chisq","df","cfi","tli","rmsea", "srmr","logl","aic","bic","bic2"), ..., show.progress = FALSE, iseed = 12345, warn = FALSE) { if (alpha >= 1 | alpha <= 0) stop('alpha level must be between 0 and 1') bic.crit <- abs(bic.crit) ## fit each model out0 <- parcelAllocation(model = model0, data = data, nAlloc = nAlloc, parcel.names = parcel.names, item.syntax = item.syntax, fun = fun, alpha = alpha, fit.measures = fit.measures, ..., show.progress = show.progress, iseed = iseed, return.fit = TRUE, warn = warn) out1 <- parcelAllocation(model = model1, data = data, nAlloc = nAlloc, parcel.names = parcel.names, item.syntax = item.syntax, fun = fun, alpha = alpha, fit.measures = fit.measures, ..., show.progress = show.progress, iseed = iseed, return.fit = TRUE, warn = warn) ## convergence summary conv0 <- out0$Model@meta$ok conv1 <- out1$Model@meta$ok conv01 <- conv0 & conv1 conv <- data.frame(Proportion_Converged = sapply(list(conv0, conv1, conv01), mean), row.names = c("model0","model1","Both_Models")) ## add proper solutions? I would advise against ## check df matches assumed nesting DF0 <- out0$Fit["df", "Avg"] DF1 <- out1$Fit["df", "Avg"] if (DF0 == DF1) stop('Models have identical df, so they cannot be compared.') if (DF0 < DF1) warning('model0 should be nested within model1, ', 'but df_0 < df_1. 
Should models be swapped?') if (DF0 < DF1) { ## swap so that model0 is the more-restricted (higher-df) model temp <- out0 out0 <- out1 out1 <- temp } ## Re-run lavaanList to collect model-comparison results if (show.progress) message('Re-fitting model0 to collect model-comparison ', 'statistics\n') oldCall <- out0$Model@call oldCall$model <- parTable(out0$Model) oldCall$dataList <- out0$Model@external$dataList[conv01] if (!is.null(oldCall$parallel)) { if (oldCall$parallel == "snow") { oldCall$parallel <- "no" oldCall$ncpus <- 1L if (warn) warning("Unable to pass lavaan::lavTestLRT() arguments when ", "parallel = 'snow'. Switching to parallel = 'no'. ", "Unless using Windows, parallel = 'multicore' works.") } } PT1 <- parTable(out1$Model) op1 <- lavListInspect(out1$Model, "options") oldCall$FUN <- function(obj) { fit1 <- try(lavaan::lavaan(model = PT1, slotOptions = op1, slotData = obj@Data), silent = TRUE) if (inherits(fit1, "try-error")) return("fit failed") out <- lavaan::lavTestLRT(obj, fit1) if (inherits(out, "try-error")) return("lavTestLRT() failed") out } fit01 <- eval(as.call(oldCall)) ## check if there are any results noLRT <- sapply(fit01@funList, is.character) if (all(noLRT)) stop("No success using lavTestLRT() on any allocations.") ## anova() results CHIs <- sapply(fit01@funList[!noLRT], "[", i = 2, j = "Chisq diff") pVals <- sapply(fit01@funList[!noLRT], "[", i = 2, j = "Pr(>Chisq)") LRT <- c(`Avg LRT` = mean(CHIs), df = abs(DF0 - DF1), SD = sd(CHIs), Min = min(CHIs), Max = max(CHIs), Range = max(CHIs) - min(CHIs), `% Sig` = mean(pVals < alpha)) class(LRT) <- c("lavaan.vector","numeric") ## differences in fit indices indices <- fit.measures[!grepl(pattern = "chisq|df|pvalue", fit.measures)] Fit0 <- do.call(cbind, out0$Model@funList[conv01])[indices, ] Fit1 <- do.call(cbind, out1$Model@funList[conv01])[indices, ] ## higher values for model0 Fit01 <- Fit0 - Fit1 higher0 <- Fit0 > Fit1 perc0 <- rowMeans(higher0) avg0 <- rowMeans(Fit01 * higher0) ## higher values for model1 Fit10 <- Fit1 - Fit0 higher1 <- Fit1 > Fit0 perc1 <- rowMeans(higher1) avg1 <- rowMeans(Fit10 * higher1) fitDiff <- data.frame(Prop0 = perc0, Avg0 = avg0, Prop1 = perc1, Avg1 = avg1) class(fitDiff) <- c("lavaan.data.frame","data.frame") attr(fitDiff, "header") <- paste("Note: Higher values of goodness-of-fit", "indices (e.g., CFI) favor that model, but", "higher values of badness-of-fit indices", "(e.g., RMSEA) indicate the competing model", "is favored.\n\n'Prop0' indicates the", "proportion of allocations for which each", "index was higher for model0 (likewise,", "'Prop1' indicates this for model1).\n", "\nAmong those allocations, 'Avg0' or 'Avg1'", "indicates the average amount by which the", "index was higher for that model.") ## favored by BIC favorBIC <- NULL if (any(grepl(pattern = "bic", fit.measures))) { if ("bic" %in% fit.measures) { highBIC <- abs(Fit01["bic",]) >= bic.crit favor0 <- mean(higher1["bic",] & highBIC) favor1 <- mean(higher0["bic",] & highBIC) favorBIC <- data.frame("bic" = c(favor0, favor1), row.names = paste("Evidence Favoring Model", 0:1)) } if ("bic2" %in% fit.measures) { highBIC <- abs(Fit01["bic2",]) >= bic.crit favor0 <- mean(higher1["bic2",] & highBIC) favor1 <- mean(higher0["bic2",] & highBIC) favorBIC2 <- data.frame("bic2" = c(favor0, favor1), row.names = paste("Evidence Favoring Model", 0:1)) if (is.null(favorBIC)) { favorBIC <- favorBIC2 } else favorBIC <- cbind(favorBIC, favorBIC2) } #TODO: add bic.priorN from moreFitIndices() class(favorBIC) <- c("lavaan.data.frame","data.frame") attr(favorBIC, "header") <- paste("Percent of Allocations with |BIC Diff|
>", bic.crit) } ## return results list(Model0_Results = out0[c("Estimates","SE","Fit")], Model1_Results = out1[c("Estimates","SE","Fit")], Model0.v.Model1 = list(LRT_Summary = LRT, Fit_Index_Differences = fitDiff, Favored_by_BIC = favorBIC, Convergence_Summary = conv)) } ## ------------ ## old function ## ------------ ## @param nPerPar A list in which each element is a vector, corresponding to ## each factor, indicating sizes of parcels. If variables are left out of ## parceling, they should not be accounted for here (i.e., there should not be ## parcels of size "1"). ## @param facPlc A list of vectors, each corresponding to a factor, specifying ## the item indicators of that factor (whether included in parceling or not). ## Either variable names or column numbers. Variables not listed will not be ## modeled or included in output datasets. ## @param nAlloc The number of random allocations of items to parcels to ## generate. ## @param syntaxA lavaan syntax for Model A. Note that, for likelihood ratio ## test (LRT) results to be interpreted, Model A should be nested within Model ## B (though the function will still provide results when Models A and B are ## nonnested). ## @param syntaxB lavaan syntax for Model B. Note that, for likelihood ratio ## test (LRT) results to be appropriate, Model A should be nested within Model ## B (though the function will still provide results when Models A and B are ## nonnested). ## @param dataset Item-level dataset ## @param parceloutput folder where parceled data sets will be outputted (note ## for Windows users: file path must specified using forward slashes). ## @param names (Optional) A character vector containing the names of parceled ## variables. ## @param leaveout (Optional) A vector of variables to be left out of ## randomized parceling. Either variable names or column numbers are allowed. ## @examples ## ## \dontrun{ ## ## lavaan syntax for Model A: a 2 Uncorrelated ## ## factor CFA model to be fit to parceled data ## ## parmodelA <- ' ## f1 =~ NA*p1f1 + p2f1 + p3f1 ## f2 =~ NA*p1f2 + p2f2 + p3f2 ## p1f1 ~ 1 ## p2f1 ~ 1 ## p3f1 ~ 1 ## p1f2 ~ 1 ## p2f2 ~ 1 ## p3f2 ~ 1 ## p1f1 ~~ p1f1 ## p2f1 ~~ p2f1 ## p3f1 ~~ p3f1 ## p1f2 ~~ p1f2 ## p2f2 ~~ p2f2 ## p3f2 ~~ p3f2 ## f1 ~~ 1*f1 ## f2 ~~ 1*f2 ## f1 ~~ 0*f2 ## ' ## ## ## lavaan syntax for Model B: a 2 Correlated ## ## factor CFA model to be fit to parceled data ## ## parmodelB <- ' ## f1 =~ NA*p1f1 + p2f1 + p3f1 ## f2 =~ NA*p1f2 + p2f2 + p3f2 ## p1f1 ~ 1 ## p2f1 ~ 1 ## p3f1 ~ 1 ## p1f2 ~ 1 ## p2f2 ~ 1 ## p3f2 ~ 1 ## p1f1 ~~ p1f1 ## p2f1 ~~ p2f1 ## p3f1 ~~ p3f1 ## p1f2 ~~ p1f2 ## p2f2 ~~ p2f2 ## p3f2 ~~ p3f2 ## f1 ~~ 1*f1 ## f2 ~~ 1*f2 ## f1 ~~ f2 ## ' ## ## ## specify items for each factor ## f1name <- colnames(simParcel)[1:9] ## f2name <- colnames(simParcel)[10:18] ## ## ## run function ## PAVranking(nPerPar = list(c(3,3,3), c(3,3,3)), facPlc = list(f1name,f2name), ## nAlloc = 100, parceloutput = 0, leaveout = 0, ## syntaxA = parmodelA, syntaxB = parmodelB, dataset = simParcel, ## names = list("p1f1","p2f1","p3f1","p1f2","p2f2","p3f2")) ## } ## # PAVranking <- function(nPerPar, facPlc, nAlloc = 100, parceloutput = 0, syntaxA, syntaxB, # dataset, names = NULL, leaveout = 0, seed = NA, ...) 
#   if (is.null(names))
#     names <- matrix(NA, length(nPerPar), 1)
#   ## set random seed if specified
#   if (is.na(seed) == FALSE)
#     set.seed(seed)
#   ## allow many tables to be outputted
#   options(max.print = 1e+06)
#
#   ## Create parceled datasets
#   if (is.character(dataset)) dataset <- utils::read.csv(dataset)
#   dataset <- as.matrix(dataset)
#
#   if (nAlloc < 2)
#     stop("Minimum of two allocations required.")
#
#   if (is.list(facPlc)) {
#     if (is.numeric(facPlc[[1]][1]) == FALSE) {
#       facPlcb <- facPlc
#       Namesv <- colnames(dataset)
#
#       for (i in 1:length(facPlc)) {
#         for (j in 1:length(facPlc[[i]])) {
#           facPlcb[[i]][j] <- match(facPlc[[i]][j], Namesv)
#         }
#         facPlcb[[i]] <- as.numeric(facPlcb[[i]])
#       }
#       facPlc <- facPlcb
#     }
#
#     # facPlc2 <- rep(0, sum(sapply(facPlc, length)))
#     facPlc2 <- rep(0, ncol(dataset))
#
#     for (i in 1:length(facPlc)) {
#       for (j in 1:length(facPlc[[i]])) {
#         facPlc2[facPlc[[i]][j]] <- i
#       }
#     }
#     facPlc <- facPlc2
#   }
#
#   if (leaveout != 0) {
#     if (is.numeric(leaveout) == FALSE) {
#       leaveoutb <- rep(0, length(leaveout))
#       Namesv <- colnames(dataset)
#
#       for (i in 1:length(leaveout)) {
#         leaveoutb[i] <- match(leaveout[i], Namesv)
#       }
#       leaveout <- as.numeric(leaveoutb)
#     }
#     k1 <- 0.001
#     for (i in 1:length(leaveout)) {
#       facPlc[leaveout[i]] <- facPlc[leaveout[i]] + k1
#       k1 <- k1 + 0.001
#     }
#   }
#
#   if (0 %in% facPlc == TRUE) {
#     Zfreq <- sum(facPlc == 0)
#     for (i in 1:Zfreq) {
#       Zplc <- match(0, facPlc)
#       dataset <- dataset[, -Zplc]
#       facPlc <- facPlc[-Zplc]
#     }
#     ## this allows for unused variables in dataset, which are specified by
#     ## zeros, and deleted
#   }
#
#   if (is.list(nPerPar)) {
#     nPerPar2 <- c()
#     for (i in 1:length(nPerPar)) {
#       Onesp <- sum(facPlc > i & facPlc < i + 1)
#       nPerPar2 <- c(nPerPar2, nPerPar[i], rep(1, Onesp), recursive = TRUE)
#     }
#     nPerPar <- nPerPar2
#   }
#
#   Npp <- c()
#   for (i in 1:length(nPerPar)) {
#     Npp <- c(Npp, rep(i, nPerPar[i]))
#   }
#
#   Locate <- sort(round(facPlc))
#   Maxv <- max(Locate) - 1
#
#   if (length(Locate) != length(Npp))
#     stop("Parcels incorrectly specified. Check input!")
#
#   if (Maxv > 0) {
#     ## Bug was here. With 1 factor Maxv=0. Skip this with a single factor
#     for (i in 1:Maxv) {
#       Mat <- match(i + 1, Locate)
#       if (Npp[Mat] == Npp[Mat - 1])
#         stop("Parcels incorrectly specified. Check input!")
#     }
#   }
#   ## Build a vector indicating the parcel to which each variable belongs and
#   ## a vector indicating the factor to which each variable belongs; if
#   ## variables are in the same parcel but different factors, an error
#   ## message is given in the output.
#
#   Onevec <- facPlc - round(facPlc)
#   NleaveA <- length(Onevec) - sum(Onevec == 0)
#   NleaveP <- sum(nPerPar == 1)
#
#   if (NleaveA < NleaveP)
#     warning("Single-variable parcels have been requested. Check input!")
#
#   if (NleaveA > NleaveP)
#     warning("More non-parceled variables have been", " requested than provided for in parcel",
#             " vector. Check input!")
#
#   if (length(names) > 1) {
#     if (length(names) != length(nPerPar))
#       warning("Number of parcel names provided not equal to number", " of parcels requested")
#   }
#
#   Data <- c(1:ncol(dataset))
#   ## creates a vector of the number of indicators e.g.
for three indicators, c(1, 2, # ## 3) # Nfactors <- max(facPlc) # ## scalar, number of factors # Nindicators <- length(Data) # ## scalar, number of indicators # Npar <- length(nPerPar) # ## scalar, number of parcels # Rmize <- runif(Nindicators, 1, Nindicators) # ## create vector of randomly ordered numbers, length of number of indicators # # Data <- rbind(facPlc, Rmize, Data) # ## 'Data' becomes object of three rows, consisting of 1) factor to which each # ## indicator belongs (in order to preserve indicator/factor assignment during # ## randomization) 2) randomly order numbers 3) indicator number # # Results <- matrix(numeric(0), nAlloc, Nindicators) # ## create empty matrix for parcel allocation matrix # # Pin <- nPerPar[1] # for (i in 2:length(nPerPar)) { # Pin <- c(Pin, nPerPar[i] + Pin[i - 1]) # ## creates vector which indicates the range of columns (endpoints) in each parcel # } # # for (i in 1:nAlloc) { # Data[2, ] <- runif(Nindicators, 1, Nindicators) # ## Replace second row with newly randomly ordered numbers # # Data <- Data[, order(Data[2, ])] # ## Order the columns according to the values of the second row # # Data <- Data[, order(Data[1, ])] # ## Order the columns according to the values of the first row in order to preserve # ## factor assignment # # Results[i, ] <- Data[3, ] # ## assign result to allocation matrix # } # # Alpha <- rbind(Results[1, ], dataset) # ## bind first random allocation to dataset 'Alpha' # # Allocations <- list() # ## create empty list for allocation data matrices # # for (i in 1:nAlloc) { # Ineff <- rep(NA, ncol(Results)) # Ineff2 <- c(1:ncol(Results)) # for (inefficient in 1:ncol(Results)) { # Ineff[Results[i, inefficient]] <- Ineff2[inefficient] # } # # Alpha[1, ] <- Ineff # ## replace first row of dataset matrix with row 'i' from allocation matrix # # Beta <- Alpha[, order(Alpha[1, ])] # ## arrangle dataset columns by values of first row assign to temporary matrix # ## 'Beta' # # Temp <- matrix(NA, nrow(dataset), Npar) # ## create empty matrix for averaged parcel variables # # TempAA <- if (length(1:Pin[1]) > 1) # Beta[2:nrow(Beta), 1:Pin[1]] else cbind(Beta[2:nrow(Beta), 1:Pin[1]], Beta[2:nrow(Beta), 1:Pin[1]]) # Temp[, 1] <- rowMeans(TempAA, na.rm = TRUE) # ## fill first column with averages from assigned indicators # for (al in 2:Npar) { # Plc <- Pin[al - 1] + 1 # ## placeholder variable for determining parcel width # TempBB <- if (length(Plc:Pin[al]) > 1) # Beta[2:nrow(Beta), Plc:Pin[al]] else cbind(Beta[2:nrow(Beta), Plc:Pin[al]], Beta[2:nrow(Beta), Plc:Pin[al]]) # Temp[, al] <- rowMeans(TempBB, na.rm = TRUE) # ## fill remaining columns with averages from assigned indicators # } # if (length(names) > 1) # colnames(Temp) <- names # Allocations[[i]] <- Temp # ## assign result to list of parcel datasets # } # # ## Write parceled datasets # if (as.vector(regexpr("/", parceloutput)) != -1) { # replist <- matrix(NA, nAlloc, 1) # for (i in 1:nAlloc) { # ## if (is.na(names)==TRUE) names <- matrix(NA,nrow( # colnames(Allocations[[i]]) <- names # utils::write.table(Allocations[[i]], paste(parceloutput, "/parcelruns", i, # ".dat", sep = ""), # row.names = FALSE, col.names = TRUE) # replist[i, 1] <- paste("parcelruns", i, ".dat", sep = "") # } # utils::write.table(replist, paste(parceloutput, "/parcelrunsreplist.dat", # sep = ""), # quote = FALSE, row.names = FALSE, col.names = FALSE) # } # # # ## Model A estimation # # { # Param_A <- list() # ## list for parameter estimated for each imputation # Fitind_A <- list() # ## list for fit indices estimated 
for each imputation # Converged_A <- list() # ## list for whether or not each allocation converged # ProperSolution_A <- list() # ## list for whether or not each allocation has proper solutions # ConvergedProper_A <- list() # ## list for whether or not each allocation converged and has proper solutions # # for (i in 1:nAlloc) { # data_A <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) # ## convert allocation matrix to dataframe for model estimation # fit_A <- lavaan::sem(syntaxA, data = data_A, ...) # ## estimate model in lavaan # if (lavInspect(fit_A, "converged") == TRUE) { # Converged_A[[i]] <- 1 # } else Converged_A[[i]] <- 0 # ## determine whether or not each allocation converged # Param_A[[i]] <- lavaan::parameterEstimates(fit_A)[, c("lhs", "op", "rhs", # "est", "se", "z", "pvalue", "ci.lower", "ci.upper")] # ## assign allocation parameter estimates to list # if (lavInspect(fit_A, "post.check") == TRUE & Converged_A[[i]] == 1) { # ProperSolution_A[[i]] <- 1 # } else ProperSolution_A[[i]] <- 0 # ## determine whether or not each allocation has proper solutions # if (any(is.na(Param_A[[i]][, 5] == TRUE))) # ProperSolution_A[[i]] <- 0 # ## make sure each allocation has existing SE # if (Converged_A[[i]] == 1 & ProperSolution_A[[i]] == 1) { # ConvergedProper_A[[i]] <- 1 # } else ConvergedProper_A[[i]] <- 0 # ## determine whether or not each allocation converged and has proper solutions # # if (ConvergedProper_A[[i]] == 0) # Param_A[[i]][, 4:9] <- matrix(data = NA, nrow(Param_A[[i]]), 6) # ## make parameter estimates null for nonconverged, improper solutions # # if (ConvergedProper_A[[i]] == 1) { # Fitind_A[[i]] <- lavaan::fitMeasures(fit_A, c("chisq", "df", "cfi", # "tli", "rmsea", "srmr", "logl", "bic", "aic")) # } else Fitind_A[[i]] <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA) # ### assign allocation parameter estimates to list # # } # # # nConverged_A <- Reduce("+", Converged_A) # ## count number of converged allocations # # nProperSolution_A <- Reduce("+", ProperSolution_A) # ## count number of allocations with proper solutions # # nConvergedProper_A <- Reduce("+", ConvergedProper_A) # ## count number of allocations with proper solutions # # if (nConvergedProper_A == 0) # stop("All allocations failed to converge and/or yielded improper solutions for Model A and/or B.") # ## stop program if no allocations converge # # Parmn_A <- Param_A[[1]] # ## assign first parameter estimates to mean dataframe # # ParSE_A <- matrix(NA, nrow(Parmn_A), nAlloc) # ParSEmn_A <- Parmn_A[, 5] # # Parsd_A <- matrix(NA, nrow(Parmn_A), nAlloc) # ## assign parameter estimates for S.D. calculation # # Fitmn_A <- Fitind_A[[1]] # ## assign first fit indices to mean dataframe # # Fitsd_A <- matrix(NA, length(Fitmn_A), nAlloc) # ## assign fit indices for S.D. calculation # # Sigp_A <- matrix(NA, nrow(Parmn_A), nAlloc) # ## assign p-values to calculate percentage significant # # Fitind_A <- data.frame(Fitind_A) # ### convert fit index table to data frame # # for (i in 1:nAlloc) { # # Parsd_A[, i] <- Param_A[[i]][, 4] # ## assign parameter estimates for S.D. estimation # # ParSE_A[, i] <- Param_A[[i]][, 5] # # if (i > 1) { # ParSEmn_A <- rowSums(cbind(ParSEmn_A, Param_A[[i]][, 5]), na.rm = TRUE) # } # # Sigp_A[, ncol(Sigp_A) - i + 1] <- Param_A[[i]][, 7] # ## assign p-values to calculate percentage significant # # Fitsd_A[, i] <- Fitind_A[[i]] # ## assign fit indices for S.D. 
estimation # # if (i > 1) { # Parmn_A[, 4:ncol(Parmn_A)] <- rowSums(cbind(Parmn_A[, 4:ncol(Parmn_A)], # Param_A[[i]][, 4:ncol(Parmn_A)]), na.rm = TRUE) # } # ## add together all parameter estimates # # if (i > 1) # Fitmn_A <- rowSums(cbind(Fitmn_A, Fitind_A[[i]]), na.rm = TRUE) # ## add together all fit indices # # } # # # Sigp_A <- Sigp_A + 0.45 # Sigp_A <- apply(Sigp_A, c(1, 2), round) # Sigp_A <- 1 - as.vector(rowMeans(Sigp_A, na.rm = TRUE)) # ## calculate percentage significant parameters # # Parsum_A <- cbind(apply(Parsd_A, 1, mean, na.rm = TRUE), # apply(Parsd_A, 1, sd, na.rm = TRUE), # apply(Parsd_A, 1, max, na.rm = TRUE), # apply(Parsd_A, 1, min, na.rm = TRUE), # apply(Parsd_A, 1, max, na.rm = TRUE) - apply(Parsd_A, 1, min, na.rm = TRUE), Sigp_A * 100) # colnames(Parsum_A) <- c("Avg Est.", "S.D.", "MAX", "MIN", "Range", "% Sig") # ## calculate parameter S.D., minimum, maximum, range, bind to percentage # ## significant # # ParSEmn_A <- Parmn_A[, 1:3] # ParSEfn_A <- cbind(ParSEmn_A, apply(ParSE_A, 1, mean, na.rm = TRUE), # apply(ParSE_A, 1, sd, na.rm = TRUE), # apply(ParSE_A, 1, max, na.rm = TRUE), # apply(ParSE_A, 1, min, na.rm = TRUE), # apply(ParSE_A, 1, max, na.rm = TRUE) - apply(ParSE_A, 1, min, na.rm = TRUE)) # colnames(ParSEfn_A) <- c("lhs", "op", "rhs", "Avg SE", "S.D.", # "MAX", "MIN", "Range") # # Fitsum_A <- cbind(apply(Fitsd_A, 1, mean, na.rm = TRUE), # apply(Fitsd_A, 1, sd, na.rm = TRUE), # apply(Fitsd_A, 1, max, na.rm = TRUE), # apply(Fitsd_A, 1, min, na.rm = TRUE), # apply(Fitsd_A, 1, max, na.rm = TRUE) - apply(Fitsd_A, 1, min, na.rm = TRUE)) # rownames(Fitsum_A) <- c("chisq", "df", "cfi", "tli", "rmsea", "srmr", "logl", # "bic", "aic") # ## calculate fit S.D., minimum, maximum, range # # Parmn_A[, 4:ncol(Parmn_A)] <- Parmn_A[, 4:ncol(Parmn_A)]/nConvergedProper_A # ## divide totalled parameter estimates by number converged allocations # Parmn_A <- Parmn_A[, 1:3] # ## remove confidence intervals from output # Parmn_A <- cbind(Parmn_A, Parsum_A) # ## bind parameter average estimates to cross-allocation information # Fitmn_A <- Fitmn_A/nConvergedProper_A # ## divide totalled fit indices by number converged allocations # # pChisq_A <- list() # ## create empty list for Chi-square p-values # sigChisq_A <- list() # ## create empty list for Chi-square significance # # for (i in 1:nAlloc) { # pChisq_A[[i]] <- (1 - pchisq(Fitsd_A[1, i], Fitsd_A[2, i])) # ## calculate p-value for each Chi-square # if (is.na(pChisq_A[[i]]) == FALSE & pChisq_A[[i]] < 0.05) { # sigChisq_A[[i]] <- 1 # } else sigChisq_A[[i]] <- 0 # } # ## count number of allocations with significant chi-square # # PerSigChisq_A <- (Reduce("+", sigChisq_A))/nConvergedProper_A * 100 # PerSigChisq_A <- round(PerSigChisq_A, 3) # ## calculate percent of allocations with significant chi-square # # PerSigChisqCol_A <- c(PerSigChisq_A, "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", # "n/a", "n/a") # ## create list of Chi-square Percent Significant and 'n/a' (used for fit summary # ## table) # # options(stringsAsFactors = FALSE) # ## set default option to allow strings into dataframe without converting to factors # # Fitsum_A <- data.frame(Fitsum_A, PerSigChisqCol_A) # colnames(Fitsum_A) <- c("Avg Ind", "S.D.", "MAX", "MIN", "Range", "% Sig") # ### bind to fit averages # # options(stringsAsFactors = TRUE) # ## unset option to allow strings into dataframe without converting to factors # # ParSEfn_A[, 4:8] <- apply(ParSEfn_A[, 4:8], 2, round, digits = 3) # Parmn_A[, 4:9] <- apply(Parmn_A[, 4:9], 2, round, digits = 3) # Fitsum_A[, 
1:5] <- apply(Fitsum_A[, 1:5], 2, round, digits = 3) # ## round output to three digits # # Fitsum_A[2, 2:5] <- c("n/a", "n/a", "n/a", "n/a") # ## Change df row to 'n/a' for sd, max, min, and range # # Output_A <- list(Parmn_A, ParSEfn_A, Fitsum_A) # names(Output_A) <- c("Estimates_A", "SE_A", "Fit_A") # ## output summary for model A # # } # # ## Model B estimation # # { # Param <- list() # ## list for parameter estimated for each imputation # Fitind <- list() # ## list for fit indices estimated for each imputation # Converged <- list() # ## list for whether or not each allocation converged # ProperSolution <- list() # ## list for whether or not each allocation has proper solutions # ConvergedProper <- list() # ## list for whether or not each allocation is converged and proper # # for (i in 1:nAlloc) { # data <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) # ## convert allocation matrix to dataframe for model estimation # fit <- lavaan::sem(syntaxB, data = data, ...) # ## estimate model in lavaan # if (lavInspect(fit, "converged") == TRUE) { # Converged[[i]] <- 1 # } else Converged[[i]] <- 0 # ## determine whether or not each allocation converged # Param[[i]] <- lavaan::parameterEstimates(fit)[, c("lhs", "op", "rhs", # "est", "se", "z", "pvalue", "ci.lower", "ci.upper")] # ## assign allocation parameter estimates to list # if (lavInspect(fit, "post.check") == TRUE & Converged[[i]] == 1) { # ProperSolution[[i]] <- 1 # } else ProperSolution[[i]] <- 0 # ## determine whether or not each allocation has proper solutions # if (any(is.na(Param[[i]][, 5] == TRUE))) # ProperSolution[[i]] <- 0 # ## make sure each allocation has existing SE # if (Converged[[i]] == 1 & ProperSolution[[i]] == 1) { # ConvergedProper[[i]] <- 1 # } else ConvergedProper[[i]] <- 0 # ## determine whether or not each allocation converged and has proper solutions # # if (ConvergedProper[[i]] == 0) # Param[[i]] <- matrix(data = NA, nrow(Param[[i]]), ncol(Param[[i]])) # ## make parameter estimates null for nonconverged, improper solutions # # if (ConvergedProper[[i]] == 1) { # Fitind[[i]] <- lavaan::fitMeasures(fit, c("chisq", "df", "cfi", "tli", # "rmsea", "srmr", "logl", "bic", "aic")) # } else Fitind[[i]] <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA) # ### assign allocation parameter estimates to list # # # } # # # # # nConverged <- Reduce("+", Converged) # ## count number of converged allocations # # nProperSolution <- Reduce("+", ProperSolution) # ## count number of allocations with proper solutions # # nConvergedProper <- Reduce("+", ConvergedProper) # ## count number of allocations with proper solutions # # if (nConvergedProper == 0) # stop("All allocations failed to converge", " and/or yielded improper solutions for", # " Model A and/or B.") # ## stop program if no allocations converge # # Parmn <- Param[[1]] # ## assign first parameter estimates to mean dataframe # # ParSE <- matrix(NA, nrow(Parmn), nAlloc) # ParSEmn <- Parmn[, 5] # # Parsd <- matrix(NA, nrow(Parmn), nAlloc) # ## assign parameter estimates for S.D. calculation # # Fitmn <- Fitind[[1]] # ## assign first fit indices to mean dataframe # # Fitsd <- matrix(NA, length(Fitmn), nAlloc) # ## assign fit indices for S.D. calculation # # Sigp <- matrix(NA, nrow(Parmn), nAlloc) # ## assign p-values to calculate percentage significant # # Fitind <- data.frame(Fitind) # ### convert fit index table to dataframe # # # for (i in 1:nAlloc) { # # Parsd[, i] <- Param[[i]][, 4] # ## assign parameter estimates for S.D. 
estimation # # ParSE[, i] <- Param[[i]][, 5] # # if (i > 1) # ParSEmn <- rowSums(cbind(ParSEmn, Param[[i]][, 5]), na.rm = TRUE) # # Sigp[, ncol(Sigp) - i + 1] <- Param[[i]][, 7] # ## assign p-values to calculate percentage significant # # # Fitsd[, i] <- Fitind[[i]] # ## assign fit indices for S.D. estimation # # if (i > 1) { # Parmn[, 4:ncol(Parmn)] <- rowSums(cbind(Parmn[, 4:ncol(Parmn)], Param[[i]][, # 4:ncol(Parmn)]), na.rm = TRUE) # } # ## add together all parameter estimates # # if (i > 1) # Fitmn <- rowSums(cbind(Fitmn, Fitind[[i]]), na.rm = TRUE) # ## add together all fit indices # # } # # # Sigp <- Sigp + 0.45 # Sigp <- apply(Sigp, c(1, 2), round) # Sigp <- 1 - as.vector(rowMeans(Sigp, na.rm = TRUE)) # ## calculate percentage significant parameters # # Parsum <- cbind(apply(Parsd, 1, mean, na.rm = TRUE), apply(Parsd, 1, sd, na.rm = TRUE), # apply(Parsd, 1, max, na.rm = TRUE), apply(Parsd, 1, min, na.rm = TRUE), # apply(Parsd, 1, max, na.rm = TRUE) - apply(Parsd, 1, min, na.rm = TRUE), # Sigp * 100) # colnames(Parsum) <- c("Avg Est", "S.D.", "MAX", "MIN", "Range", "% Sig") # ## calculate parameter S.D., minimum, maximum, range, bind to percentage # ## significant # # ParSEmn <- Parmn[, 1:3] # ParSEfn <- cbind(ParSEmn, apply(ParSE, 1, mean, na.rm = TRUE), apply(ParSE, # 1, sd, na.rm = TRUE), apply(ParSE, 1, max, na.rm = TRUE), apply(ParSE, # 1, min, na.rm = TRUE), apply(ParSE, 1, max, na.rm = TRUE) - apply(ParSE, # 1, min, na.rm = TRUE)) # colnames(ParSEfn) <- c("lhs", "op", "rhs", "Avg SE", "S.D.", "MAX", "MIN", # "Range") # # Fitsum <- cbind(apply(Fitsd, 1, mean, na.rm = TRUE), apply(Fitsd, 1, sd, na.rm = TRUE), # apply(Fitsd, 1, max, na.rm = TRUE), apply(Fitsd, 1, min, na.rm = TRUE), # apply(Fitsd, 1, max, na.rm = TRUE) - apply(Fitsd, 1, min, na.rm = TRUE)) # rownames(Fitsum) <- c("chisq", "df", "cfi", "tli", "rmsea", "srmr", "logl", # "bic", "aic") # ## calculate fit S.D., minimum, maximum, range # # Parmn[, 4:ncol(Parmn)] <- Parmn[, 4:ncol(Parmn)]/nConvergedProper # ## divide totalled parameter estimates by number converged allocations # Parmn <- Parmn[, 1:3] # ## remove confidence intervals from output # Parmn <- cbind(Parmn, Parsum) # ## bind parameter average estimates to cross-allocation information # Fitmn <- as.numeric(Fitmn) # ## make fit index values numeric # Fitmn <- Fitmn/nConvergedProper # ## divide totalled fit indices by number converged allocations # # pChisq <- list() # ## create empty list for Chi-square p-values # sigChisq <- list() # ## create empty list for Chi-square significance # # for (i in 1:nAlloc) { # # pChisq[[i]] <- (1 - pchisq(Fitsd[1, i], Fitsd[2, i])) # ## calculate p-value for each Chi-square # # if (is.na(pChisq[[i]]) == FALSE & pChisq[[i]] < 0.05) { # sigChisq[[i]] <- 1 # } else sigChisq[[i]] <- 0 # } # ## count number of allocations with significant chi-square # # PerSigChisq <- (Reduce("+", sigChisq))/nConvergedProper * 100 # PerSigChisq <- round(PerSigChisq, 3) # ## calculate percent of allocations with significant chi-square # # PerSigChisqCol <- c(PerSigChisq, "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", # "n/a", "n/a") # ## create list of Chi-square Percent Significant and 'n/a' (used for fit summary # ## table) # # options(stringsAsFactors = FALSE) # ## set default option to allow strings into dataframe without converting to factors # # Fitsum <- data.frame(Fitsum, PerSigChisqCol) # colnames(Fitsum) <- c("Avg Ind", "S.D.", "MAX", "MIN", "Range", "% Sig") # ### bind to fit averages # # options(stringsAsFactors = TRUE) # ## unset option to 
allow strings into dataframe without converting to factors # # ParSEfn[, 4:8] <- apply(ParSEfn[, 4:8], 2, round, digits = 3) # Parmn[, 4:9] <- apply(Parmn[, 4:9], 2, round, digits = 3) # Fitsum[, 1:5] <- apply(Fitsum[, 1:5], 2, round, digits = 3) # ## round output to three digits # # Fitsum[2, 2:5] <- c("n/a", "n/a", "n/a", "n/a") # ## Change df row to 'n/a' for sd, max, min, and range # # Output_B <- list(Parmn, ParSEfn, Fitsum) # names(Output_B) <- c("Estimates_B", "SE_B", "Fit_B") # ## output summary for model A # # } # # ## Model Comparison (everything in this section is new) # # { # Converged_AB <- list() # ## create list of convergence comparison for each allocation # ProperSolution_AB <- list() # ## create list of proper solution comparison for each allocation # ConvergedProper_AB <- list() # ## create list of convergence and proper solution comparison for each allocation # lrtest_AB <- list() # ## create list for likelihood ratio test for each allocation # lrchisq_AB <- list() # ## create list for likelihood ratio chi square value # lrchisqp_AB <- list() # ## create list for likelihood ratio test p-value # lrsig_AB <- list() # ## create list for likelihood ratio test significance # # for (i in 1:nAlloc) { # if (Converged_A[[i]] == 1 & Converged[[i]] == 1) { # Converged_AB[[i]] <- 1 # } else Converged_AB[[i]] <- 0 # ## compare convergence # # if (ProperSolution_A[[i]] == 1 & ProperSolution[[i]] == 1) { # ProperSolution_AB[[i]] <- 1 # } else ProperSolution_AB[[i]] <- 0 # ## compare existence of proper solutions # # if (ConvergedProper_A[[i]] == 1 & ConvergedProper[[i]] == 1) { # ConvergedProper_AB[[i]] <- 1 # } else ConvergedProper_AB[[i]] <- 0 # ## compare existence of proper solutions and convergence # # # # if (ConvergedProper_AB[[i]] == 1) { # # data <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) # ## convert allocation matrix to dataframe for model estimation # fit_A <- lavaan::sem(syntaxA, data = data, ...) # ## estimate model A in lavaan # fit <- lavaan::sem(syntaxB, data = data, ...) 
# ## estimate model B in lavaan # lrtest_AB[[i]] <- lavaan::lavTestLRT(fit_A, fit) # ## likelihood ratio test comparing A and B # lrtestd_AB <- as.data.frame(lrtest_AB[[i]], row.names = NULL, optional = FALSE) # ## convert lrtest results to dataframe # lrchisq_AB[[i]] <- lrtestd_AB[2, 5] # ## write lrtest chisq as single numeric variable # lrchisqp_AB[[i]] <- lrtestd_AB[2, 7] # ## write lrtest p-value as single numeric variable # if (lrchisqp_AB[[i]] < 0.05) { # lrsig_AB[[i]] <- 1 # } else { # lrsig_AB[[i]] <- 0 # } # ## determine statistical significance of lrtest # # } # } # # lrchisqp_AB <- unlist(lrchisqp_AB, recursive = TRUE, use.names = TRUE) # ## convert lrchisqp_AB from list to vector # lrchisqp_AB <- as.numeric(lrchisqp_AB) # ## make lrchisqp_AB numeric # lrsig_AB <- unlist(lrsig_AB, recursive = TRUE, use.names = TRUE) # ## convert lrsig_AB from list to vector # lrsig_AB <- as.numeric(lrsig_AB) # ### make lrsig_AB numeric # # # nConverged_AB <- Reduce("+", Converged_AB) # ## count number of allocations that converged for both A and B # nProperSolution_AB <- Reduce("+", ProperSolution_AB) # ## count number of allocations with proper solutions for both A and B # nConvergedProper_AB <- Reduce("+", ConvergedProper_AB) # ## count number of allocations that converged and have proper solutions for both A # ## and B # ProConverged_AB <- (nConverged_AB/nAlloc) * 100 # ## calc proportion of allocations that converged for both A and B # nlrsig_AB <- Reduce("+", lrsig_AB) # ## count number of allocations with significant lrtest between A and B # Prolrsig_AB <- (nlrsig_AB/nConvergedProper_AB) * 100 # ## calc proportion of allocations with significant lrtest between A and B # lrchisq_AB <- unlist(lrchisq_AB, recursive = TRUE, use.names = TRUE) # ### convert lrchisq_AB from list to vector # lrchisq_AB <- as.numeric(lrchisq_AB) # ### make lrchisq_AB numeric # AvgLRT_AB <- (Reduce("+", lrchisq_AB))/nConvergedProper_AB # ## calc average LRT # # LRTsum <- cbind(AvgLRT_AB, lrtestd_AB[2, 3], sd(lrchisq_AB, na.rm = TRUE), # max(lrchisq_AB), min(lrchisq_AB), # max(lrchisq_AB) - min(lrchisq_AB), Prolrsig_AB) # colnames(LRTsum) <- c("Avg LRT", "df", "S.D.", "MAX", "MIN", "Range", "% Sig") # ## calculate LRT distribution statistics # # FitDiff_AB <- Fitsd_A - Fitsd # ## compute fit index difference matrix # # for (i in 1:nAlloc) { # if (ConvergedProper_AB[[i]] != 1) # FitDiff_AB[1:9, i] <- 0 # } # ### make fit differences zero for each non-converged allocation # # BICDiff_AB <- list() # AICDiff_AB <- list() # RMSEADiff_AB <- list() # CFIDiff_AB <- list() # TLIDiff_AB <- list() # SRMRDiff_AB <- list() # BICDiffGT10_AB <- list() # ## create list noting each allocation in which A is preferred over B # # BICDiff_BA <- list() # AICDiff_BA <- list() # RMSEADiff_BA <- list() # CFIDiff_BA <- list() # TLIDiff_BA <- list() # SRMRDiff_BA <- list() # BICDiffGT10_BA <- list() # ## create list noting each allocation in which B is preferred over A # # for (i in 1:nAlloc) { # if (FitDiff_AB[8, i] < 0) { # BICDiff_AB[[i]] <- 1 # } else BICDiff_AB[[i]] <- 0 # if (FitDiff_AB[9, i] < 0) { # AICDiff_AB[[i]] <- 1 # } else AICDiff_AB[[i]] <- 0 # if (FitDiff_AB[5, i] < 0) { # RMSEADiff_AB[[i]] <- 1 # } else RMSEADiff_AB[[i]] <- 0 # if (FitDiff_AB[3, i] > 0) { # CFIDiff_AB[[i]] <- 1 # } else CFIDiff_AB[[i]] <- 0 # if (FitDiff_AB[4, i] > 0) { # TLIDiff_AB[[i]] <- 1 # } else TLIDiff_AB[[i]] <- 0 # if (FitDiff_AB[6, i] < 0) { # SRMRDiff_AB[[i]] <- 1 # } else SRMRDiff_AB[[i]] <- 0 # if (FitDiff_AB[8, i] < (-10)) { # 
BICDiffGT10_AB[[i]] <- 1 # } else BICDiffGT10_AB[[i]] <- 0 # } # nBIC_AoverB <- Reduce("+", BICDiff_AB) # nAIC_AoverB <- Reduce("+", AICDiff_AB) # nRMSEA_AoverB <- Reduce("+", RMSEADiff_AB) # nCFI_AoverB <- Reduce("+", CFIDiff_AB) # nTLI_AoverB <- Reduce("+", TLIDiff_AB) # nSRMR_AoverB <- Reduce("+", SRMRDiff_AB) # nBICDiffGT10_AoverB <- Reduce("+", BICDiffGT10_AB) # ## compute number of 'A preferred over B' for each fit index # # for (i in 1:nAlloc) { # if (FitDiff_AB[8, i] > 0) { # BICDiff_BA[[i]] <- 1 # } else BICDiff_BA[[i]] <- 0 # if (FitDiff_AB[9, i] > 0) { # AICDiff_BA[[i]] <- 1 # } else AICDiff_BA[[i]] <- 0 # if (FitDiff_AB[5, i] > 0) { # RMSEADiff_BA[[i]] <- 1 # } else RMSEADiff_BA[[i]] <- 0 # if (FitDiff_AB[3, i] < 0) { # CFIDiff_BA[[i]] <- 1 # } else CFIDiff_BA[[i]] <- 0 # if (FitDiff_AB[4, i] < 0) { # TLIDiff_BA[[i]] <- 1 # } else TLIDiff_BA[[i]] <- 0 # if (FitDiff_AB[6, i] > 0) { # SRMRDiff_BA[[i]] <- 1 # } else SRMRDiff_BA[[i]] <- 0 # if (FitDiff_AB[8, i] > (10)) { # BICDiffGT10_BA[[i]] <- 1 # } else BICDiffGT10_BA[[i]] <- 0 # } # nBIC_BoverA <- Reduce("+", BICDiff_BA) # nAIC_BoverA <- Reduce("+", AICDiff_BA) # nRMSEA_BoverA <- Reduce("+", RMSEADiff_BA) # nCFI_BoverA <- Reduce("+", CFIDiff_BA) # nTLI_BoverA <- Reduce("+", TLIDiff_BA) # nSRMR_BoverA <- Reduce("+", SRMRDiff_BA) # nBICDiffGT10_BoverA <- Reduce("+", BICDiffGT10_BA) # ## compute number of 'B preferred over A' for each fit index # # BICDiffAvgtemp <- list() # AICDiffAvgtemp <- list() # RMSEADiffAvgtemp <- list() # CFIDiffAvgtemp <- list() # TLIDiffAvgtemp <- list() # SRMRDiffAvgtemp <- list() # BICgt10DiffAvgtemp <- list() # ## create empty list for average fit index differences # # for (i in 1:nAlloc) { # if (BICDiff_AB[[i]] != 1) { # BICDiffAvgtemp[[i]] <- 0 # } else BICDiffAvgtemp[[i]] <- FitDiff_AB[8, i] # if (AICDiff_AB[[i]] != 1) { # AICDiffAvgtemp[[i]] <- 0 # } else AICDiffAvgtemp[[i]] <- FitDiff_AB[9, i] # if (RMSEADiff_AB[[i]] != 1) { # RMSEADiffAvgtemp[[i]] <- 0 # } else RMSEADiffAvgtemp[[i]] <- FitDiff_AB[5, i] # if (CFIDiff_AB[[i]] != 1) { # CFIDiffAvgtemp[[i]] <- 0 # } else CFIDiffAvgtemp[[i]] <- FitDiff_AB[3, i] # if (TLIDiff_AB[[i]] != 1) { # TLIDiffAvgtemp[[i]] <- 0 # } else TLIDiffAvgtemp[[i]] <- FitDiff_AB[4, i] # if (SRMRDiff_AB[[i]] != 1) { # SRMRDiffAvgtemp[[i]] <- 0 # } else SRMRDiffAvgtemp[[i]] <- FitDiff_AB[6, i] # if (BICDiffGT10_AB[[i]] != 1) { # BICgt10DiffAvgtemp[[i]] <- 0 # } else BICgt10DiffAvgtemp[[i]] <- FitDiff_AB[8, i] # } # ## make average fit index difference list composed solely of values where A is # ## preferred over B # # BICDiffAvg_AB <- Reduce("+", BICDiffAvgtemp)/nBIC_AoverB * (-1) # AICDiffAvg_AB <- Reduce("+", AICDiffAvgtemp)/nAIC_AoverB * (-1) # RMSEADiffAvg_AB <- Reduce("+", RMSEADiffAvgtemp)/nRMSEA_AoverB * (-1) # CFIDiffAvg_AB <- Reduce("+", CFIDiffAvgtemp)/nCFI_AoverB # TLIDiffAvg_AB <- Reduce("+", TLIDiffAvgtemp)/nTLI_AoverB # SRMRDiffAvg_AB <- Reduce("+", SRMRDiffAvgtemp)/nSRMR_AoverB * (-1) # BICgt10DiffAvg_AB <- Reduce("+", BICgt10DiffAvgtemp)/nBICDiffGT10_AoverB * # (-1) # ## calc average fit index difference when A is preferred over B # # FitDiffAvg_AoverB <- list(BICDiffAvg_AB, AICDiffAvg_AB, RMSEADiffAvg_AB, CFIDiffAvg_AB, # TLIDiffAvg_AB, SRMRDiffAvg_AB) # ## create list of all fit index differences when A is preferred over B # # FitDiffAvg_AoverB <- unlist(FitDiffAvg_AoverB, recursive = TRUE, use.names = TRUE) # ### convert from list to vector # # for (i in 1:nAlloc) { # if (BICDiff_BA[[i]] != 1) { # BICDiffAvgtemp[[i]] <- 0 # } else BICDiffAvgtemp[[i]] <- 
FitDiff_AB[8, i] # if (AICDiff_BA[[i]] != 1) { # AICDiffAvgtemp[[i]] <- 0 # } else AICDiffAvgtemp[[i]] <- FitDiff_AB[9, i] # if (RMSEADiff_BA[[i]] != 1) { # RMSEADiffAvgtemp[[i]] <- 0 # } else RMSEADiffAvgtemp[[i]] <- FitDiff_AB[5, i] # if (CFIDiff_BA[[i]] != 1) { # CFIDiffAvgtemp[[i]] <- 0 # } else CFIDiffAvgtemp[[i]] <- FitDiff_AB[3, i] # if (TLIDiff_BA[[i]] != 1) { # TLIDiffAvgtemp[[i]] <- 0 # } else TLIDiffAvgtemp[[i]] <- FitDiff_AB[4, i] # if (SRMRDiff_BA[[i]] != 1) { # SRMRDiffAvgtemp[[i]] <- 0 # } else SRMRDiffAvgtemp[[i]] <- FitDiff_AB[6, i] # if (BICDiffGT10_BA[[i]] != 1) { # BICgt10DiffAvgtemp[[i]] <- 0 # } else BICgt10DiffAvgtemp[[i]] <- FitDiff_AB[8, i] # } # ## make average fit index difference list composed solely of values where B is # ## preferred over A # # BICDiffAvg_BA <- Reduce("+", BICDiffAvgtemp)/nBIC_BoverA # AICDiffAvg_BA <- Reduce("+", AICDiffAvgtemp)/nAIC_BoverA # RMSEADiffAvg_BA <- Reduce("+", RMSEADiffAvgtemp)/nRMSEA_BoverA # CFIDiffAvg_BA <- Reduce("+", CFIDiffAvgtemp)/nCFI_BoverA * (-1) # TLIDiffAvg_BA <- Reduce("+", TLIDiffAvgtemp)/nTLI_BoverA * (-1) # SRMRDiffAvg_BA <- Reduce("+", SRMRDiffAvgtemp)/nSRMR_BoverA # BICgt10DiffAvg_BA <- Reduce("+", BICgt10DiffAvgtemp)/nBICDiffGT10_BoverA # ## calc average fit index difference when B is preferred over A # # FitDiffAvg_BoverA <- list(BICDiffAvg_BA, AICDiffAvg_BA, RMSEADiffAvg_BA, CFIDiffAvg_BA, # TLIDiffAvg_BA, SRMRDiffAvg_BA) # ## create list of all fit index differences when B is preferred over A # # FitDiffAvg_BoverA <- unlist(FitDiffAvg_BoverA, recursive = TRUE, use.names = TRUE) # ### convert from list to vector # # FitDiffBICgt10_AoverB <- nBICDiffGT10_AoverB/nConvergedProper_AB * 100 # ### calculate portion of allocations where A strongly preferred over B # # FitDiffBICgt10_BoverA <- nBICDiffGT10_BoverA/nConvergedProper_AB * 100 # ### calculate portion of allocations where B strongly preferred over A # # FitDiffBICgt10 <- rbind(FitDiffBICgt10_AoverB, FitDiffBICgt10_BoverA) # rownames(FitDiffBICgt10) <- c("Very Strong evidence for A>B", "Very Strong evidence for B>A") # colnames(FitDiffBICgt10) <- "% Allocations" # ### create table of proportions of 'A strongly preferred over B' and 'B strongly # ### preferred over A' # # FitDiff_AoverB <- list(nBIC_AoverB/nConvergedProper_AB * 100, nAIC_AoverB/nConvergedProper_AB * # 100, nRMSEA_AoverB/nConvergedProper_AB * 100, nCFI_AoverB/nConvergedProper_AB * # 100, nTLI_AoverB/nConvergedProper_AB * 100, nSRMR_AoverB/nConvergedProper_AB * # 100) # ### create list of all proportions of 'A preferred over B' # FitDiff_BoverA <- list(nBIC_BoverA/nConvergedProper_AB * 100, nAIC_BoverA/nConvergedProper_AB * # 100, nRMSEA_BoverA/nConvergedProper_AB * 100, nCFI_BoverA/nConvergedProper_AB * # 100, nTLI_BoverA/nConvergedProper_AB * 100, nSRMR_BoverA/nConvergedProper_AB * # 100) # ### create list of all proportions of 'B preferred over A' # # FitDiff_AoverB <- unlist(FitDiff_AoverB, recursive = TRUE, use.names = TRUE) # ### convert from list to vector # # FitDiff_BoverA <- unlist(FitDiff_BoverA, recursive = TRUE, use.names = TRUE) # ### convert from list to vector # # FitDiffSum_AB <- cbind(FitDiff_AoverB, FitDiffAvg_AoverB, FitDiff_BoverA, # FitDiffAvg_BoverA) # colnames(FitDiffSum_AB) <- c("% A>B", "Avg Amount A>B", "% B>A", "Avg Amount B>A") # rownames(FitDiffSum_AB) <- c("bic", "aic", "rmsea", "cfi", "tli", "srmr") # ## create table showing number of allocations in which A>B and B>A as well as # ## average difference values # # for (i in 1:nAlloc) { # is.na(FitDiff_AB[1:9, i]) 
<- ConvergedProper_AB[[i]] != 1 # } # ### make fit differences missing for each non-converged allocation # # LRThistMax <- max(hist(lrchisqp_AB, plot = FALSE)$counts) # BIChistMax <- max(hist(FitDiff_AB[8, 1:nAlloc], plot = FALSE)$counts) # AIChistMax <- max(hist(FitDiff_AB[9, 1:nAlloc], plot = FALSE)$counts) # RMSEAhistMax <- max(hist(FitDiff_AB[5, 1:nAlloc], plot = FALSE)$counts) # CFIhistMax <- max(hist(FitDiff_AB[3, 1:nAlloc], plot = FALSE)$counts) # TLIhistMax <- max(hist(FitDiff_AB[4, 1:nAlloc], plot = FALSE)$counts) # ### calculate y-axis height for each histogram # # LRThist <- hist(lrchisqp_AB, ylim = c(0, LRThistMax), xlab = "p-value", main = "LRT p-values") # ## plot histogram of LRT p-values # # BIChist <- hist(FitDiff_AB[8, 1:nAlloc], ylim = c(0, BIChistMax), xlab = "BIC_modA - BIC_modB", # main = "BIC Diff") # AIChist <- hist(FitDiff_AB[9, 1:nAlloc], ylim = c(0, AIChistMax), xlab = "AIC_modA - AIC_modB", # main = "AIC Diff") # RMSEAhist <- hist(FitDiff_AB[5, 1:nAlloc], ylim = c(0, RMSEAhistMax), xlab = "RMSEA_modA - RMSEA_modB", # main = "RMSEA Diff") # CFIhist <- hist(FitDiff_AB[3, 1:nAlloc], ylim = c(0, CFIhistMax), xlab = "CFI_modA - CFI_modB", # main = "CFI Diff") # TLIhist <- hist(FitDiff_AB[4, 1:nAlloc], ylim = c(0, TLIhistMax), xlab = "TLI_modA - TLI_modB", # main = "TLI Diff") # ### plot histograms for each index_modA - index_modB # BIChist # AIChist # RMSEAhist # CFIhist # TLIhist # # ConvergedProperSum <- rbind(nConverged_A/nAlloc, nConverged/nAlloc, nConverged_AB/nAlloc, # nConvergedProper_A/nAlloc, nConvergedProper/nAlloc, nConvergedProper_AB/nAlloc) # rownames(ConvergedProperSum) <- c("Converged_A", "Converged_B", "Converged_AB", # "ConvergedProper_A", "ConvergedProper_B", "ConvergedProper_AB") # colnames(ConvergedProperSum) <- "Proportion of Allocations" # ### create table summarizing proportions of converged allocations and allocations # ### with proper solutions # # Output_AB <- list(round(LRTsum, 3), "LRT results are interpretable specifically for nested models", # round(FitDiffSum_AB, 3), round(FitDiffBICgt10, 3), ConvergedProperSum) # names(Output_AB) <- c("LRT Summary, Model A vs. Model B", "Note:", "Fit Index Differences", # "Percent of Allocations with |BIC Diff| > 10", "Converged and Proper Solutions Summary") # ### output for model comparison # # } # # return(list(Output_A, Output_B, Output_AB)) # ## returns output for model A, model B, and the comparison of these # } semTools/R/runMI-methods.R0000644000176200001440000017716613532030541015114 0ustar liggesusers### Terrence D. Jorgensen ### Last updated: 29 August 2019 ### Class and Methods for lavaan.mi object, returned by runMI() ##' Class for a lavaan Model Fitted to Multiple Imputations ##' ##' This class extends the \code{\linkS4class{lavaanList}} class, created by ##' fitting a lavaan model to a list of data sets. In this case, the list of ##' data sets are multiple imputations of missing data. 
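##'
##' As a rough sketch of typical usage (with a hypothetical \code{model}
##' syntax and \code{incompleteData} object; see the \code{\link{runMI}} help
##' page for complete, tested examples):
##' \preformatted{
##' ## fit a CFA model to 20 imputations, generated on the fly by Amelia
##' fit <- cfa.mi(model, data = incompleteData, m = 20,
##'               miPackage = "Amelia", seed = 12345)
##' summary(fit, fmi = TRUE)  # pooled estimates, SEs, and FMI
##' fitMeasures(fit)          # pooled test statistic and fit indices
##' }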
##'
##' @name lavaan.mi-class
##' @importClassesFrom lavaan lavaanList
##' @aliases lavaan.mi-class show,lavaan.mi-method summary,lavaan.mi-method
##' fitMeasures,lavaan.mi-method fitmeasures,lavaan.mi-method
##' anova,lavaan.mi-method nobs,lavaan.mi-method coef,lavaan.mi-method
##' vcov,lavaan.mi-method fitted,lavaan.mi-method fitted.values,lavaan.mi-method
##' residuals,lavaan.mi-method resid,lavaan.mi-method
##' @docType class
##'
##' @slot coefList \code{list} of estimated coefficients in matrix format (one
##' per imputation) as output by \code{\link[lavaan]{lavInspect}(fit, "est")}
##' @slot phiList \code{list} of model-implied latent-variable covariance
##' matrices (one per imputation) as output by
##' \code{\link[lavaan]{lavInspect}(fit, "cov.lv")}
##' @slot miList \code{list} of modification indices output by
##' \code{\link[lavaan]{modindices}}
##' @slot seed \code{integer} seed set before running imputations
##' @slot lavListCall call to \code{\link[lavaan]{lavaanList}} used to fit the
##' model to the list of imputed data sets in \code{@@DataList}, stored as a
##' \code{list} of arguments
##' @slot imputeCall call to imputation function (if used), stored as a
##' \code{list} of arguments
##' @slot convergence \code{list} of \code{logical} vectors indicating whether,
##' for each imputed data set, (1) the model converged on a solution, (2)
##' \emph{SE}s could be calculated, (3) the (residual) covariance matrix of
##' latent variables (\eqn{\Psi}) is non-positive-definite, and (4) the
##' residual covariance matrix of observed variables (\eqn{\Theta}) is
##' non-positive-definite.
##' @slot lavaanList_slots All remaining slots are from
##' \code{\linkS4class{lavaanList}}, but \code{\link{runMI}} only populates a
##' subset of the \code{list} slots, two of them with custom information:
##' @slot DataList The \code{list} of imputed data sets
##' @slot SampleStatsList List of output from
##' \code{\link[lavaan]{lavInspect}(fit, "sampstat")} applied to each fitted
##' model
##' @slot ParTableList See \code{\linkS4class{lavaanList}}
##' @slot vcovList See \code{\linkS4class{lavaanList}}
##' @slot testList See \code{\linkS4class{lavaanList}}
##' @slot h1List See \code{\linkS4class{lavaanList}}. An additional element is
##' added to the \code{list}: \code{$PT} is the "saturated" model's parameter
##' table, returned by \code{\link[lavaan]{lav_partable_unrestricted}}.
##' @slot baselineList See \code{\linkS4class{lavaanList}}
##'
##' @param object An object of class \code{lavaan.mi}
##' @param se,ci,level,standardized,rsquare,header,output See
##' \code{\link[lavaan]{parameterEstimates}}. \code{output}
##' can also be passed to \code{\link[lavaan]{fitMeasures}}.
##' @param fmi \code{logical} indicating whether to include the Fraction of
##' Missing Information (FMI) for parameter estimates in the \code{summary}
##' output (see \bold{Value} section).
##' @param asymptotic \code{logical}. If \code{FALSE} (typically a default, but
##' see \bold{Value} section for details about particular methods), pooled
##' tests (of fit or pooled estimates) will be \emph{F} or \emph{t}
##' statistics with associated degrees of freedom (\emph{df}). If
##' \code{TRUE}, the (denominator) \emph{df} are assumed to be
##' sufficiently large for a \emph{t} statistic to follow a normal
##' distribution, so it is printed as a \emph{z} statistic; likewise,
##' \emph{F} times its numerator \emph{df} is printed, assumed to follow
##' a \eqn{\chi^2} distribution.
##' @param scale.W \code{logical}. If \code{TRUE} (default), the \code{vcov}
##' method will calculate the pooled covariance matrix by scaling the
##' within-imputation component by the ARIV (see Enders, 2010, p. 235,
##' for definition and formula). Otherwise, the pooled matrix is
##' calculated as the weighted sum of the within-imputation and
##' between-imputation components (see Enders, 2010, ch. 8, for details).
##' This in turn affects how the \code{summary} method calculates its
##' pooled standard errors, as well as the Wald test
##' (\code{\link{lavTestWald.mi}}).
##' @param omit.imps \code{character} vector specifying criteria for omitting
##' imputations from pooled results. Can include any of
##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the
##' default setting, which excludes any imputations that did not
##' converge or for which standard errors could not be computed. The
##' last option (\code{"no.npd"}) would exclude any imputations which
##' yielded a nonpositive definite covariance matrix for observed or
##' latent variables, which would include any "improper solutions" such
##' as Heywood cases. NPD solutions are not excluded by default because
##' they are likely to occur due to sampling error, especially in small
##' samples. However, gross model misspecification could also cause
##' NPD solutions, so users can compare pooled results with and without
##' this setting as a sensitivity analysis to see whether some
##' imputations warrant further investigation.
##' @param labels \code{logical} indicating whether the \code{coef} output
##' should include parameter labels. Default is \code{TRUE}.
##' @param total \code{logical} (default: \code{TRUE}) indicating whether the
##' \code{nobs} method should return the total sample size or (if
##' \code{FALSE}) a vector of group sample sizes.
##' @param type The meaning of this argument varies depending on which method
##' it is used for. Find detailed descriptions in the \bold{Value} section
##' under \code{coef}, \code{vcov}, and \code{residuals}.
##' @param fit.measures,baseline.model See \code{\link[lavaan]{fitMeasures}}.
##' \code{summary(object, fit.measures = TRUE)} will print (but not
##' return) a table of fit measures to the console.
##' @param ... Additional arguments passed to \code{\link{lavTestLRT.mi}}, or
##' subsequently to \code{\link[lavaan]{lavTestLRT}}.
##'
##' @return
##'
##' \item{coef}{\code{signature(object = "lavaan.mi", type = "free",
##' labels = TRUE, omit.imps = c("no.conv","no.se"))}:
##' See \code{\linkS4class{lavaan}}. Returns the pooled point estimates (i.e.,
##' averaged across imputed data sets; see Rubin, 1987).}
##'
##' \item{vcov}{\code{signature(object = "lavaan.mi", scale.W = TRUE,
##' omit.imps = c("no.conv","no.se"),
##' type = c("pooled","between","within","ariv"))}: Returns the
##' pooled covariance matrix of parameter estimates (\code{type = "pooled"},
##' the default), the within-imputations covariance matrix
##' (\code{type = "within"}), the between-imputations covariance matrix
##' (\code{type = "between"}), or the average relative increase in variance
##' (\code{type = "ariv"}) due to missing data.}
##'
##' \item{fitted.values}{\code{signature(object = "lavaan.mi",
##' omit.imps = c("no.conv","no.se"))}: See \code{\linkS4class{lavaan}}.
##' Returns model-implied moments, evaluated at the pooled point estimates.}
##' \item{fitted}{alias for \code{fitted.values}}
##'
##' \item{residuals}{\code{signature(object = "lavaan.mi",
##' type = c("raw","cor"), omit.imps = c("no.conv","no.se"))}:
##' See \code{\linkS4class{lavaan}}. By default (\code{type = "raw"}), returns
##' the difference between the model-implied moments from \code{fitted.values}
##' and the pooled observed moments (i.e., averaged across imputed data sets).
##' Standardized residuals are also available, using Bollen's
##' (\code{type = "cor"} or \code{"cor.bollen"}) or Bentler's
##' (\code{type = "cor.bentler"}) formulas.}
##' \item{resid}{alias for \code{residuals}}
##'
##' \item{nobs}{\code{signature(object = "lavaan.mi", total = TRUE)}: either
##' the total (default) sample size or a vector of group sample sizes
##' (\code{total = FALSE}).}
##'
##' \item{anova}{\code{signature(object = "lavaan.mi", ...)}:
##' Returns a test of model fit for a single model (\code{object}) or test(s)
##' of the difference(s) in fit between nested models passed via \code{...}.
##' See \code{\link{lavTestLRT.mi}} and \code{\link{compareFit}} for details.}
##'
##' \item{fitMeasures}{\code{signature(object = "lavaan.mi",
##' fit.measures = "all", baseline.model = NULL, output = "vector",
##' omit.imps = c("no.conv","no.se"), ...)}: See lavaan's
##' \code{\link[lavaan]{fitMeasures}} for details. Pass additional arguments
##' to \code{\link{lavTestLRT.mi}} via \code{...}.}
##' \item{fitmeasures}{alias for \code{fitMeasures}.}
##'
##' \item{show}{\code{signature(object = "lavaan.mi")}: returns a message about
##' convergence rates and estimation problems (if applicable) across imputed
##' data sets.}
##'
##' \item{summary}{\code{signature(object = "lavaan.mi", se = TRUE, ci = FALSE,
##' level = .95, standardized = FALSE, rsquare = FALSE, fmi = FALSE,
##' scale.W = !asymptotic, omit.imps = c("no.conv","no.se"),
##' asymptotic = FALSE, header = TRUE, output = "text", fit.measures = FALSE)}:
##' see \code{\link[lavaan]{parameterEstimates}} for details.
##' By default, \code{summary} returns pooled point and \emph{SE}
##' estimates, along with \emph{t} test statistics and their associated
##' \emph{df} and \emph{p} values. If \code{ci = TRUE}, confidence intervals
##' are returned with the specified confidence \code{level} (default 95\% CI).
##' If \code{asymptotic = TRUE}, \emph{z} instead of \emph{t} tests are
##' returned. \code{standardized} solution(s) can be requested by name
##' (\code{"std.lv"} or \code{"std.all"}), or both are returned with
##' \code{TRUE}. \emph{R}-squared for endogenous variables can be requested,
##' as well as the Fraction of Missing Information (FMI) for parameter
##' estimates. By default, the output will appear like \code{lavaan}'s
##' \code{summary} output, but if \code{output == "data.frame"}, the returned
##' \code{data.frame} will resemble the \code{parameterEstimates} output. The
##' \code{scale.W} argument is passed to \code{vcov} (see description above).
##' Setting \code{fit.measures=TRUE} will additionally print fit measures to
##' the console, but they will not be returned.}
##'
##' @section Objects from the Class: See the \code{\link{runMI}} function for
##' details. Wrapper functions include \code{\link{lavaan.mi}},
##' \code{\link{cfa.mi}}, \code{\link{sem.mi}}, and \code{\link{growth.mi}}.
##'
##' @author Terrence D.
Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' @references ##' Asparouhov, T., & Muthen, B. (2010). \emph{Chi-square statistics ##' with multiple imputation}. Technical Report. Retrieved from ##' www.statmodel.com ##' ##' Enders, C. K. (2010). \emph{Applied missing data analysis}. New York, NY: ##' Guilford. ##' ##' Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). ##' Significance levels from repeated \emph{p}-values with multiply-imputed ##' data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from ##' https://www.jstor.org/stable/24303994 ##' ##' Meng, X.-L., & Rubin, D. B. (1992). Performing likelihood ratio tests with ##' multiply-imputed data sets. \emph{Biometrika, 79}(1), 103--111. ##' doi:10.2307/2337151 ##' ##' Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. ##' New York, NY: Wiley. ##' ##' @examples ##' ##' ## See ?runMI help page ##' setClass("lavaan.mi", contains = "lavaanList", slots = c(coefList = "list", # coefficients in matrix format phiList = "list", # list of model-implied latent covariance matrices miList = "list", # modification indices seed = "integer", # seed set before running imputations lavListCall = "list", # store actual call to lavaanList imputeCall = "list", # store call from imputation, if used convergence = "list")) # also check SEs and Heywood cases ##' @name lavaan.mi-class ##' @aliases show,lavaan.mi-method ##' @export setMethod("show", "lavaan.mi", function(object) { nData <- object@meta$ndat useImps <- sapply(object@convergence, "[[", i = "converged") nConverged <- sum(useImps) SE <- sapply(object@convergence, "[[", "SE") SE[is.na(SE)] <- FALSE Heywood.ov <- sapply(object@convergence, "[[", "Heywood.ov") Heywood.ov[is.na(Heywood.ov)] <- FALSE Heywood.lv <- sapply(object@convergence, "[[", "Heywood.lv") Heywood.lv[is.na(Heywood.lv)] <- FALSE cat('lavaan.mi object based on ', nData, ' imputed data sets. \n', 'See class?lavaan.mi help page for available methods. \n\n', 'Convergence information:\n', 'The model converged on ', nConverged, ' imputed data sets \n\n', sep = "") if (!all(SE)) cat('Standard errors could not be computed for data set(s)', paste(which(!SE), collapse = ", "), '\nTry fitting the', 'model to the individual data set(s) to diagnose', 'problems. If they cannot be fixed, try inspecting the', 'imputations. It may be necessary to reimpute the data', 'with some restrictions imposed. \n\n') if (any(Heywood.ov | Heywood.lv)) cat('Heywood cases detected for data set(s)', paste(which(Heywood.ov | Heywood.lv), collapse = ", "), '\nThese are not necessarily a cause for concern, unless a pooled', 'estimate is also a Heywood case. 
\n\n') object }) ##' @importFrom stats pt qt pnorm qnorm ##' @importFrom lavaan lavListInspect parTable lavNames ##' @importFrom methods getMethod summary.lavaan.mi <- function(object, se = TRUE, ci = FALSE, level = .95, standardized = FALSE, rsquare = FALSE, fmi = FALSE, scale.W = !asymptotic, omit.imps = c("no.conv","no.se"), asymptotic = FALSE, header = TRUE, output = "text", fit.measures = FALSE) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) lavoptions <- lavListInspect(object, "options") ## extract parameter table with attributes for printing PT <- parTable(object) myCols <- c("lhs","op","rhs","exo") if (lavListInspect(object, "ngroups") > 1L) myCols <- c(myCols,"block","group") if (lavListInspect(object, "nlevels") > 1L) myCols <- c(myCols,"block","level") PE <- PT[ , unique(myCols)] free <- PT$free > 0L | PT$op == ":=" STDs <- !(PT$op %in% c("==","<",">")) # which rows can be standardized PE$est <- getMethod("coef","lavaan.mi")(object, type = "all", omit.imps = omit.imps) if (lavoptions$se == "none") { warning('pooled variances and tests unavailable when se="none" is requested') se <- FALSE } if (!se) fmi <- FALSE messPool <- paste0("Rubin's (1987) rules were used to pool point", if (se) " and SE", " estimates across ", m, " imputed data sets", if (se & !asymptotic) ", and to calculate degrees of", if (se & !asymptotic) " freedom for each parameter's t", if (se & !asymptotic) " test and CI.", "\n") if (se) { VCOV <- getMethod("vcov","lavaan.mi")(object, scale.W = scale.W, omit.imps = omit.imps) PE$se <- lavaan::lav_model_vcov_se(object@Model, VCOV = VCOV, lavpartable = object@ParTable) W <- rowMeans(sapply(object@ParTableList[useImps], "[[", i = "se")^2) B <- apply(sapply(object@ParTableList[useImps], "[[", i = "est"), 1, var) Bm <- B + B/m Tot <- W + Bm if (asymptotic) { PE$z[free] <- PE$est[free] / PE$se[free] PE$pvalue <- pnorm(-abs(PE$z))*2 crit <- qnorm(1 - (1 - level) / 2) } else { PE$t[free] <- PE$est[free] / PE$se[free] ## calculate df for t test ## can't do finite-sample correction because Wald z tests have no df ## (see Enders, 2010, p. 231, eq. 
8.13 & 8.14) PE$df[free] <- (m - 1) * (1 + W[free] / Bm[free])^2 ## if DF are obscenely large, set them to infinity for pretty printing PE$df <- ifelse(PE$df > 9999, Inf, PE$df) PE$pvalue <- pt(-abs(PE$t), df = PE$df)*2 crit <- qt(1 - (1 - level) / 2, df = PE$df) } if (ci) { PE$ci.lower <- PE$est - crit * PE$se PE$ci.upper <- PE$est + crit * PE$se PE$ci.lower[!free] <- PE$ci.upper[!free] <- PE$est[!free] } } if (is.logical(standardized)) { if (standardized) { standardized <- c("std.lv","std.all") if (length(lavNames(object, "ov.x")) && lavoptions$fixed.x) { standardized <- c(standardized, "std.nox") } } else standardized <- NULL } else standardized <- tolower(as.character(standardized)) if (length(standardized) || rsquare) { ## pooled estimates for standardizedSolution() est <- getMethod("coef", "lavaan.mi")(object, omit.imps = omit.imps) ## updates @Model@GLIST for standardizedSolution(..., GLIST=) object@Model <- lavaan::lav_model_set_parameters(object@Model, x = est) } if ("std.lv" %in% standardized) { PE$std.lv[STDs] <- lavaan::standardizedSolution(object, se = FALSE, type = "std.lv", GLIST = object@Model@GLIST, est = PE$est)$est.std } if ("std.all" %in% standardized) { PE$std.all[STDs] <- lavaan::standardizedSolution(object, se = FALSE, type = "std.all", GLIST = object@Model@GLIST, est = PE$est)$est.std } if ("std.nox" %in% standardized) { PE$std.nox[STDs] <- lavaan::standardizedSolution(object, se = FALSE, type = "std.nox", GLIST = object@Model@GLIST, est = PE$est)$est.std } if (fmi) { PE$fmi[free] <- Bm[free] / Tot[free] PE$riv[free] <- Bm[free] / W[free] # (Enders, 2010, p. 226, eq. 8.10) # == PE$riv[free] <- PE$fmi1[free] / (1 - PE$fmi1[free]) messRIV <- paste("The RIV will exceed 1 whenever between-imputation", "variance exceeds within-imputation variance", "(when FMI(1) > 50%).\n\n") } ## fancy or not? if (output == "text") { PE$label <- PT$label #FIXME: no longer needed? PE$exo <- 0L class(PE) <- c("lavaan.parameterEstimates","lavaan.data.frame","data.frame") attr(PE, "information") <- lavoptions$information attr(PE, "se") <- lavoptions$se attr(PE, "group.label") <- lavListInspect(object, "group.label") attr(PE, "level.label") <- c("within", lavListInspect(object, "cluster")) attr(PE, "bootstrap") <- lavoptions$bootstrap attr(PE, "bootstrap.successful") <- 0L #FIXME: assumes none. Implement Wei & Fan's mixing method? attr(PE, "missing") <- lavoptions$missing attr(PE, "observed.information") <- lavoptions$observed.information attr(PE, "h1.information") <- lavoptions$h1.information attr(PE, "header") <- header # FIXME: lavaan may add more!! if (fmi) cat("\n", messRIV, sep = "") } else { PE$exo <- NULL class(PE) <- c("lavaan.data.frame","data.frame") } ## requested R-squared? 
endoNames <- c(lavNames(object, "ov.nox"), lavNames(object, "lv.nox")) if (rsquare & length(endoNames)) { isEndo <- sapply(PE$lhs, function(x) x %in% endoNames) rsqPE <- PE[PE$lhs == PE$rhs & PE$op == "~~" & isEndo, ] rsqPE$op <- "r2" for (i in which(!sapply(colnames(PE), function(x) x %in% c("lhs","op","rhs","block", "level","group","est","exo")))) { rsqPE[ , i] <- NA } STD <- lavaan::standardizedSolution(object, se = FALSE, type = "std.all", GLIST = object@Model@GLIST, est = PE$est) isEndoSTD <- sapply(STD$lhs, function(x) x %in% endoNames) std.all <- STD$est.std[STD$lhs == STD$rhs & STD$op == "~~" & isEndoSTD] rsqPE$est <- ifelse(std.all < 0, NA, 1 - std.all) # negative variances if (output == "text") rsqPE$label <- "" PE <- rbind(PE, rsqPE) } if (output == "data.frame") PE <- PE[!(PE$op %in% c("==","<",">")), ] rownames(PE) <- NULL if (output == "text") { getMethod("show", "lavaan.mi")(object) cat(messPool) } if (fit.measures) { indices <- c("chisq","df","pvalue","cfi","tli","rmsea","srmr") FITS <- suppressWarnings(fitMeasures(object, fit.measures = indices, output = "text")) try(print(FITS, add.h0 = TRUE), silent = TRUE) } PE } ##' @name lavaan.mi-class ##' @aliases summary,lavaan.mi-method ##' @export setMethod("summary", "lavaan.mi", summary.lavaan.mi) ##' @name lavaan.mi-class ##' @aliases nobs,lavaan.mi-method ##' @importFrom lavaan lavListInspect ##' @export setMethod("nobs", "lavaan.mi", function(object, total = TRUE) { if (total) return(lavListInspect(object, "ntotal")) #FIXME: cluster N for multilevel? N <- lavListInspect(object, "norig") if (length(N) > 1L) names(N) <- lavListInspect(object, "group.label") N }) ##' @importFrom lavaan parTable coef.lavaan.mi <- function(object, type = "free", labels = TRUE, omit.imps = c("no.conv","no.se")) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) PT <- parTable(object) if (type == "user" || type == "all") { type <- "user" idx <- 1:length(PT$lhs) } else if (type == "free") { ## FIXME: duplicated leftover from old way of handling EQ constraints? 
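  ## Pooled point estimates are the simple arithmetic means of each parameter's
  ## estimates across the retained imputations (Rubin, 1987); e.g., a loading
  ## estimated as .51, .48, and .54 in m = 3 imputations pools to their
  ## mean, .51.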
idx <- which(PT$free > 0L & !duplicated(PT$free)) } ## extract coefficients for converged models coefList <- lapply(object@ParTableList[useImps], "[[", i = "est") out <- colMeans(do.call(rbind, coefList))[idx] ## attach names, set class if (labels) names(out) <- lavaan::lav_partable_labels(PT, type = type) class(out) <- c("lavaan.vector","numeric") out } ##' @name lavaan.mi-class ##' @aliases coef,lavaan.mi-method ##' @export setMethod("coef", "lavaan.mi", coef.lavaan.mi) ##' @importFrom stats cov ##' @importFrom lavaan lavListInspect parTable vcov.lavaan.mi <- function(object, type = c("pooled","between","within","ariv"), scale.W = TRUE, omit.imps = c("no.conv","no.se")) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) if (lavListInspect(object, "options")$se == "none") { warning('requested se="none", so only between-imputation (co)variance can', ' be computed') type <- "between" } type <- tolower(type[1]) if (!(type %in% c("pooled","between","within","ariv"))) stop("'", type, "' is not a valid option for 'type'") PT <- parTable(object) ncon <- sum(PT$op == "==") npar <- max(PT$free) - ncon coefList <- lapply(object@ParTableList[useImps], "[[", i = "est") B <- cov(do.call(rbind, coefList)[ , PT$free > 0L & !duplicated(PT$free)]) class(B) <- c("lavaan.matrix.symmetric","matrix") rownames(B) <- colnames(B) <- lavaan::lav_partable_labels(PT, type = "free") if (type == "between") return(B) W <- Reduce("+", lapply(object@vcovList[useImps], function(x) x$vcov)) / m class(W) <- c("lavaan.matrix.symmetric","matrix") dimnames(W) <- dimnames(B) if (type == "within") return(W) ## check whether equality constraints prevent inversion of W if (scale.W || type == "ariv") { inv.W <- if (ncon == 0) try(solve(W), silent = TRUE) else MASS::ginv(W) if (inherits(inv.W, "try-error")) { if (ncon == 0) { warning("Could not invert within-imputation covariance matrix. ", "Generalized inverse used instead.\nIt may be ", "safer to set `scale.W = FALSE' (and `asymptotic = TRUE').") } inv.W <- MASS::ginv(W) } ## relative increase in variance due to missing data r <- (1 + 1/m)/npar * sum(diag(B %*% inv.W)) # Enders (2010, p. 235) eqs. 8.20-21 if (type == "ariv") return(r) Total <- (1 + r) * W # FIXME: asked Yves for a hack, says it can't be inverted back to infoMat } else { ## less reliable, but constraints prevent inversion of W Total <- W + B + (1/m)*B ## Enders (2010, p. 235) eq. 8.19 } ## return pooled variance Total } ##' @name lavaan.mi-class ##' @aliases vcov,lavaan.mi-method ##' @export setMethod("vcov", "lavaan.mi", vcov.lavaan.mi) ##' @importFrom lavaan lavListInspect lavTestLRT anova.lavaan.mi <- function(object, ...) { ## save model names objname <- deparse(substitute(object)) dotnames <- as.character(sapply(substitute(list(...))[-1], deparse)) ## check class if (!inherits(object, "lavaan.mi")) stop("object is not class 'lavaan.mi'") ## check for additional arguments dots <- list(...) 
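  ## Sort the additional arguments below: any lavaan.mi objects are treated as
  ## competing models (compared via lavTestLRT.mi, or via compareFit when more
  ## than two models are supplied), and the remaining arguments are passed on
  ## to lavTestLRT() / lavTestLRT.mi().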
if (length(dots)) { ## separate lavaan.mi objects from other lavTestLRT.mi() arguments idx.mi <- which(sapply(dots, inherits, what = "lavaan.mi")) if (length(idx.mi)) { mods <- dots[idx.mi] dots <- dots[-idx.mi] ## save names for mods, so compareFit() doesn't break modnames <- dotnames[idx.mi] nonames <- which(names(mods) == "") names(mods)[nonames] <- modnames[nonames] } else { mods <- NULL modnames <- NULL } LRT.names <- intersect(names(dots), union(names(formals(lavTestLRT)), names(formals(lavTestLRT.mi)))) dots <- if (length(LRT.names)) dots[LRT.names] else NULL if (!is.null(dots$h1)) { #FIXME: this shouldn't be necessary: mods <- c(mods, list(h1 = dots$h1)) dots$h1 <- NULL } } else mods <- NULL ## run compareFit if length(idx.mi) > 1L if (length(mods) == 0L) { argList <- c(list(object = object), dots) results <- do.call(lavTestLRT.mi, argList) } else if (length(mods) == 1L) { argList <- c(list(object = object, h1 = mods[[1]]), dots) results <- do.call(lavTestLRT.mi, argList) } else if (length(mods) > 1L) { modList <- c(list(object), mods) names(modList) <- c(objname, modnames) argList <- c(modList, list(argsLRT = dots, indices = FALSE)) results <- do.call(compareFit, argList)@nested class(results) <- c("lavaan.data.frame","data.frame") attr(results, "header") <- "Nested Model Comparisons:" } results } ##' @name lavaan.mi-class ##' @aliases anova,lavaan.mi-method ##' @export setMethod("anova", "lavaan.mi", anova.lavaan.mi) ##' @importFrom lavaan lavListInspect lavNames ##' @importFrom methods getMethod ## utility function called within fitMeasures.mi() getSRMR <- function(object, type = "cor.bentler", level = "within", include.mean = TRUE, omit.imps = c("no.conv","no.se")) { conditional.x <- lavListInspect(object, "options")$conditional.x include.mean <- include.mean && lavListInspect(object, "meanstructure") include.diag <- type %in% c("cor.bentler","raw") mplus <- type == "mplus" if (mplus) type <- "cor.bollen" ## how many blocks to loop over nG <- lavListInspect(object, "ngroups") nlevels <- lavListInspect(object, "nlevels") ## save relevant sample sizes if (nlevels > 1L && level != "within") { n.per.group <- lavListInspect(object, "nclusters") #FIXME: only works for 2 levels N <- sum(n.per.group) } else { n.per.group <- lavListInspect(object, "nobs") N <- lavListInspect(object, "ntotal") } ## grab residuals R <- getMethod("resid", "lavaan.mi")(object, type = type, omit.imps = omit.imps) if (mplus) Rd <- getMethod("resid", "lavaan.mi")(object, type = "cor.bentler", omit.imps = omit.imps) ## restructure, if necessary if (nG == 1L) { loopBlocks <- 1L ## extract relevant level if (nlevels > 1L) { R <- R[[level]] if (mplus) Rd <- Rd[[level]] } ## to loop over blocks R <- list(R) if (mplus) Rd <- list(Rd) ## multiple groups AND multilevel } else if (nlevels > 1L) { loopBlocks <- 2*(1:nG) if (level == "within") loopBlocks <- loopBlocks - 1L R <- R[loopBlocks] if (mplus) Rd <- Rd[loopBlocks] } else loopBlocks <- 1:nG # no restructure necessary for multigroup 1-level models ## store vector of squared residuals RR <- vector("list", nG) for (b in loopBlocks) { index <- if (conditional.x) "res.cov" else "cov" RR[[b]] <- R[[b]][[index]][lower.tri(R[[b]][[index]], diag = FALSE)]^2 ## only capture means/variances of numeric modeled variables (not conditional.x) vv <- intersect(lavNames(object, type = "ov.num", block = b), lavNames(object, type = "ov.model", block = b)) if (include.diag) RR[[b]] <- c(RR[[b]], diag(R[[b]][[index]])[vv]^2) if (mplus) RR[[b]] <- c(RR[[b]], 
diag(Rd[[b]][[index]])[vv]^2) if (include.mean) { index <- if (conditional.x) "res.int" else "mean" RR[[b]] <- c(RR[[b]], R[[b]][[index]][vv]^2) } } ## take weighted average of group means as.numeric( (n.per.group %*% sqrt(sapply(RR, mean))) / N ) } ##' @importFrom lavaan lavNames lavListInspect ##' @importFrom stats pchisq uniroot fitMeasures.mi <- function(object, fit.measures = "all", baseline.model = NULL, output = "vector", omit.imps = c("no.conv","no.se"), ...) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) lavoptions <- lavListInspect(object, "options") fit.measures <- tolower(fit.measures) if (length(fit.measures) == 0L) fit.measures <- "all" ## narrow down fit indices incremental <- c("cfi","tli","nnfi","rfi","nfi","pnfi","ifi","rni") if ("all" %in% fit.measures) { indices <- c("chisq","df","pvalue","scaling", incremental, "rmsea","rmr","mfi","gammahat") } else { indices <- grep(pattern = paste(c("chisq","df","pvalue","scaling", incremental, "mfi","rmsea", "gammahat","rmr"), collapse = "|"), x = fit.measures, ignore.case = TRUE, value = TRUE) } ## CHI-SQUARED-BASED FIT INDICES notest <- length(lavoptions$test) == 1L && lavoptions$test == "none" if (notest || any(!grepl(pattern = "rmr", x = indices))) { ## check for additional arguments dots <- list(...) if (length(dots)) { LRT.names <- intersect(names(dots), union(names(formals(lavTestLRT)), names(formals(lavTestLRT.mi)))) dots <- if (length(LRT.names)) dots[LRT.names] else list(asymptotic = TRUE) } else dots <- list(asymptotic = TRUE) ## check test options (adapted from lavTestLRT.mi, limits duplicate warnings) test <- dots$test if (is.null(test)) { test <- "d3" # default } else test <- tolower(test[1]) if (tolower(test) %in% c("mr","meng.rubin","likelihood","lrt","mplus","d3")) test <- "D3" if (tolower(test) %in% c("lmrr","li.et.al","pooled.wald","d2")) test <- "D2" if (test == "D3" && !lavoptions$estimator %in% c("ML","PML","FML")) { message('"D3" only available using maximum likelihood estimation. 
', 'Changed test to "D2".') test <- "D2" } ## check for robust test.names <- lavoptions$test # lavaan 0.6-5: for now, we only acknowledge the first non-standard @test if (length(test.names) > 1L) { ## remove standard and any bootstrapped tests rm.idx <- which(test.names %in% c("standard","bootstrap","bollen.stine")) if (length(rm.idx) > 0L) { test.names <- test.names[-rm.idx] } ## only acknowledge the first scaled test statistic if (length(test.names) > 1L) { test.names <- test.names[1] } } robust <- any(test.names %in% c("satorra.bentler","yuan.bentler", "yuan.bentler.mplus","scaled.shifted", "mean.var.adjusted","satterthwaite")) if (robust) { ## assign pool.robust option to object if (is.null(dots$pool.robust)) { pool.robust <- formals(lavTestLRT.mi)$pool.robust # default value } else { pool.robust <- dots$pool.robust # user-specified value } } else dots$pool.robust <- pool.robust <- FALSE scaleshift <- any(test.names == "scaled.shifted") if (scaleshift) { if (test == "D3" | !pool.robust) message("If test = 'scaled.shifted' (estimator = 'WLSMV' or 'MLMV'), ", "model comparison is only available by (re)setting test = 'D2' ", "and pool.robust = TRUE.\n", "Control more options by passing arguments to lavTestLRT() via ", "the '...' argument.\n") dots$pool.robust <- pool.robust <- TRUE test <- 'D2' } if (pool.robust && test == "D3") { message('pool.robust = TRUE is only applicable when test = "D2". ', 'Changed test to "D2".') test <- "D2" } dots$test <- test ## pooled test statistic(s) argList <- c(list(object = object), dots) argList$asymptotic <- TRUE # in case it wasn't set in list(...) argList$omit.imps <- omit.imps out <- do.call(lavTestLRT.mi, argList) ## check for scaled test statistic (if not, set robust=FALSE) if (robust && is.na(out["chisq.scaled"])) robust <- FALSE ## fit baseline model if necessary if (any(indices %in% incremental)) { if (inherits(baseline.model, "lavaan.mi")) { baseFit <- baseline.model } else if (inherits(object@external$baseline.model, "lavaan.mi")) { baseFit <- object@external$baseline.model ## MUST fit PTb for "D3" likelihoods, but for "D2" use @baselineList } else if (test == "D2") { baseImps <- object@meta$baseline.ok if (!all(baseImps[useImps])) warning('The default independence model ', 'did not converge for data set(s): ', which(!baseImps[useImps])) w <- sapply(object@baselineList[ which(baseImps[useImps]) ], function(x) x$test$standard[["stat"]]) DF <- mean(sapply(object@baselineList[ which(baseImps[useImps]) ], function(x) x$test$standard[["df"]])) baseOut <- calculate.D2(w, DF, asymptotic = TRUE) if (robust) { if (pool.robust) { w.r <- sapply(object@baselineList[ which(baseImps[useImps]) ], function(x) x$test[[ test.names[1] ]][["stat"]]) DF.r <- mean(sapply(object@baselineList[ which(baseImps[useImps]) ], function(x) x$test[[ test.names[1] ]][["df"]])) base.robust <- calculate.D2(w.r, DF.r, asymptotic = TRUE) names(base.robust) <- paste0(names(base.robust), ".scaled") baseOut <- c(baseOut, base.robust) } else { baseOut <- robustify(ChiSq = baseOut, object = object, baseline = TRUE, useImps = which(baseImps[useImps])) } } baseFit <- NULL # for later checking, to avoid unnecessary calls } else { PTb <- object@baselineList[[ useImps[1] ]]$partable PTb[c("est","se")] <- NULL # FIXME: shouldn't need this line, but lav_partable_merge() fails when # lavaan:::lav_object_extended() returns a NULL slot instead of "plabel" PTb$plabel <- paste0(".p", PTb$id, ".") group <- lavListInspect(object, "group") if (length(group) == 0L) group <- NULL cluster <- 
lavListInspect(object, "cluster") if (length(cluster) == 0L) cluster <- NULL baseFit <- runMI(model = PTb, data = object@DataList[useImps], group = group, cluster = cluster, test = lavoptions$test, estimator = lavoptions$estimator, fixed.x = lavoptions$fixed.x, se = "none", # to save time conditional.x = lavoptions$conditional.x, ordered = lavListInspect(object, "ordered"), parameterization = lavoptions$parameterization) } if (!is.null(baseFit)) { baseImps <- sapply(baseFit@convergence, "[[", i = "converged") if (!all(baseImps)) warning('baseline.model did not converge for data set(s): ', useImps[!baseImps]) argList <- c(list(object = baseFit), dots) argList$asymptotic <- TRUE # in case it wasn't set in list(...) argList$omit.imps <- setdiff(omit.imps, "no.se") # se="none" in baseFit baseOut <- do.call(lavTestLRT.mi, argList) } # else { already used "D2" with @baselineList info to make baseOut } } X2 <- out[["chisq"]] DF <- out[["df"]] if (robust) { X2.sc <- out[["chisq.scaled"]] DF.sc <- out[["df.scaled"]] ## for mean.var.adjusted, mean DF across imputations if (!pool.robust) ch <- out[["chisq.scaling.factor"]] ## mean c_hat across imputations if (X2 < .Machine$double.eps && DF == 0) ch <- 0 ## for RMSEA if ("rmsea" %in% indices) { d <- mean(sapply(object@testList[useImps], function(x) sum(x[[ test.names[1] ]][["trace.UGamma"]]))) if (is.na(d) || d == 0) d <- NA # FIXME: only relevant when mean.var.adjusted? } } ## for CFI, TLI, etc. if (any(indices %in% incremental)) { bX2 <- baseOut[["chisq"]] bDF <- baseOut[["df"]] out <- c(out, baseline.chisq = bX2, baseline.df = bDF, baseline.pvalue = baseOut[["pvalue"]]) if (robust) { out["baseline.chisq.scaled"] <- bX2.sc <- baseOut[["chisq.scaled"]] out["baseline.df.scaled"] <- bDF.sc <- baseOut[["df.scaled"]] out["baseline.pvalue.scaled"] <- baseOut[["pvalue.scaled"]] if (!pool.robust) { cb <- baseOut[["chisq.scaling.factor"]] out["baseline.chisq.scaling.factor"] <- cb if (scaleshift) out["baseline.chisq.shift.parameters"] <- baseOut[["chisq.shift.parameters"]] } } } if ("cfi" %in% indices) { t1 <- max(X2 - DF, 0) t2 <- max(X2 - DF, bX2 - bDF, 0) out["cfi"] <- if(t1 == 0 && t2 == 0) 1 else 1 - t1/t2 if (robust) { ## scaled t1 <- max(X2.sc - DF.sc, 0) t2 <- max(X2.sc - DF.sc, bX2.sc - bDF.sc, 0) if (is.na(t1) || is.na(t2)) { out["cfi.scaled"] <- NA } else if (t1 == 0 && t2 == 0) { out["cfi.scaled"] <- 1 } else out["cfi.scaled"] <- 1 - t1/t2 ## Brosseau-Liard & Savalei MBR 2014, equation 15 if (!pool.robust & test.names[1] %in% c("satorra.bentler","yuan.bentler","yuan.bentler.mplus")) { t1 <- max(X2 - ch*DF, 0) t2 <- max(X2 - ch*DF, bX2 - cb*bDF, 0) if (is.na(t1) || is.na(t2)) { out["cfi.robust"] <- NA } else if (t1 == 0 && t2 == 0) { out["cfi.robust"] <- 1 } else out["cfi.robust"] <- 1 - t1/t2 } } } if ("rni" %in% indices) { t1 <- X2 - DF t2 <- bX2 - bDF out["rni"] <- if (t2 == 0) NA else 1 - t1/t2 if (robust) { ## scaled t1 <- X2.sc - DF.sc t2 <- bX2.sc - bDF.sc if (is.na(t1) || is.na(t2)) { out["rni.scaled"] <- NA } else if (t2 == 0) { out["rni.scaled"] <- NA } else out["rni.scaled"] <- 1 - t1/t2 ## Brosseau-Liard & Savalei MBR 2014, equation 15 if (!pool.robust & test.names[1] %in% c("satorra.bentler","yuan.bentler","yuan.bentler.mplus")) { t1 <- X2 - ch*DF t2 <- bX2 - cb*bDF if (is.na(t1) || is.na(t2)) { out["rni.robust"] <- NA } else if (t1 == 0 && t2 == 0) { out["rni.robust"] <- NA } else out["rni.robust"] <- 1 - t1/t2 } } } if (any(indices %in% c("tli","nnfi"))) { t1 <- (X2 - DF)*bDF t2 <- (bX2 - bDF)*DF out["tli"] <- out["nnfi"] <- if (DF 
> 0) 1 - t1/t2 else 1
    if (robust) {
      ## scaled
      t1 <- (X2.sc - DF.sc)*bDF.sc
      t2 <- (bX2.sc - bDF.sc)*DF.sc
      if (is.na(t1) || is.na(t2)) {
        out["tli.scaled"] <- out["nnfi.scaled"] <- NA
      } else if (DF > 0 && t2 != 0) {
        out["tli.scaled"] <- out["nnfi.scaled"] <- 1 - t1/t2
      } else {
        out["tli.scaled"] <- out["nnfi.scaled"] <- 1
      }
      ## Brosseau-Liard & Savalei MBR 2014, equation 15
      if (!pool.robust & test.names[1] %in%
          c("satorra.bentler","yuan.bentler","yuan.bentler.mplus")) {
        t1 <- (X2 - ch*DF)*bDF
        t2 <- (bX2 - cb*bDF)*DF
        if (is.na(t1) || is.na(t2)) {
          out["tli.robust"] <- out["nnfi.robust"] <- NA
        } else if (DF > 0 && t2 != 0) {
          out["tli.robust"] <- out["nnfi.robust"] <- 1 - t1/t2
        } else out["tli.robust"] <- out["nnfi.robust"] <- 1
      }
    }
  }
  if ("rfi" %in% indices) {
    if (DF > 0) {
      t2 <- bX2 / bDF
      t1 <- t2 - X2/DF
      out["rfi"] <- if (t1 < 0 || t2 < 0) 1 else t1/t2
    } else out["rfi"] <- 1
    if (robust) {
      if (DF > 0) {
        t2 <- bX2.sc / bDF.sc
        t1 <- t2 - X2.sc/DF.sc
        out["rfi.scaled"] <- if (t1 < 0 || t2 < 0) 1 else t1/t2
      } else out["rfi.scaled"] <- 1
    }
  }
  if ("nfi" %in% indices) {
    if (DF > 0) {
      t1 <- bX2 - X2
      t2 <- bX2
      out["nfi"] <- t1 / t2
    } else out["nfi"] <- 1
    if (robust) out["nfi.scaled"] <- (bX2.sc - X2.sc) / bX2.sc
  }
  if ("pnfi" %in% indices) {
    t1 <- bX2 - X2
    t2 <- bX2
    out["pnfi"] <- (DF / bDF) * t1/t2
    if (robust) {
      t1 <- bX2.sc - X2.sc
      t2 <- bX2.sc
      out["pnfi.scaled"] <- (DF / bDF) * t1/t2
    }
  }
  if ("ifi" %in% indices) {
    t1 <- bX2 - X2
    t2 <- bX2 - DF
    out["ifi"] <- if (t2 < 0) 1 else t1/t2
    if (robust) {
      t1 <- bX2.sc - X2.sc
      t2 <- bX2.sc - DF.sc
      if (is.na(t2)) {
        out["ifi.scaled"] <- NA
      } else if (t2 < 0) {
        out["ifi.scaled"] <- 1
      } else out["ifi.scaled"] <- t1/t2
    }
  }
  N <- lavListInspect(object, "ntotal")
  Ns <- lavListInspect(object, "nobs")
  nG <- lavListInspect(object, "ngroups")
  nVars <- length(lavNames(object))
  if (!(lavoptions$likelihood == "normal" |
        lavoptions$estimator %in% c("ML","PML","FML"))) {
    N <- N - nG
    Ns <- Ns - 1
  }
  if ("mfi" %in% indices) {
    out["mfi"] <- exp(-0.5 * (X2 - DF) / N)
  }
  if ("rmsea" %in% indices) {
    N.RMSEA <- max(N, X2*4) # FIXME: good strategy??
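    ## The point estimate below follows lavaan's single-sample formula,
    ## RMSEA = sqrt(max(0, (X2/N)/df - 1/N)) * sqrt(nG), and the confidence
    ## limits are found by solving for the noncentrality parameter lambda at
    ## which pchisq(X2, df, ncp = lambda) equals .95 (lower) or .05 (upper).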
if (is.na(X2) || is.na(DF)) { out["rmsea"] <- as.numeric(NA) } else if (DF > 0) { getLambda <- function(lambda, chi, df, p) pchisq(chi, df, ncp=lambda) - p out["rmsea"] <- sqrt( max(0, (X2/N)/DF - 1/N) ) * sqrt(nG) ## lower confidence limit if (getLambda(0, X2, DF, .95) < 0.0) out["rmsea.ci.lower"] <- 0 else { lambda.l <- try(uniroot(f = getLambda, chi = X2, df = DF, p = .95, lower = 0, upper = X2)$root, silent = TRUE) if (inherits(lambda.l, "try-error")) lambda.l <- NA out["rmsea.ci.lower"] <- sqrt( lambda.l/(N*DF) ) * sqrt(nG) } ## upper confidence limit if (getLambda(N.RMSEA, X2, DF, .05) > 0 || getLambda(0, X2, DF, .05) < 0) { out["rmsea.ci.upper"] <- 0 } else { lambda.u <- try(uniroot(f = getLambda, chi = X2, df = DF, p = .05, lower = 0, upper = N.RMSEA)$root, silent = TRUE) if (inherits(lambda.u, "try-error")) lambda.u <- NA out["rmsea.ci.upper"] <- sqrt( lambda.u/(N*DF) ) * sqrt(nG) } ## p value out["rmsea.pvalue"] <- pchisq(X2, DF, ncp = N*DF*0.05^2/nG, lower.tail = FALSE) ## Scaled versions (naive and robust) if (robust & !scaleshift) { ## naive out["rmsea.scaled"] <- sqrt( max(0, (X2/N)/d - 1/N) ) * sqrt(nG) ## lower confidence limit if (DF.sc < 1 | getLambda(0, X2, DF.sc, .95) < 0.0) { out["rmsea.ci.lower.scaled"] <- 0 } else { lambda.l <- try(uniroot(f = getLambda, chi = X2, df = DF.sc, p = .95, lower = 0, upper = X2)$root, silent = TRUE) if (inherits(lambda.l, "try-error")) lambda.l <- NA out["rmsea.ci.lower.scaled"] <- sqrt( lambda.l/(N*DF) ) * sqrt(nG) } ## upper confidence limit if (DF.sc < 1 | getLambda(N.RMSEA, X2, DF.sc, .05) > 0.0) { out["rmsea.ci.upper.scaled"] <- 0 } else { lambda.u <- try(uniroot(f = getLambda, chi = X2, df = DF.sc, p = .05, lower = 0, upper = N.RMSEA)$root, silent = TRUE) if (inherits(lambda.u, "try-error")) lambda.u <- NA out["rmsea.ci.upper.scaled"] <- sqrt( lambda.u/(N*DF) ) * sqrt(nG) } ## p value out["rmsea.pvalue.scaled"] <- pchisq(X2, DF.sc, ncp = N*DF.sc*0.05^2/nG, lower.tail = FALSE) if (!pool.robust & test.names[1] %in% c("satorra.bentler","yuan.bentler","yuan.bentler.mplus")) { ## robust out["rmsea.robust"] <- sqrt( max(0, (X2/N)/DF - ch/N ) ) * sqrt(nG) ## lower confidence limit if (DF.sc < 1 | getLambda(0, X2.sc, DF.sc, .95) < 0.0) { out["rmsea.ci.lower.robust"] <- 0 } else { lambda.l <- try(uniroot(f = getLambda, chi = X2.sc, df = DF.sc, p = .95, lower = 0, upper = X2)$root, silent = TRUE) if (inherits(lambda.l, "try-error")) lambda.l <- NA out["rmsea.ci.lower.robust"] <- sqrt( (ch*lambda.l)/(N*DF.sc) ) * sqrt(nG) } ## upper confidence limit if (DF.sc < 1 | getLambda(N.RMSEA, X2.sc, DF.sc, .05) > 0.0) { out["rmsea.ci.upper.robust"] <- 0 } else { lambda.u <- try(uniroot(f = getLambda, chi = X2.sc, df = DF.sc, p = .05, lower = 0, upper = N.RMSEA)$root, silent = TRUE) if (inherits(lambda.u, "try-error")) lambda.u <- NA out["rmsea.ci.upper.robust"] <- sqrt( (ch*lambda.u)/(N*DF.sc) ) * sqrt(nG) } ## p value ########## To be discovered? 
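        ## (no robust analogue of rmsea.pvalue is currently computed here)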
} } else if (robust & scaleshift) { ## naive only out["rmsea.scaled"] <- sqrt( max(0, (X2.sc/N)/DF - 1/N) ) * sqrt(nG) ## lower confidence limit if (DF.sc < 1 | getLambda(0, X2.sc, DF.sc, .95) < 0.0) { out["rmsea.ci.lower.scaled"] <- 0 } else { lambda.l <- try(uniroot(f = getLambda, chi = X2.sc, df = DF.sc, p = .95, lower = 0, upper = X2.sc)$root, silent = TRUE) if (inherits(lambda.l, "try-error")) lambda.l <- NA out["rmsea.ci.lower.scaled"] <- sqrt( lambda.l/(N*DF.sc) ) * sqrt(nG) } ## upper confidence limit if (DF.sc < 1 | getLambda(N.RMSEA, X2.sc, DF.sc, .05) > 0.0) { out["rmsea.ci.upper.scaled"] <- 0 } else { lambda.u <- try(uniroot(f = getLambda, chi = X2.sc, df = DF.sc, p = .05, lower = 0, upper = N.RMSEA)$root, silent = TRUE) if (inherits(lambda.u, "try-error")) lambda.u <- NA out["rmsea.ci.upper.scaled"] <- sqrt( lambda.u/(N*DF.sc) ) * sqrt(nG) } ## p value out["rmsea.pvalue.scaled"] <- pchisq(X2.sc, DF.sc, ncp = N*DF.sc*0.05^2/nG, lower.tail = FALSE) } } } if ("gammahat" %in% indices) { out["gammaHat"] <- nVars / (nVars + 2*((X2 - DF) / N)) out["adjGammaHat"] <- 1 - (((nG * nVars * (nVars + 1)) / 2) / DF) * (1 - out["gammaHat"]) if (robust) { out["gammaHat.scaled"] <- nVars / (nVars + 2*((X2.sc - DF.sc) / N)) out["adjGammaHat.scaled"] <- 1 - (((nG * nVars * (nVars + 1)) / 2) / DF.sc) * (1 - out["gammaHat.scaled"]) } } ## END CHI-SQUARED-BASED FIT INDICES } else out <- numeric(0) ## RESIDUALS-BASED FIT INDICES if (any(grepl(pattern = "rmr", x = indices))) { if (lavListInspect(object, "nlevels") > 1L) { out["srmr"] <- NA # to preserve the order in lavaan output out["srmr_within"] <- getSRMR(object, type = "cor", include.mean = FALSE, level = "within", omit.imps = omit.imps) out["srmr_between"] <- getSRMR(object, type = "cor", include.mean = FALSE, level = lavListInspect(object, "cluster"), omit.imps = omit.imps) out["srmr"] <- out["srmr_within"] + out["srmr_between"] } else { out["rmr"] <- getSRMR(object, type = "raw", include.mean = TRUE, omit.imps = omit.imps) out["rmr_nomean"] <- getSRMR(object, type = "raw", include.mean = FALSE, omit.imps = omit.imps) out["srmr_bentler"] <- out["srmr"] <- getSRMR(object, type = "cor.bentler", include.mean = TRUE, omit.imps = omit.imps) out["srmr_bentler_nomean"] <- getSRMR(object, type = "cor.bentler", include.mean = FALSE, omit.imps = omit.imps) out["crmr"] <- getSRMR(object, type = "cor.bollen", include.mean = TRUE, omit.imps = omit.imps) out["crmr_nomean"] <- getSRMR(object, type = "cor.bollen", include.mean = FALSE, omit.imps = omit.imps) out["srmr_mplus"] <- getSRMR(object, type = "mplus", include.mean = TRUE, omit.imps = omit.imps) out["srmr_mplus_nomean"] <- getSRMR(object, type = "mplus", include.mean = FALSE, omit.imps = omit.imps) } ## END RESIDUALS-BASED FIT INDICES } ## return requested measures (loosely matched) if ("all" %in% fit.measures) { fits <- out } else { fits <- out[grepl(pattern = paste(fit.measures, collapse = "|"), x = names(out), ignore.case = TRUE)] fits <- fits[which(!is.na(names(fits)))] } class(fits) <- c("lavaan.vector","numeric") if (output == "text") class(fits) <- c("lavaan.fitMeasures", class(fits)) fits } ##' @name lavaan.mi-class ##' @aliases fitMeasures,lavaan.mi-method ##' @importFrom lavaan fitMeasures ##' @export setMethod("fitMeasures", "lavaan.mi", fitMeasures.mi) ## lowercase 'm' ##' @name lavaan.mi-class ##' @aliases fitmeasures,lavaan.mi-method ##' @importFrom lavaan fitmeasures ##' @export setMethod("fitmeasures", "lavaan.mi", fitMeasures.mi) ##' @importFrom lavaan lavListInspect lavNames ##' 
@importFrom methods getMethod fitted.lavaan.mi <- function(object, omit.imps = c("no.conv","no.se")) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) ## how many blocks to loop over nG <- lavListInspect(object, "ngroups") nlevels <- lavListInspect(object, "nlevels") nBlocks <- nG * nlevels #FIXME: always? group.label <- if (nG > 1L) lavListInspect(object, "group.label") else NULL clus.label <- if (nlevels > 1L) c("within", lavListInspect(object, "cluster")) else NULL if (nBlocks > 1L) { block.label <- paste(rep(group.label, each = nlevels), clus.label, sep = if (nG > 1L && nlevels > 1L) "_" else "") } est <- getMethod("coef", "lavaan.mi")(object, omit.imps = omit.imps) setpar <- lavaan::lav_model_set_parameters(object@Model, x = est) impMats <- lavaan::lav_model_implied(setpar) if (lavListInspect(object, "categorical")) { th.idx <- lavListInspect(object, "th.idx") # to select $(res.)th if (nBlocks == 1L) th.idx <- list(th.idx) # to loop over #FIXME when multilevel accepts categorical } #TODO: adapt to multilevel, multigroup, or both ## loop over (blocks and) moments Implied <- vector("list", nBlocks) for (b in 1:nBlocks) { for (nm in names(impMats)) { ## skip any empty objects if (is.null(impMats[[nm]][[b]])) next Implied[[b]][[nm]] <- impMats[[nm]][[b]] ## assign names and classes if (nm %in% c("cov","res.cov")) { NAMES <- lavNames(object, type = "ov.model", block = b) dimnames(Implied[[b]][[nm]]) <- list(NAMES, NAMES) class(Implied[[b]][[nm]]) <- c("lavaan.matrix.symmetric","matrix") } else if (nm %in% c("mean","res.int")) { Implied[[b]][[nm]] <- as.numeric(Implied[[b]][[nm]]) # remove matrix names(Implied[[b]][[nm]]) <- lavNames(object, type = "ov.model", block = b) class(Implied[[b]][[nm]]) <- c("lavaan.vector","numeric") } else if (nm %in% c("th","res.th")) { #FIXME: When lavaan allows multilevel categorical, thresholds only ## apply once (not to each level, like for all groups). ## Will lavaan return a vector of zeros for all but "within"? ## If not, it will not exist for each block, so count over groups. Implied[[b]][[nm]] <- as.numeric(Implied[[b]][[nm]])[ th.idx[[b]] ] # remove matrix & numeric -means names(Implied[[b]][[nm]]) <- lavNames(object, type = "th", block = b) #FIXME? class(Implied[[b]][[nm]]) <- c("lavaan.vector","numeric") } else if (nm == "group.w") { ## Only for (D)WLS estimation, but when is it relevant? 
## For now, assign no names/class ## The remaining only exist when conditional.x } else if (nm %in% c("slopes","res.slopes")) { dimnames(Implied[[b]][[nm]]) <- list(lavNames(object, type = "ov.nox", block = b), lavNames(object, type = "ov.x", block = b)) class(Implied[[b]][[nm]]) <- c("lavaan.matrix","matrix") } else if (nm == "cov.x") { NAMES <- lavNames(object, type = "ov.x", block = b) dimnames(Implied[[b]][[nm]]) <- list(NAMES, NAMES) class(Implied[[b]][[nm]]) <- c("lavaan.matrix.symmetric","matrix") } else if (nm == "mean.x") { Implied[[b]][[nm]] <- as.numeric(Implied[[b]][[nm]]) # remove matrix names(Implied[[b]][[nm]]) <- lavNames(object, type = "ov.x", block = b) class(Implied[[b]][[nm]]) <- c("lavaan.vector","numeric") } ## end loops } } ## drop list for 1 block, or add labels for multiple if (nBlocks == 1L) { Implied <- Implied[[1]] } else names(Implied) <- block.label Implied } ##' @name lavaan.mi-class ##' @aliases fitted,lavaan.mi-method ##' @export setMethod("fitted", "lavaan.mi", fitted.lavaan.mi) ##' @name lavaan.mi-class ##' @aliases fitted.values,lavaan.mi-method ##' @export setMethod("fitted.values", "lavaan.mi", fitted.lavaan.mi) ##' @importFrom lavaan lavListInspect ##' @importFrom methods getMethod ##' @importFrom stats cov2cor resid.lavaan.mi <- function(object, type = c("raw","cor"), omit.imps = c("no.conv","no.se")) { ## @SampleStatsList is (for each imputation) output from: ## getSampStats <- function(obj) lavInspect(obj, "sampstat") useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) ## check type options type <- tolower(type[1]) if (type %in% c("raw","rmr")) { type = "raw" } else if (type %in% c("cor","cor.bollen","crmr")) { type <- "cor.bollen" } else if (type %in% c("cor.bentler","cor.eqs","srmr")) { type <- "cor.bentler" } else stop('type="', type, '" not supported for lavaan.mi objects') ## how many blocks to loop over nG <- lavListInspect(object, "ngroups") nlevels <- lavListInspect(object, "nlevels") nBlocks <- nG * nlevels #FIXME: always? 
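  ## Residuals are assembled per block -- the crossing of groups and levels --
  ## and, when both dimensions exceed one, each block is labeled
  ## "group_level" below.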
group.label <- if (nG > 1L) lavListInspect(object, "group.label") else NULL clus.label <- if (nlevels > 1L) c("within", lavListInspect(object, "cluster")) else NULL if (nBlocks > 1L) { block.label <- paste(rep(group.label, each = nlevels), clus.label, sep = if (nG > 1L && nlevels > 1L) "_" else "") } if (lavListInspect(object, "categorical")) { th.idx <- lavListInspect(object, "th.idx") # to select $(res.)th if (nBlocks == 1L) th.idx <- list(th.idx) # to loop over #FIXME when multilevel accepts categorical } ## H0-model-implied moments, already pooled ## (moments-list nested in block-list) Implied <- getMethod("fitted", "lavaan.mi")(object, omit.imps = omit.imps) if (nBlocks == 1L) Implied <- list(Implied) # store single block in a block-list ## template to store observed moments & residuals RES <- OBS <- vector("list", nBlocks) ## loop over (blocks and) moments for (b in 1:nBlocks) { for (nm in names(Implied[[b]])) { ## skip if Implied element is not part of the saturated list if (is.null(object@h1List[[ useImps[1] ]]$implied[[nm]][[b]])) next ## H1 (saturated model) implied moments ## (block-list nested in moments-list) momentList <- lapply(object@h1List[useImps], function(x) x$implied[[nm]][[b]]) OBS[[b]][[nm]] <- Reduce("+", momentList) / m #TODO: unnecessary calculation if standardized and nm %in% c("th","slopes") ## remove numeric -means from thresholds if (nm %in% c("th","res.th")) OBS[[b]][[nm]] <- as.numeric(OBS[[b]][[nm]])[ th.idx[[b]] ] ## calculate residuals if (type == "raw") { RES[[b]][[nm]] <- OBS[[b]][[nm]] - Implied[[b]][[nm]] class(RES[[b]][[nm]]) <- class(Implied[[b]][[nm]]) ## correlation residuals } else if (type == "cor.bollen") { if (nm %in% c("cov","res.cov")) { RES[[b]][[nm]] <- cov2cor(OBS[[b]][[nm]]) - cov2cor(Implied[[b]][[nm]]) class(RES[[b]][[nm]]) <- c("lavaan.matrix.symmetric","matrix") ## mean structure } else if (nm == "mean") { std.obs.M <- OBS[[b]][[nm]] / sqrt(diag(OBS[[b]]$cov)) std.mod.M <- Implied[[b]][[nm]] / sqrt(diag(Implied[[b]]$cov)) RES[[b]][[nm]] <- std.obs.M - std.mod.M class(RES[[b]][[nm]]) <- c("lavaan.vector","numeric") } else if (nm == "res.int") { std.obs.M <- OBS[[b]][[nm]] / sqrt(diag(OBS[[b]]$res.cov)) std.mod.M <- Implied[[b]][[nm]] / sqrt(diag(Implied[[b]]$res.cov)) RES[[b]][[nm]] <- std.obs.M - std.mod.M class(RES[[b]][[nm]]) <- c("lavaan.vector","numeric") ## thresholds, slopes, cov.x, mean.x } else { #FIXME: lavaan currently (0.6-4.1399) returns nothing next } ## standardized (by observed SDs) residuals } else if (type == "cor.bentler") { if (nm %in% c("cov","mean")) { SDs <- diag(sqrt(diag(OBS[[b]]$cov))) dimnames(SDs) <- dimnames(OBS[[b]][[nm]]) } else if (nm %in% c("res.cov","res.int")) { SDs <- diag(sqrt(diag(OBS[[b]]$res.cov))) dimnames(SDs) <- dimnames(OBS[[b]][[nm]]) } else { #FIXME: lavaan currently (0.6-4.1399) returns nothing for "th" or "slopes" next } if (nm %in% c("cov","res.cov")) { RES[[b]][[nm]] <- solve(SDs) %*% (OBS[[b]][[nm]] - Implied[[b]][[nm]]) %*% solve(SDs) class(RES[[b]][[nm]]) <- c("lavaan.matrix.symmetric","matrix") } else if (nm %in% c("mean","res.int")) { RES[[b]][[nm]] <- (OBS[[b]][[nm]] - Implied[[b]][[nm]]) / diag(SDs) class(RES[[b]][[nm]]) <- c("lavaan.vector","numeric") } } ## copy names from fitted() results if (is.null(dim(RES[[b]][[nm]]))) { names(RES[[b]][[nm]]) <- names(Implied[[b]][[nm]]) } else dimnames(RES[[b]][[nm]]) <- dimnames(Implied[[b]][[nm]]) ## end loop over moments } ## add type to beginning of each block's list RES[[b]] <- c(list(type = type), RES[[b]]) #TODO: Rename (res.)cov 
to (res.)cor? lavResiduals() does not
  }
  ## drop list for 1 block
  if (nBlocks == 1L) {
    RES <- RES[[1]]
  } else names(RES) <- block.label #FIXME: will lavaan do this in the future?
  RES
}
##' @name lavaan.mi-class
##' @aliases residuals,lavaan.mi-method
##' @export
setMethod("residuals", "lavaan.mi", resid.lavaan.mi)
##' @name lavaan.mi-class
##' @aliases resid,lavaan.mi-method
##' @export
setMethod("resid", "lavaan.mi", resid.lavaan.mi)
semTools/R/residualCovariate.R0000644000176200001440000000354013432435606016061 0ustar liggesusers### Sunthud Pornprasertmanit
### Last updated: 9 March 2018

#' Residual-center all target indicators by covariates
#'
#' This function will regress target variables on the covariate and replace the
#' target variables by the residuals of the regression analysis. This procedure
#' is useful for controlling for the covariate in the analysis model (Geldhof,
#' Pornprasertmanit, Schoemann, & Little, 2013).
#'
#'
#' @importFrom stats lm
#'
#' @param data The desired data to be transformed.
#' @param targetVar Variable names or the positions of indicators that users
#' wish to be residual centered (as dependent variables)
#' @param covVar Covariate names or the positions of the covariates used for
#' residual centering of the target variables (as independent variables)
#' @return The data in which the target variables are replaced by the residuals
#' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
#' @seealso \code{\link{indProd}} For creating the indicator products with no
#' centering, mean centering, double-mean centering, or residual centering.
#' @references Geldhof, G. J., Pornprasertmanit, S., Schoemann, A. M., &
#' Little, T. D. (2013). Orthogonalizing through residual centering:
#' Extended applications and caveats. \emph{Educational and Psychological
#' Measurement, 73}(1), 27--46. doi:10.1177/0013164412445473
#' @examples
#'
#' dat <- residualCovariate(attitude, 2:7, 1)
#'
#' @export
residualCovariate <- function(data, targetVar, covVar) {
  x <- as.list(match.call())
  cov <- eval(x$covVar)
  target <- eval(x$targetVar)
  if (all(is.numeric(cov))) cov <- colnames(data)[cov]
  if (all(is.numeric(target))) target <- colnames(data)[target]
  express <- paste("cbind(", paste(target, collapse = ", "), ") ~ ",
                   paste(cov, collapse = " + "), sep = "")
  data[, target] <- lm(express, data = data)$residuals
  return(data)
}
semTools/R/clipboard.R0000644000176200001440000003207213432435606014354 0ustar liggesusers### Sunthud Pornprasertmanit & Terrence D. Jorgensen
### Last updated: 11 April 2017
### Copy or save each aspect of the lavaan object into a clipboard or a file

#' Copy or save the result of \code{lavaan} or \code{FitDiff} objects to the
#' clipboard or a file
#'
#' Copy or save the result of a \code{lavaan} or \code{\linkS4class{FitDiff}}
#' object to the clipboard or a file. From the clipboard, users may paste the
#' result into Microsoft Excel or another spreadsheet application to create a
#' table of the output.
#'
#'
#' @aliases clipboard saveFile
#' @param object The \code{lavaan} or \code{\linkS4class{FitDiff}} object
#' @param what The attributes of the \code{lavaan} object to be copied to the
#' clipboard. \code{"summary"} copies the printed output provided by the
#' \code{summary} function. \code{"mifit"} copies the result from the
#' \code{\link{miPowerFit}} function. Other attributes listed in the
#' \code{inspect} method in the \link[lavaan]{lavaan-class} can also be used,
#' such as \code{"coef"}, \code{"se"}, \code{"fit"}, \code{"samp"}, and so on.
#' For the \code{\linkS4class{FitDiff}} object, this argument is not active
#' yet.
#' @param file A file name used for saving the result
#' @param tableFormat If \code{TRUE}, save the result in the table format using
#' tabs for separation. Otherwise, save the result as the output screen
#' printed in the R console.
#' @param fit.measures \code{character} vector specifying names of fit measures
#' returned by \code{\link[lavaan]{fitMeasures}} to be copied/saved. Only
#' relevant if \code{object} is class \code{\linkS4class{FitDiff}}.
#' @param writeArgs \code{list} of additional arguments to be passed to
#' \code{\link[utils]{write.table}}
#' @param \dots Additional arguments listed in the \code{\link{miPowerFit}}
#' function (for \code{lavaan} objects only).
#' @return The resulting output will be saved to the clipboard or a file. If
#' using the \code{clipboard} function, users may paste it into other
#' applications.
#' @author
#' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
#'
#' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com})
#' @examples
#'
#' \dontrun{
#' library(lavaan)
#' HW.model <- ' visual  =~ x1 + c1*x2 + x3
#'               textual =~ x4 + c1*x5 + x6
#'               speed   =~ x7 + x8 + x9 '
#'
#' fit <- cfa(HW.model, data=HolzingerSwineford1939, group="school", meanstructure=TRUE)
#'
#' # Copy the summary of the lavaan object
#' clipboard(fit)
#'
#' # Copy the modification indices and the model fit from the miPowerFit function
#' clipboard(fit, "mifit")
#'
#' # Copy the parameter estimates
#' clipboard(fit, "coef")
#'
#' # Copy the standard errors
#' clipboard(fit, "se")
#'
#' # Copy the sample statistics
#' clipboard(fit, "samp")
#'
#' # Copy the fit measures
#' clipboard(fit, "fit")
#'
#' # Save the summary of the lavaan object
#' saveFile(fit, "out.txt")
#'
#' # Save the modification indices and the model fit from the miPowerFit function
#' saveFile(fit, "out.txt", "mifit")
#'
#' # Save the parameter estimates
#' saveFile(fit, "out.txt", "coef")
#'
#' # Save the standard errors
#' saveFile(fit, "out.txt", "se")
#'
#' # Save the sample statistics
#' saveFile(fit, "out.txt", "samp")
#'
#' # Save the fit measures
#' saveFile(fit, "out.txt", "fit")
#' }
#'
#' @export
clipboard <- function(object, what = "summary", ...) {
  if (.Platform$OS.type == "windows") {
    saveFile(object, file="clipboard-128", what=what, tableFormat=TRUE, ...)
    cat("File saved to the clipboard; please paste it into any program you wish.\n")
  } else {
    if (system("pbcopy", ignore.stderr = TRUE) == 0) {
      saveFile(object, file=pipe("pbcopy", "w"), what=what, tableFormat=TRUE, ...)
      cat("File saved to the clipboard; please paste it into any program you wish. If pasting fails, note that this method only works on some systems (for reasons that remain unclear); please consider using the 'saveFile' function instead.\n")
    } else if (system("xclip -version", ignore.stderr = TRUE) == 0) {
      saveFile(object, file=pipe("xclip -i", "w") , what=what, tableFormat=TRUE, ...)
      cat("File saved via xclip; please paste it into any program you wish. If pasting fails, note that this method only works on some systems (for reasons that remain unclear); please consider using the 'saveFile' function instead.\n")
    } else {
      stop("On Mac, this function requires the 'pbcopy' shell command, which did not respond. On Linux, it depends on the 'xclip' application; please install and run xclip before using this function in R (though it is not guaranteed to work).
Alternatively, use the 'saveFile' function to write the output into a file.")
    }
  }
}

#' @rdname clipboard
#' @export
saveFile <- function(object, file, what = "summary", tableFormat = FALSE,
                     fit.measures = "default", writeArgs = list(), ...) {
  # Check whether the object is in the lavaan class
  if (is(object, "lavaan")) {
    saveFileLavaan(object, file, what = what, tableFormat = tableFormat,
                   writeArgs = writeArgs, ...)
  } else if (is(object, "FitDiff")) {
    saveFileFitDiff(object, file, what = what, tableFormat = tableFormat,
                    fit.measures = fit.measures, writeArgs = writeArgs)
  } else {
    stop("The object must be a lavaan object or the",
         " output from the compareFit function.")
  }
}

## ----------------
## Hidden functions
## ----------------

#' @importFrom lavaan lavInspect
saveFileLavaan <- function(object, file, what = "summary", tableFormat = FALSE,
                           writeArgs = list(), ...) {
  if (length(what) > 1) message("only the first `what' option is used")
  # be case insensitive
  what <- tolower(what[1])
  writeArgs$file <- file
  if (is.null(writeArgs$sep)) writeArgs$sep <- "\t"
  if (is.null(writeArgs$quote)) writeArgs$quote <- FALSE
  if (what == "summary") {
    if (tableFormat) {
      copySummary(object, file = file, writeArgs = writeArgs)
    } else {
      write(paste(utils::capture.output(summary(object, rsquare = TRUE,
                                                fit = TRUE, standardize = TRUE)),
                  collapse = "\n"), file = file)
    }
  } else if (what == "mifit") {
    if (tableFormat) {
      writeArgs$x <- miPowerFit(object, ...)
      if (is.null(writeArgs$row.names)) writeArgs$row.names <- FALSE
      if (is.null(writeArgs$col.names)) writeArgs$col.names <- TRUE
    } else {
      write(paste(utils::capture.output(miPowerFit(object, ...)),
                  collapse = "\n"), file = file)
    }
  } else {
    target <- lavInspect(object, what=what)
    if (tableFormat) {
      if (is(target, "lavaan.data.frame") || is(target, "data.frame")) {
        writeArgs$x <- target
        if (is.null(writeArgs$row.names)) writeArgs$row.names <- FALSE
        if (is.null(writeArgs$col.names)) writeArgs$col.names <- TRUE
      } else if (is(target, "list")) {
        if (is(target[[1]], "list")) {
          target <- lapply(target, listToDataFrame)
          target <- mapply(function(x, y) rbind(rep("", ncol(y)),
                                                c(x, rep("", ncol(y) - 1)), y),
                           names(target), target, SIMPLIFY = FALSE)
          writeArgs$x <- do.call(rbind, target)
          if (is.null(writeArgs$row.names)) writeArgs$row.names <- FALSE
          if (is.null(writeArgs$col.names)) writeArgs$col.names <- FALSE
        } else {
          writeArgs$x <- listToDataFrame(target)
          if (is.null(writeArgs$row.names)) writeArgs$row.names <- FALSE
          if (is.null(writeArgs$col.names)) writeArgs$col.names <- FALSE
        }
      } else {
        writeArgs$x <- target
        if (is.null(writeArgs$row.names)) writeArgs$row.names <- TRUE
        if (is.null(writeArgs$col.names)) writeArgs$col.names <- TRUE
      }
    } else {
      write(paste(utils::capture.output(target), collapse = "\n"), file = file)
    }
  }
  do.call("write.table", writeArgs)
}

# copySummary: copy the summary of the lavaan object to the clipboard;
# potentially useful when users paste it into a spreadsheet application
# object = lavaan object input
copySummary <- function(object, file, writeArgs = list()) {
  # Capture the output of the lavaan object
  outputText <- utils::capture.output(lavaan::summary(object, rsquare=TRUE,
                                                      standardize=TRUE,
                                                      fit.measure=TRUE))
  # Split the text by two spaces
  outputText <- strsplit(outputText, " ")
  # Trim and delete the "" elements
  outputText <- lapply(outputText, function(x) x[x != ""])
  outputText <- lapply(outputText, trim)
  outputText <- lapply(outputText, function(x) x[x != ""])
  # Group the output into three sections: fit, parameter estimates, and r-squared
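  # (the first line matching "Estimate" marks the start of the parameter
  #  estimates, and the first line matching "R-Square" marks the start of the
  #  r-squared section; everything before "Estimate" is treated as fit output)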
cut1 <- grep("Estimate", outputText)[1] cut2 <- grep("R-Square", outputText)[1] set1 <- outputText[1:(cut1 - 1)] set2 <- outputText[cut1:(cut2 - 1)] set3 <- outputText[cut2:length(outputText)] # Assign the number of columns in the resulting data frame and check whether the output contains any labels numcol <- 7 test <- set2[-grep("Estimate", set2)] test <- test[sapply(test, length) >= 2] if (any(sapply(test, function(x) is.na(suppressWarnings(as.numeric(x[2])))))) numcol <- numcol + 1 # A function to parse the fit-measures output set1Parse <- function(x, numcol) { if (length(x) == 0) { return(rep("", numcol)) } else if (length(x) == 1) { return(c(x, rep("", numcol - 1))) } else if ((length(x) >= 2) & (length(x) <= numcol)) { return(c(x[1], rep("", numcol - length(x)), x[2:length(x)])) } else { stop("Cannot parse text") } } set1 <- t(sapply(set1, set1Parse, numcol)) # A function to parse the parameter-estimates output set2Parse <- function(x, numcol) { if (length(x) == 0) return(rep("", numcol)) if (any(grepl("Estimate", x))) return(c(rep("", numcol-length(x)), x)) if (length(x) == 1) { return(c(x, rep("", numcol-1))) } else { group1 <- x[1] group2 <- x[2:length(x)] if (is.na(suppressWarnings(as.numeric(x[2])))) { group1 <- x[1:2] group2 <- x[3:length(x)] } else if (numcol == 8) { group1 <- c(group1, "") } if (length(group2) == 1) { group2 <- c(group2, rep("", 6 - length(group2))) } else if (length(group2) == 4) { group2 <- c(group2, rep("", 6 - length(group2))) } else { group2 <- c(group2[1], rep("", 6 - length(group2)), group2[2:length(group2)]) } return(c(group1, group2)) } } set2 <- t(sapply(set2, set2Parse, numcol)) # A function to parse the r-squared output set3Parse <- function(x, numcol) { if (length(x) == 0) { return(rep("", numcol)) } else { return(c(x, rep("", numcol - length(x)))) } } set3 <- t(sapply(set3, set3Parse, numcol)) # Copy the output into the clipboard writeArgs$x <- rbind(set1, set2, set3) writeArgs$file <- file if (is.null(writeArgs$quote)) writeArgs$quote <- FALSE if (is.null(writeArgs$sep)) writeArgs$sep <- "\t" if (is.null(writeArgs$row.names)) writeArgs$row.names <- FALSE if (is.null(writeArgs$col.names)) writeArgs$col.names <- FALSE do.call("write.table", writeArgs) } # trim function from the R.oo package trim <- function(object) { s <- sub("^[\t\n\f\r ]*", "", as.character(object)); s <- sub("[\t\n\f\r ]*$", "", s); s; } # listToDataFrame: Change a list with multiple elements into a single data.frame listToDataFrame <- function(object) { name <- names(object) # Count the maximum number of column (+1 is for the column for row name) numcol <- max(sapply(object, function(x) ifelse(is(x, "lavaan.matrix") || is(x, "lavaan.matrix.symmetric") || is(x, "matrix") || is(x, "data.frame"), return(ncol(x)), return(1)))) + 1 # Change all objects in the list into a data.frame with the specified column target <- lapply(object, niceDataFrame, numcol) # Paste the name of each object into each data.frame target <- mapply(function(x, y) rbind(rep("", ncol(y)), c(x, rep("", ncol(y) - 1)), y), name, target, SIMPLIFY=FALSE) # Combine into a single data.frame target <- do.call(rbind, target) target[-1,] } # niceDataFrame: Change an object into a data.frame with a specified number of columns and the row and column names are included in the data.frame niceDataFrame <- function(object, numcol) { temp <- NULL if (is(object, "lavaan.matrix.symmetric")) { # save only the lower diagonal of the symmetric matrix temp <- matrix("", nrow(object), ncol(object)) for (i in 1:nrow(object)) { 
temp[i, 1:i] <- object[i, 1:i]
    }
  } else if (is(object, "data.frame") || is(object, "matrix") || is(object, "lavaan.matrix")) {
    # copy the matrix
    temp <- object
  } else if (is(object, "vector") || is(object, "lavaan.vector")) {
    # transform a vector into a matrix
    object <- as.matrix(object)
    temp <- object
  } else {
    stop("The 'niceDataFrame' function has a bug. Please contact the developer.")
  }
  # Transform into the result with a specified number of columns, excluding the row name
  result <- matrix("", nrow(temp), numcol - 1)
  # Parse the column names
  result[,1:ncol(temp)] <- temp
  firstRow <- colnames(object)
  ifelse(is.null(firstRow), firstRow <- rep("", ncol(result)),
         firstRow <- c(firstRow, rep("", numcol - length(firstRow) - 1)))
  # Parse the row names
  result <- rbind(firstRow, result)
  firstCol <- rownames(object)
  ifelse(is.null(firstCol), firstCol <- rep("", nrow(result)),
         firstCol <- c("", firstCol))
  result <- cbind(firstCol, result)
  dimnames(result) <- NULL
  result
}
semTools/R/efa.R0000644000176200001440000005530513432435606013154 0ustar liggesusers### Sunthud Pornprasertmanit & Terrence D. Jorgensen
### Last updated: 21 July 2018
### fit and rotate EFA models in lavaan


## -------------------------------------
## Exploratory Factor Analysis in lavaan
## -------------------------------------

#' Analyze an Unrotated Exploratory Factor Analysis Model
#'
#' This function will analyze an unrotated exploratory factor analysis model.
#' The unrotated solution can be rotated by the \code{\link{orthRotate}} and
#' \code{\link{oblqRotate}} functions.
#'
#' This function will generate a lavaan script for an unrotated exploratory
#' factor analysis model such that (1) all factor loadings are estimated,
#' (2) factor variances are fixed to 1, (3) factor covariances are fixed to 0,
#' and (4) the dot products of any pairs of columns in the factor loading
#' matrix are fixed to zero (Johnson & Wichern, 2002). The reason for creating
#' this function in addition to the \code{\link{factanal}} function is that
#' users can enjoy some advanced features from the \code{lavaan} package, such
#' as scaled \eqn{\chi^2}, diagonally weighted least squares for ordinal
#' indicators, or full-information maximum likelihood (FIML).
#'
#' @importFrom lavaan lavInspect parTable
#' @importFrom stats factanal
#'
#' @param data A target \code{data.frame}
#' @param nf The desired number of factors
#' @param varList Target observed variables. If not specified, all variables in
#' \code{data} will be used.
#' @param start Use starting values in the analysis from the
#' \code{\link{factanal}} function. If \code{FALSE}, the starting values
#' from the \code{lavaan} package will be used. \code{TRUE} is ignored with a
#' warning if the \code{aux} argument is used.
#' @param aux The list of auxiliary variables. These variables will be included
#' in the model by the saturated-correlates approach to account for missing
#' information.
#' @param \dots Other arguments in the \code{\link[lavaan]{cfa}} function in
#' the \code{lavaan} package, such as \code{ordered}, \code{se}, or
#' \code{estimator}
#'
#' @return A \code{lavaan} output of the unrotated exploratory factor analysis
#' solution.
#' #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' #' @examples #' #' unrotated <- efaUnrotate(HolzingerSwineford1939, nf = 3, #' varList=paste0("x", 1:9), estimator = "mlr") #' summary(unrotated, std = TRUE) #' inspect(unrotated, "std") #' #' dat <- data.frame(HolzingerSwineford1939, #' z = rnorm(nrow(HolzingerSwineford1939), 0, 1)) #' unrotated2 <- efaUnrotate(dat, nf = 2, varList = paste0("x", 1:9), aux = "z") #' #' @export efaUnrotate <- function(data, nf, varList = NULL, start = TRUE, aux = NULL, ...) { if (is.null(varList)) varList <- colnames(data) isOrdered <- checkOrdered(data, varList, ...) efaArgs <- list(...) if (!is.null(efaArgs$group)) stop("Multi-group EFA is not currently supported.") efaArgs$data <- data if (!is.null(aux)) { if (isOrdered) { stop("The analysis model or the analysis data have ordered categorical", " variables. The auxiliary variable feature is not available for", " the models for categorical variables with the weighted least", " square approach.") } efaArgs$fixed.x <- FALSE efaArgs$missing <- "fiml" efaArgs$aux <- aux lavaancfa <- function(...) { cfa.auxiliary(...)} } else lavaancfa <- function(...) { lavaan::cfa(...)} nvar <- length(varList) facnames <- paste0("factor", 1:nf) loading <- outer(1:nvar, 1:nf, function(x, y) paste0("load", x, "_", y)) syntax <- "" for (i in 1:nf) { variablesyntax <- paste(paste0(loading[,i], "*", varList), collapse = " + ") factorsyntax <- paste0(facnames[i], " =~ NA*", varList[1], " + ", variablesyntax, "\n") syntax <- paste(syntax, factorsyntax) } syntax <- paste(syntax, paste(paste0(facnames, " ~~ 1*", facnames), collapse = "\n"), "\n") if (!isOrdered) { syntax <- paste(syntax, paste(paste0(varList, " ~ 1"), collapse = "\n"), "\n") } if (nf > 1) { covsyntax <- outer(facnames, facnames, function(x, y) paste0(x, " ~~ 0*", y, "\n"))[lower.tri(diag(nf), diag = FALSE)] syntax <- paste(syntax, paste(covsyntax, collapse = " ")) for (i in 2:nf) { for (j in 1:(i - 1)) { loadconstraint <- paste(paste0(loading[,i], "*", loading[,j]), collapse=" + ") syntax <- paste(syntax, paste0("0 == ", loadconstraint), "\n") } } } if (start) { if (is.null(aux)) { List <- c(list(model = syntax, data = data), list(...)) List$do.fit <- FALSE outtemp <- do.call(lavaancfa, List) covtemp <- lavInspect(outtemp, "sampstat")$cov partemp <- parTable(outtemp) err <- try(startload <- factanal(factors = nf, covmat = covtemp)$loadings[], silent = TRUE) if (is(err, "try-error")) stop("The starting values from the factanal", " function cannot be calculated. Please", " use start = FALSE instead.") startval <- sqrt(diag(diag(covtemp))) %*% startload partemp$ustart[match(as.vector(loading), partemp$label)] <- as.vector(startval) partemp$est <- partemp$se <- NULL syntax <- partemp } else warning("The 'start' argument was ignored because factanal() does", " not support auxiliary variables. 
When using auxiliary", " variables, set 'start = FALSE' ") } else { ## FIXME: optimizer can't escape loadings == 0 without starting values from ## factanal() or by using theta parameterization ## https://groups.google.com/d/msg/lavaan/ujkHmCVirEY/-LGut4ewAwAJ parameterization <- efaArgs$parameterization if (is.null(parameterization)) parameterization <- lavaan::lavOptions("parameterization") if (isOrdered && parameterization != "theta") warning('If the default parameterization = "delta" returns results with ', 'all factor loadings equal to zero, try either setting start ', '= TRUE or setting parameterization = "theta" instead.') } efaArgs$model <- syntax do.call(lavaancfa, efaArgs) } ## ----------------- ## Class and Methods ## ----------------- #' Class For Rotated Results from EFA #' #' This class contains the results of rotated exploratory factor analysis #' #' #' @name EFA-class #' @aliases EFA-class show,EFA-method summary,EFA-method #' @docType class #' @slot loading Rotated standardized factor loading matrix #' @slot rotate Rotation matrix #' @slot gradRotate gradient of the objective function at the rotated loadings #' @slot convergence Convergence status #' @slot phi: Factor correlation matrix. Will be an identity matrix if #' orthogonal rotation is used. #' @slot se Standard errors of the rotated standardized factor loading matrix #' @slot method Method of rotation #' @slot call The command used to generate this object #' @section Objects from the Class: Objects can be created via the #' \code{\link{orthRotate}} or \code{\link{oblqRotate}} function. #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @seealso \code{\link{efaUnrotate}}; \code{\link{orthRotate}}; #' \code{\link{oblqRotate}} #' @examples #' #' unrotated <- efaUnrotate(HolzingerSwineford1939, nf = 3, #' varList = paste0("x", 1:9), estimator = "mlr") #' summary(unrotated, std = TRUE) #' lavInspect(unrotated, "std") #' #' # Rotated by Quartimin #' rotated <- oblqRotate(unrotated, method = "quartimin") #' summary(rotated) #' setClass("EFA", representation(loading = "matrix", rotate = "matrix", gradRotate = "matrix", convergence = "logical", phi = "matrix", se = "matrix", method = "character", call = "call")) #' @rdname EFA-class #' @aliases show,EFA-method #' @export setMethod("show", signature(object = "EFA"), function(object) { cat("Standardized Rotated Factor Loadings\n") print(printLoadings(object)) cat("\nFactor Correlation\n") print(object@phi) cat("\nMethod of rotation:\t") cat(object@method, "\n") message("The standard errors are close but do not match with other packages.", " Be mindful when using it.") }) #' @rdname EFA-class #' @aliases summary,EFA-method #' @param object object of class \code{EFA} #' @param suppress any standardized loadings less than the specified value #' will not be printed to the screen #' @param sort \code{logical}. 
If \code{TRUE} (default), factor loadings will #' be sorted by their size in the console output #' @export setMethod("summary", signature(object = "EFA"), function(object, suppress = 0.1, sort = TRUE) { cat("Standardized Rotated Factor Loadings\n") print(printLoadings(object, suppress = suppress, sort = sort)) cat("\nFactor Correlation\n") print(object@phi) cat("\nMethod of rotation:\t") cat(object@method, "\n") cat("\nTest Statistics for Standardized Rotated Factor Loadings\n") print(testLoadings(object)) }) ## ------------------------------ ## Rotation Constructor Functions ## ------------------------------ #' Implement orthogonal or oblique rotation #' #' These functions will implement orthogonal or oblique rotation on #' standardized factor loadings from a lavaan output. #' #' These functions will rotate the unrotated standardized factor loadings by #' orthogonal rotation using the \code{\link[GPArotation]{GPForth}} function or #' oblique rotation using the \code{\link[GPArotation]{GPFoblq}} function from #' the \code{GPArotation} package. The resulting rotation matrix will be used to #' calculate standard errors of the rotated standardized factor loadings by the #' delta method, numerically computing the Jacobian matrix with the #' \code{\link[lavaan]{lav_func_jacobian_simple}} function. #' #' @aliases orthRotate oblqRotate funRotate #' @rdname rotate #' @param object A lavaan output #' @param method The method of rotation, such as \code{"varimax"}, #' \code{"quartimax"}, \code{"geomin"}, \code{"oblimin"}, or any gradient #' projection algorithms listed in the \code{\link[GPArotation]{GPA}} function #' in the \code{GPArotation} package. #' @param fun The name of the function used to rotate the #' standardized solution. The function must take the standardized loading #' matrix as its first argument and return a \code{GPArotation} object. Check #' this page for available functions: \code{\link[GPArotation]{rotations}}. #' @param \dots Additional arguments for the \code{\link[GPArotation]{GPForth}} #' function (for \code{orthRotate}), the \code{\link[GPArotation]{GPFoblq}} #' function (for \code{oblqRotate}), or the function that users provide in the #' \code{fun} argument. #' @return An \code{\linkS4class{EFA}} object that saves the rotated EFA solution #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @examples #' #' \dontrun{ #' #' unrotated <- efaUnrotate(HolzingerSwineford1939, nf = 3, #' varList = paste0("x", 1:9), estimator = "mlr") #' #' # Orthogonal varimax #' out.varimax <- orthRotate(unrotated, method = "varimax") #' summary(out.varimax, sort = FALSE, suppress = 0.3) #' #' # Orthogonal Quartimin #' orthRotate(unrotated, method = "quartimin") #' #' # Oblique Quartimin #' oblqRotate(unrotated, method = "quartimin") #' #' # Geomin #' oblqRotate(unrotated, method = "geomin") #' #' # Target rotation #' library(GPArotation) #' target <- matrix(0, 9, 3) #' target[1:3, 1] <- NA #' target[4:6, 2] <- NA #' target[7:9, 3] <- NA #' colnames(target) <- c("factor1", "factor2", "factor3") #' ## This function works with GPArotation version 2012.3-1 #' funRotate(unrotated, fun = "targetQ", Target = target) #' } #' #' @export orthRotate <- function(object, method = "varimax", ...) { requireNamespace("GPArotation") if (!("package:GPArotation" %in% search())) attachNamespace("GPArotation") mc <- match.call() initL <- getLoad(object) rotated <- GPArotation::GPForth(initL, method = method, ...) 
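  ## GPForth() returns the rotated loadings ($loadings), the rotation matrix
  ## ($Th), and the criterion gradient at convergence ($Gq). seStdLoadings()
  ## below re-applies the same rotation criterion inside a numeric Jacobian
  ## to obtain delta-method SEs for the rotated standardized loadings.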
# rotateMat <- t(solve(rotated$Th)) # defined but never used LIST <- seStdLoadings(rotated, object, fun = GPArotation::GPForth, MoreArgs = c(method = method, list(...))) # orthogonal <- rotated$orthogonal # defined but never used loading <- rotated$loadings rotate <- rotated$Th gradRotate <- rotated$Gq convergence <- rotated$convergence method <- rotated$method phi <- diag(ncol(loading)) lv.names <- colnames(loading) dimnames(phi) <- list(lv.names, lv.names) new("EFA", loading = loading, rotate = rotate, gradRotate = gradRotate, convergence = convergence, phi = phi, se = LIST, method = method, call = mc) } #' @rdname rotate #' @export oblqRotate <- function(object, method = "quartimin", ...) { requireNamespace("GPArotation") if (!("package:GPArotation" %in% search())) attachNamespace("GPArotation") mc <- match.call() initL <- getLoad(object) rotated <- GPArotation::GPFoblq(initL, method = method, ...) # rotateMat <- t(solve(rotated$Th)) # defined but never used LIST <- seStdLoadings(rotated, object, fun = GPArotation::GPFoblq, MoreArgs = c(method = method, list(...))) # orthogonal <- rotated$orthogonal # defined but never used loading <- rotated$loadings rotate <- rotated$Th gradRotate <- rotated$Gq convergence <- rotated$convergence method <- rotated$method phi <- rotated$Phi lv.names <- colnames(loading) dimnames(phi) <- list(lv.names, lv.names) new("EFA", loading = loading, rotate = rotate, gradRotate = gradRotate, convergence = convergence, phi = phi, se = LIST, method = method, call = mc) } #' @rdname rotate #' @export funRotate <- function(object, fun, ...) { stopifnot(is.character(fun)) requireNamespace("GPArotation") if (!("package:GPArotation" %in% search())) attachNamespace("GPArotation") mc <- match.call() initL <- getLoad(object) rotated <- do.call(fun, c(list(L = initL), list(...))) # rotateMat <- t(solve(rotated$Th)) # defined but never used gradRotate <- rotated$Gq LIST <- seStdLoadings(rotated, object, fun = fun, MoreArgs = list(...)) # orthogonal <- rotated$orthogonal # defined but never used loading <- rotated$loadings rotate <- rotated$Th convergence <- rotated$convergence method <- rotated$method phi <- rotated$Phi if (is.null(phi)) phi <- diag(ncol(loading)) lv.names <- colnames(loading) dimnames(phi) <- list(lv.names, lv.names) new("EFA", loading = loading, rotate = rotate, gradRotate = gradRotate, convergence = convergence, phi = phi, se = LIST, method = method, call = mc) } ## ---------------- ## Hidden Functions ## ---------------- printLoadings <- function(object, suppress = 0.1, sort = TRUE) { loading <- object@loading nf <- ncol(loading) loadingText <- sprintf("%.3f", object@loading) sig <- ifelse(testLoadings(object)$p < 0.05, "*", " ") loadingText <- paste0(loadingText, sig) loadingText[abs(loading) < suppress] <- "" loadingText <- matrix(loadingText, ncol = nf, dimnames = dimnames(loading)) lead <- apply(abs(loading), 1, which.max) ord <- NULL if (sort) { for (i in 1:nf) { ord <- c(ord, intersect(order(abs(loading[,i]), decreasing = TRUE), which(lead == i))) } loadingText <- loadingText[ord,] } as.data.frame(loadingText) } #' @importFrom stats pnorm qnorm testLoadings <- function(object, level = 0.95) { se <- object@se loading <- object@loading lv.names <- colnames(loading) z <- loading / se p <- 2 * (1 - pnorm( abs(z) )) crit <- qnorm(1 - (1 - level)/2) est <- as.vector(loading) se <- as.vector(se) warning("The standard error is currently invalid because it does not account", " for the variance of the rotation function. 
It is simply based on", " the delta method.") out <- data.frame(lhs=lv.names[col(loading)], op = "=~", rhs = rownames(loading)[row(loading)], std.loading = est, se = se, z = as.vector(z), p = as.vector(p), ci.lower = (est - crit*se), ci.upper = (est + crit*se)) class(out) <- c("lavaan.data.frame", "data.frame") out } #' @importFrom lavaan lavInspect getLoad <- function(object, std = TRUE) { out <- lavInspect(object, "est")$lambda #FIXME: check for multiple groups if (std) { impcov <- lavaan::fitted.values(object)$cov impsd <- sqrt(diag(diag(impcov))) out <- solve(impsd) %*% out } rownames(out) <- lavaan::lavNames(object@ParTable, "ov", group = 1) if (!is.null(object@external$aux)) { out <- out[!(rownames(out) %in% object@external$aux),] } class(out) <- c("loadings", out) out } fillMult <- function(X, Y, fillrowx = 0, fillrowy = 0, fillcolx = 0, fillcoly = 0) { tempX <- matrix(0, nrow = nrow(X) + fillrowx, ncol = ncol(X) + fillcolx) tempY <- matrix(0, nrow = nrow(Y) + fillrowy, ncol = ncol(Y) + fillcoly) tempX[1:nrow(X), 1:ncol(X)] <- X tempY[1:nrow(Y), 1:ncol(Y)] <- Y result <- tempX %*% tempY result[1:nrow(X), 1:ncol(Y)] } stdRotatedLoadings <- function(est, object, fun, aux = NULL, rotate = NULL, MoreArgs = NULL) { ov.names <- lavaan::lavNames(object@ParTable, "ov", group = 1) lv.names <- lavaan::lavNames(object@ParTable, "lv", group = 1) ind.names <- setdiff(ov.names, aux) # Compute model-implied covariance matrix partable <- object@ParTable # LY load.idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names)) loading <- matrix(est[load.idx], ncol = length(lv.names)) loading <- rbind(loading, matrix(0, length(aux), ncol(loading))) # Nu int.idx <- which(partable$op == "~1" & (partable$rhs == "") & (partable$lhs %in% ov.names)) intcept <- matrix(est[int.idx], ncol = 1) # Theta th.idx <- which(partable$op == "~~" & (partable$rhs %in% ov.names) & (partable$lhs %in% ov.names)) theta <- matrix(0, nrow = length(ov.names), ncol = length(ov.names), dimnames = list(ov.names, ov.names)) for (i in th.idx) { theta[partable$lhs[i], partable$rhs[i]] <- theta[partable$rhs[i], partable$lhs[i]] <- est[i] } OV <- loading %*% t(loading) + theta invsd <- solve(sqrt(diag(diag(OV)))) requireNamespace("GPArotation") if (!("package:GPArotation" %in% search())) attachNamespace("GPArotation") # Compute standardized results loading <- invsd %*% loading obj <- do.call(fun, c(list(loading), MoreArgs)) # GPArotation::GPFoblq(loading, method = "geomin") loading <- obj$loadings rotMat <- t(solve(obj$Th)) # %*% rotate est[load.idx] <- as.vector(loading[seq_along(ind.names),]) intcept <- invsd %*% intcept est[int.idx] <- as.vector(intcept) theta <- invsd %*% theta %*% invsd rownames(theta) <- colnames(theta) <- ov.names for(i in th.idx) est[i] <- theta[partable$lhs[i], partable$rhs[i]] # Put phi rv.idx <- which(partable$op == "~~" & partable$rhs %in% lv.names) templhs <- match(partable$lhs[rv.idx], lv.names) temprhs <- match(partable$rhs[rv.idx], lv.names) # rotate2 <- t(solve(rotate)) # phi <- t(rotate2) %*% rotate2 phi <- obj$Phi if (!is.null(phi)) { for (i in seq_along(templhs)) { est[rv.idx[i]] <- phi[templhs[i], temprhs[i]] } } est } #' @importFrom lavaan lavInspect parTable seStdLoadings <- function(rotate, object, fun, MoreArgs) { # object <- efaUnrotate(HolzingerSwineford1939, nf=3, varList=paste0("x", 1:9), estimator="mlr") # initL <- getLoad(object) # rotate <- GPArotation::GPFoblq(initL, method="oblimin") rotMat <- t(solve(rotate$Th)) gradient <- rotate$Gq loading <- rotate$loadings phi <- rotate$Phi if 
(is.null(phi)) phi <- diag(ncol(loading)) est <- lavaan::parameterEstimates(object)$est aux <- object@external$aux # Standardized results JAC1 <- lavaan::lav_func_jacobian_simple(func = stdRotatedLoadings, x = object@Fit@est, object = object, aux = aux, rotate = rotMat, fun = fun, MoreArgs = MoreArgs) LIST <- lavInspect(object, "list") free.idx <- which(LIST$free > 0L) m <- ncol(phi) phi.idx <- which(LIST$op == "~~" & LIST$lhs != LIST$rhs & (LIST$lhs %in% paste0("factor", 1:m))) JAC1 <- JAC1[c(free.idx, phi.idx), free.idx] VCOV <- as.matrix(lavaan::vcov(object, labels = FALSE)) if(object@Model@eq.constraints) { JAC1 <- JAC1 %*% object@Model@eq.constraints.K } COV1 <- JAC1 %*% VCOV %*% t(JAC1) # I1 <- MASS::ginv(COV1) # I1p <- matrix(0, nrow(I1) + length(phi.idx), ncol(I1) + length(phi.idx)) # I1p[1:nrow(I1), 1:ncol(I1)] <- I1 # phi.idx2 <- nrow(I1) + 1:length(phi.idx) # p <- nrow(loading) # dconlambda <- matrix(0, m^2 - m, p*m) # gradphi <- gradient %*% solve(phi) # lambgradphi <- t(loading) %*% gradphi # lambphi <- loading %*% solve(phi) # lamblamb <- t(loading) %*% loading # runrow <- 1 # descript <- NULL # for(u in 1:m) { # for(v in setdiff(1:m, u)) { # runcol <- 1 # for(r in 1:m) { # for(i in 1:p) { # mir <- (1 - 1/p) * sum(loading[i,]^2) + sum(loading[,r]^2)/p - loading[i,r]^2 # dur <- 0 # if(u == r) dur <- 1 # dconlambda[runrow, runcol] <- dur * gradphi[i, v] + 4 * mir * loading[i, u] * phi[r, v] + (8 - 8/p)*loading[i,r]*loading[i,u]*lambphi[i,v] + 8*loading[i,r]*lamblamb[u,r]*phi[r,v]/p - 8*loading[i,r]^2*loading[i,u]*phi[r,v] # descript <- rbind(descript, c(runrow, runcol, u, v, i, r)) # runcol <- runcol + 1 # } # } # runrow <- runrow + 1 # } # } # dconphi <- matrix(0, m^2 - m, m*(m-1)/2) # runrow <- 1 # descript2 <- NULL # for(u in 1:m) { # for(v in setdiff(1:m, u)) { # runcol <- 1 # for(x in 2:m) { # for(y in 1:(x - 1)) { # dux <- 0 # if(u == x) dux <- 1 # duy <- 0 # if(u == y) duy <- 1 # dconphi[runrow, runcol] <- -(dux * phi[y, v] + duy * phi[x, v]) * lambgradphi[u, u] # descript2 <- rbind(descript2, c(runrow, runcol, u, v, x, y)) # runcol <- runcol + 1 # } # } # runrow <- runrow + 1 # } # } # I2 <- matrix(0, nrow(I1p) + m^2 - m, ncol(I1p) + m^2 - m) # I2[1:nrow(I1p), 1:ncol(I1p)] <- I1p # I2[lamb.idx, 1:(m^2 - m) + nrow(I1p)] <- t(dconlambda) # I2[1:(m^2 - m) + nrow(I1p), lamb.idx] <- dconlambda # I2[phi.idx2, 1:(m^2 - m) + nrow(I1p)] <- t(dconphi) # I2[1:(m^2 - m) + nrow(I1p), phi.idx2] <- dconphi # COV2 <- MASS::ginv(I2)[1:nrow(I1p), 1:ncol(I1p)] COV2 <- COV1 LIST <- LIST[,c("lhs", "op", "rhs", "group")] LIST$se <- rep(NA, length(LIST$lhs)) LIST$se[c(free.idx, phi.idx)] <- sqrt(diag(COV2)) tmp.se <- ifelse( LIST$se == 0.0, NA, LIST$se) lv.names <- lavaan::lavNames(object@ParTable, "lv", group = 1) partable <- parTable(object) idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names)) matrix(LIST$se[idx], ncol = length(lv.names)) } checkOrdered <- function(dat, varnames, ...) { ord <- list(...)$ordered if(is.null(ord)) { ord <- FALSE } else { ord <- TRUE } if(is.null(dat)) { orderedVar <- FALSE } else { orderedVar <- sapply(dat[,varnames], function(x) "ordered" %in% is(x)) } any(c(ord, orderedVar)) } semTools/R/data.R0000644000176200001440000001133013432435606013320 0ustar liggesusers### Terrence D. 
Jorgensen ### Last updated: 4 April 2017 ### document example data sets #' Simulated Dataset to Demonstrate Two-way Latent Interaction #' #' A simulated data set with 2 independent factors and 1 dependent factor where #' each factor has three indicators #' #' #' @format A \code{data.frame} with 500 observations of 9 variables. #' \describe{ #' \item{x1}{The first indicator of the first independent factor} #' \item{x2}{The second indicator of the first independent factor} #' \item{x3}{The third indicator of the first independent factor} #' \item{x4}{The first indicator of the second independent factor} #' \item{x5}{The second indicator of the second independent factor} #' \item{x6}{The third indicator of the second independent factor} #' \item{x7}{The first indicator of the dependent factor} #' \item{x8}{The second indicator of the dependent factor} #' \item{x9}{The third indicator of the dependent factor} #' } #' @source Data were generated by the \code{\link[MASS]{mvrnorm}} function in #' the \code{MASS} package. #' @examples head(dat2way) "dat2way" #' Simulated Dataset to Demonstrate Three-way Latent Interaction #' #' A simulated data set with 3 independent factors and 1 dependent factor where #' each factor has three indicators #' #' #' @format A \code{data.frame} with 500 observations of 12 variables. #' \describe{ #' \item{x1}{The first indicator of the first independent factor} #' \item{x2}{The second indicator of the first independent factor} #' \item{x3}{The third indicator of the first independent factor} #' \item{x4}{The first indicator of the second independent factor} #' \item{x5}{The second indicator of the second independent factor} #' \item{x6}{The third indicator of the second independent factor} #' \item{x7}{The first indicator of the third independent factor} #' \item{x8}{The second indicator of the third independent factor} #' \item{x9}{The third indicator of the third independent factor} #' \item{x10}{The first indicator of the dependent factor} #' \item{x11}{The second indicator of the dependent factor} #' \item{x12}{The third indicator of the dependent factor} #' } #' @source Data were generated by the \code{\link[MASS]{mvrnorm}} function in #' the \code{MASS} package. #' @examples head(dat3way) "dat3way" #' Simulated Dataset to Demonstrate Categorical Measurement Invariance #' #' A simulated data set with 2 factors with 4 indicators each, separated into #' two groups #' #' #' @format A \code{data.frame} with 200 observations of 9 variables. #' \describe{ #' \item{g}{Sex of respondents} #' \item{u1}{Indicator 1} #' \item{u2}{Indicator 2} #' \item{u3}{Indicator 3} #' \item{u4}{Indicator 4} #' \item{u5}{Indicator 5} #' \item{u6}{Indicator 6} #' \item{u7}{Indicator 7} #' \item{u8}{Indicator 8} #' } #' @source Data were generated using the \code{lavaan} package. #' @examples head(datCat) "datCat" #' Simulated Dataset to Demonstrate Longitudinal Measurement Invariance #' #' A simulated data set with 1 factor with 3 indicators at three time points #' #' #' @format A \code{data.frame} with 200 observations of 10 variables. #' \describe{ #' \item{sex}{Sex of respondents} #' \item{y1t1}{Indicator 1 in Time 1} #' \item{y2t1}{Indicator 2 in Time 1} #' \item{y3t1}{Indicator 3 in Time 1} #' \item{y1t2}{Indicator 1 in Time 2} #' \item{y2t2}{Indicator 2 in Time 2} #' \item{y3t2}{Indicator 3 in Time 2} #' \item{y1t3}{Indicator 1 in Time 3} #' \item{y2t3}{Indicator 2 in Time 3} #' \item{y3t3}{Indicator 3 in Time 3} #' } #' @source Data were generated using the \code{simsem} package. 
#' @examples head(exLong) "exLong" #' Simulated Data set to Demonstrate Random Allocations of Parcels #' #' A simulated data set with 2 factors with 9 indicators for each factor #' #' #' @format A \code{data.frame} with 800 observations of 18 variables. #' \describe{ #' \item{f1item1}{Item 1 loading on factor 1} #' \item{f1item2}{Item 2 loading on factor 1} #' \item{f1item3}{Item 3 loading on factor 1} #' \item{f1item4}{Item 4 loading on factor 1} #' \item{f1item5}{Item 5 loading on factor 1} #' \item{f1item6}{Item 6 loading on factor 1} #' \item{f1item7}{Item 7 loading on factor 1} #' \item{f1item8}{Item 8 loading on factor 1} #' \item{f1item9}{Item 9 loading on factor 1} #' \item{f2item1}{Item 1 loading on factor 2} #' \item{f2item2}{Item 2 loading on factor 2} #' \item{f2item3}{Item 3 loading on factor 2} #' \item{f2item4}{Item 4 loading on factor 2} #' \item{f2item5}{Item 5 loading on factor 2} #' \item{f2item6}{Item 6 loading on factor 2} #' \item{f2item7}{Item 7 loading on factor 2} #' \item{f2item8}{Item 8 loading on factor 2} #' \item{f2item9}{Item 9 loading on factor 2} #' } #' @source Data were generated using the \code{simsem} package. #' @examples head(simParcel) "simParcel" semTools/R/runMI.R0000644000176200001440000005162613532027657013461 0ustar liggesusers### Terrence D. Jorgensen ### Last updated: 29 August 2019 ### runMI creates lavaan.mi object, inherits from lavaanList class ## ------------- ## Main function ## ------------- ##' Fit a lavaan Model to Multiple Imputed Data Sets ##' ##' This function fits a lavaan model to a list of imputed data sets, and can ##' also implement multiple imputation for a single \code{data.frame} with ##' missing observations, using either the Amelia package or the mice package. ##' ##' ##' @aliases runMI lavaan.mi cfa.mi sem.mi growth.mi ##' @importFrom lavaan lavInspect parTable ##' @importFrom methods getMethod ##' ##' @param model The analysis model can be specified using lavaan ##' \code{\link[lavaan]{model.syntax}} or a parameter table (as returned by ##' \code{\link[lavaan]{parTable}}). ##' @param data A \code{data.frame} with missing observations, or a \code{list} ##' of imputed data sets (if data are imputed already). If \code{runMI} has ##' already been called, then imputed data sets are stored in the ##' \code{@@DataList} slot, so \code{data} can also be a \code{lavaan.mi} object ##' from which the same imputed data will be used for additional analyses. ##' @param fun \code{character}. Name of a specific lavaan function used to fit ##' \code{model} to \code{data} (i.e., \code{"lavaan"}, \code{"cfa"}, ##' \code{"sem"}, or \code{"growth"}). Only required for \code{runMI}. ##' @param \dots additional arguments to pass to \code{\link[lavaan]{lavaan}} or ##' \code{\link[lavaan]{lavaanList}}. See also \code{\link[lavaan]{lavOptions}}. ##' Note that \code{lavaanList} provides parallel computing options, as well as ##' a \code{FUN} argument so the user can extract custom output after the model ##' is fitted to each imputed data set (see \strong{Examples}). TIP: If a ##' custom \code{FUN} is used \emph{and} \code{parallel = "snow"} is requested, ##' the user-supplied function should explicitly call \code{library} or use ##' \code{\link[base]{::}} for any functions not part of the base distribution. ##' @param m \code{integer}. Request the number of imputations. Ignored if ##' \code{data} is already a \code{list} of imputed data sets or a ##' \code{lavaan.mi} object. 
##' @param miArgs Additional arguments for the multiple-imputation function ##' (\code{miPackage}). The arguments should be put in a list (see example ##' below). Ignored if \code{data} is already a \code{list} of imputed data ##' sets or a \code{lavaan.mi} object. ##' @param miPackage Package to be used for imputation. Currently these ##' functions only support \code{"Amelia"} or \code{"mice"} for imputation. ##' Ignored if \code{data} is already a \code{list} of imputed data sets or a ##' \code{lavaan.mi} object. ##' @param seed \code{integer}. Random number seed to be set before imputing the ##' data. Ignored if \code{data} is already a \code{list} of imputed data sets ##' or a \code{lavaan.mi} object. ##' ##' @return A \code{\linkS4class{lavaan.mi}} object ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @references ##' Enders, C. K. (2010). \emph{Applied missing data analysis}. New ##' York, NY: Guilford. ##' ##' Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. ##' New York, NY: Wiley. ##' ##' @examples ##' \dontrun{ ##' ## impose missing data for example ##' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), ##' "ageyr","agemo","school")] ##' set.seed(12345) ##' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) ##' age <- HSMiss$ageyr + HSMiss$agemo/12 ##' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ##' ##' ## specify CFA model from lavaan's ?cfa help page ##' HS.model <- ' ##' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ##' ' ##' ##' ## impute data within runMI... ##' out1 <- cfa.mi(HS.model, data = HSMiss, m = 20, seed = 12345, ##' miArgs = list(noms = "school")) ##' ##' ## ... or impute missing data first ##' library(Amelia) ##' set.seed(12345) ##' HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) ##' imps <- HS.amelia$imputations ##' out2 <- cfa.mi(HS.model, data = imps) ##' ##' ## same results (using the same seed results in the same imputations) ##' cbind(impute.within = coef(out1), impute.first = coef(out2)) ##' ##' summary(out1, fit.measures = TRUE) ##' summary(out1, ci = FALSE, fmi = TRUE, output = "data.frame") ##' summary(out1, ci = FALSE, stand = TRUE, rsq = TRUE) ##' ##' ## model fit. D3 includes information criteria 
##' anova(out1) ##' ## equivalently: ##' lavTestLRT.mi(out1) ##' ## request D2 ##' anova(out1, test = "D2") ##' ## request fit indices ##' fitMeasures(out1) ##' ##' ##' ## fit multigroup model without invariance constraints ##' mgfit.config <- cfa.mi(HS.model, data = imps, estimator = "mlm", ##' group = "school") ##' ## add invariance constraints, and use previous fit as "data" ##' mgfit.metric <- cfa.mi(HS.model, data = mgfit.config, estimator = "mlm", ##' group = "school", group.equal = "loadings") ##' mgfit.scalar <- cfa.mi(HS.model, data = mgfit.config, estimator = "mlm", ##' group = "school", ##' group.equal = c("loadings","intercepts")) ##' ##' ## compare fit of 2 models to test metric invariance ##' ## (scaled likelihood ratio test) ##' lavTestLRT.mi(mgfit.metric, h1 = mgfit.config) ##' ## To compare multiple models, you must use anova() ##' anova(mgfit.config, mgfit.metric, mgfit.scalar) ##' ## or compareFit(), which also includes fit indices for comparison ##' ## (optional: name the models) ##' compareFit(config = mgfit.config, metric = mgfit.metric, ##' scalar = mgfit.scalar, ##' argsLRT = list(test = "D2", method = "satorra.bentler.2010")) ##' ##' ## correlation residuals to investigate local misfit ##' resid(mgfit.scalar, type = "cor.bentler") ##' ## modification indices for fixed parameters, to investigate local misfit ##' modindices.mi(mgfit.scalar) ##' ## or lavTestScore.mi for modification indices about equality constraints ##' lavTestScore.mi(mgfit.scalar) ##' ##' ## Wald test of whether latent means are == (fix 3 means to zero in group 2) ##' eq.means <- ' .p70. == 0 ##' .p71. == 0 ##' .p72. == 0 ' ##' lavTestWald.mi(mgfit.scalar, constraints = eq.means) ##' ##' ##' ##' ## ordered-categorical data ##' data(datCat) ##' lapply(datCat, class) ##' ## impose missing values ##' set.seed(123) ##' for (i in 1:8) datCat[sample(1:nrow(datCat), size = .1*nrow(datCat)), i] <- NA ##' ##' catout <- cfa.mi(' f =~ 1*u1 + 1*u2 + 1*u3 + 1*u4 ', data = datCat, ##' m = 3, seed = 456, ##' miArgs = list(ords = paste0("u", 1:8), noms = "g"), ##' FUN = function(fit) { ##' list(wrmr = lavaan::fitMeasures(fit, "wrmr"), ##' zeroCells = lavaan::lavInspect(fit, "zero.cell.tables")) ##' }) ##' summary(catout) ##' lavTestLRT.mi(catout, test = "D2", pool.robust = TRUE) ##' fitMeasures(catout, fit.measures = c("rmsea","srmr","cfi"), ##' test = "D2", pool.robust = TRUE) ##' ##' ## extract custom output ##' sapply(catout@funList, function(x) x$wrmr) # WRMR for each imputation ##' catout@funList[[1]]$zeroCells # zero-cell tables for first imputation ##' catout@funList[[2]]$zeroCells # zero-cell tables for second imputation ... ##' ##' } ##' ##' @export runMI <- function(model, data, fun = "lavaan", ..., m, miArgs = list(), miPackage = "Amelia", seed = 12345) { CALL <- match.call() dots <- list(...) ## check for (Bollen-Stine) bootstrap request if (all(!is.null(dots$test), tolower(dots$test) %in% c("boot","bootstrap","bollen.stine")) || all(!is.null(dots$se), tolower(dots$se) %in% c("boot","bootstrap"))) { stop('Bootstrapping is unavailable (and not recommended) in combination with ', 'multiple imputations. For robust confidence intervals of indirect', ' effects, see the ?semTools::monteCarloMed help page. 
To bootstrap ', 'within each imputation, users can pass a custom function to the ', 'FUN= argument (see ?lavaanList) to save bootstrap distributions in ', 'the @funList slot, then manually combine afterward.') } seed <- as.integer(seed[1]) ## Create (or acknowledge) list of imputed data sets imputedData <- NULL if (missing(data)) { #TODO: check for summary statistics #TODO: make lavaanList() accept lists of summary stats #TODO: Add argument to implement Li Cai's pool-polychorics first, pass # to lavaan for DWLS with pooled WLS.V= and NACOV=, return(lavaan). } else if (is.data.frame(data)) { if (miPackage[1] == "Amelia") { requireNamespace("Amelia") if (!"package:Amelia" %in% search()) attachNamespace("Amelia") imputeCall <- c(list(Amelia::amelia, x = data, m = m, p2s = 0), miArgs) set.seed(seed) imputedData <- unclass(eval(as.call(imputeCall))$imputations) } else if (miPackage[1] == "mice") { requireNamespace("mice") if (!"package:mice" %in% search()) attachNamespace("mice") imputeCall <- c(list(mice::mice, data = data, m = m, diagnostics = FALSE, printFlag = FALSE), miArgs) set.seed(seed) miceOut <- eval(as.call(imputeCall)) imputedData <- list() for (i in 1:m) { imputedData[[i]] <- mice::complete(data = miceOut, action = i, include = FALSE) } } else stop("Currently runMI only supports imputation by Amelia or mice") } else if (is.list(data)) { ## check possibility that it is a mids object (inherits from list) if (requireNamespace("mice", quietly = TRUE)) { if (mice::is.mids(data)) { m <- data$m imputedData <- list() for (i in 1:m) { imputedData[[i]] <- mice::complete(data, action = i, include = FALSE) } imputeCall <- list() } else { seed <- integer(length = 0) imputeCall <- list() imputedData <- data m <- length(data) class(imputedData) <- "list" # override inheritance (e.g., "mi" if Amelia) } } else { ## can't check for mice, so probably isn't mids seed <- integer(length = 0) imputeCall <- list() imputedData <- data m <- length(data) class(imputedData) <- "list" # override inheritance (e.g., "mi" if Amelia) } } else if (is(data, "lavaan.mi")) { seed <- data@seed imputeCall <- data@imputeCall imputedData <- data@DataList m <- length(imputedData) } else stop("data is not a valid input type: a partially observed data.frame,", " a list of imputed data.frames, or previous lavaan.mi object") ## Function to get custom output for lavaan.mi object ## NOTE: Need "lavaan::" to allow for parallel computations .getOutput. <- function(obj) { converged <- lavaan::lavInspect(obj, "converged") if (converged) { se <- lavaan::parTable(obj)$se se.test <- all(!is.na(se)) & all(se >= 0) & any(se != 0) if (lavaan::lavInspect(obj, "ngroups") == 1L && lavaan::lavInspect(obj, "nlevels") == 1L) { Heywood.lv <- det(lavaan::lavInspect(obj, "cov.lv")) <= 0 Heywood.ov <- det(lavaan::lavInspect(obj, "theta")) <= 0 } else { Heywood.lv <- !all(sapply(lavaan::lavInspect(obj, "cov.lv"), det) > 0) Heywood.ov <- !all(sapply(lavaan::lavInspect(obj, "theta"), det) > 0) } } else { se.test <- Heywood.lv <- Heywood.ov <- NA } list(sampstat = lavaan::lavInspect(obj, "sampstat"), coefMats = lavaan::lavInspect(obj, "est"), satPT = data.frame(lavaan::lav_partable_unrestricted(obj), #FIXME: do starting values ALWAYS == estimates? 
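                     ## This saturated-model parameter table is copied into the
                     ## @h1List slot below, so each imputation's saturated (h1)
                     ## model is available when pooling results afterward.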
stringsAsFactors = FALSE), modindices = try(lavaan::modindices(obj), silent = TRUE), cov.lv = lavaan::lavInspect(obj, "cov.lv"), #TODO: calculate from pooled estimates for reliability() converged = converged, SE = se.test, Heywood.lv = Heywood.lv, Heywood.ov = Heywood.ov) } ## fit model using lavaanList lavListCall <- list(lavaan::lavaanList, model = model, dataList = imputedData, cmd = fun) lavListCall <- c(lavListCall, dots) lavListCall$store.slots <- c("partable","vcov","test","h1","baseline") lavListCall$FUN <- if (is.null(dots$FUN)) .getOutput. else function(obj) { temp1 <- .getOutput.(obj) temp2 <- dots$FUN(obj) if (!is.list(temp2)) temp2 <- list(userFUN1 = temp2) if (is.null(names(temp2))) names(temp2) <- paste0("userFUN", 1:length(temp2)) duplicatedNames <- which(sapply(names(temp2), function(x) { x %in% c("sampstat","coefMats","satPT","modindices","converged", "SE","Heywood.lv","Heywood.ov","cov.lv") })) for (i in duplicatedNames) names(temp2)[i] <- paste0("userFUN", i) c(temp1, temp2) } fit <- eval(as.call(lavListCall)) ## Store custom @DataList and @SampleStatsList fit@SampleStatsList <- lapply(fit@funList, "[[", i = "sampstat") fit@DataList <- imputedData ## add parameter table to @h1List for (i in 1:m) fit@h1List[[i]] <- c(fit@h1List[[i]], list(PT = fit@funList[[i]]$satPT)) ## assign class and add new slots fit <- as(fit, "lavaan.mi") fit@coefList <- lapply(fit@funList, "[[", i = "coefMats") fit@miList <- lapply(fit@funList, "[[", i = "modindices") fit@phiList <- lapply(fit@funList, "[[", i = "cov.lv") fit@seed <- seed fit@call <- CALL fit@lavListCall <- lavListCall fit@imputeCall <- imputeCall convList <- lapply(fit@funList, "[", i = c("converged","SE", "Heywood.lv","Heywood.ov")) nonConv <- which(sapply(convList, is.null)) if (length(nonConv)) for (i in nonConv) { convList[[i]] <- list(converged = FALSE, SE = NA, Heywood.lv = NA, Heywood.ov = NA) } fit@convergence <- lapply(convList, function(x) do.call(c, x)) conv <- which(sapply(fit@convergence, "[", i = "converged")) if (!length(conv)) warning('The model did not converge for any imputed data sets.') ## keep any remaining funList slots (if allowing users to supply custom FUN) funNames <- names(fit@funList[[1]]) keepIndex <- which(!sapply(funNames, function(x) { x %in% c("sampstat","coefMats","satPT","modindices","converged", "SE","Heywood.lv","Heywood.ov","cov.lv") })) if (length(keepIndex)) { fit@funList <- lapply(fit@funList, "[", i = keepIndex) if (length(keepIndex) > 1L) { keepNames <- funNames[keepIndex] noNames <- which(keepNames == "") for (i in seq_along(noNames)) keepNames[ noNames[i] ] <- paste0("userFUN", i) fit@funList <- lapply(fit@funList, "names<-", value = keepNames) } } else fit@funList <- list() NewStartVals <- try(getMethod("coef", "lavaan.mi")(fit, type = "user", labels = FALSE), silent = TRUE) if (!inherits(NewStartVals, "try-error")) fit@ParTable$start <- NewStartVals fit } ##' @rdname runMI ##' @export lavaan.mi <- function(model, data, ..., m, miArgs = list(), miPackage = "Amelia", seed = 12345) { # runMI(model = model, data = data, fun = "lavaan", ..., # m = m, miArgs = miArgs, miPackage = miPackage, seed = seed) mc <- match.call(expand.dots = TRUE) mc$fun <- "lavaan" mc[[1L]] <- quote(semTools::runMI) eval(mc, parent.frame()) } ##' @rdname runMI ##' @export cfa.mi <- function(model, data, ..., m, miArgs = list(), miPackage = "Amelia", seed = 12345) { # runMI(model = model, data = data, fun = "cfa", ..., # m = m, miArgs = miArgs, miPackage = miPackage, seed = seed) mc <- match.call(expand.dots = 
TRUE) mc$fun <- "cfa" mc[[1L]] <- quote(semTools::runMI) eval(mc, parent.frame()) } ##' @rdname runMI ##' @export sem.mi <- function(model, data, ..., m, miArgs = list(), miPackage = "Amelia", seed = 12345) { # runMI(model = model, data = data, fun = "sem", ..., # m = m, miArgs = miArgs, miPackage = miPackage, seed = seed) mc <- match.call(expand.dots = TRUE) mc$fun <- "sem" mc[[1L]] <- quote(semTools::runMI) eval(mc, parent.frame()) } ##' @rdname runMI ##' @export growth.mi <- function(model, data, ..., m, miArgs = list(), miPackage = "Amelia", seed = 12345) { # runMI(model = model, data = data, fun = "growth", ..., # m = m, miArgs = miArgs, miPackage = miPackage, seed = seed) mc <- match.call(expand.dots = TRUE) mc$fun <- "growth" mc[[1L]] <- quote(semTools::runMI) eval(mc, parent.frame()) } ## ----------------- ## Utility functions ## ----------------- ##' Calculate the "D2" statistic ##' ##' This is a utility function used to calculate the "D2" statistic for pooling ##' test statistics across multiple imputations. This function is called by ##' several functions used for \code{\linkS4class{lavaan.mi}} objects, such as ##' \code{\link{lavTestLRT.mi}}, \code{\link{lavTestWald.mi}}, and ##' \code{\link{lavTestScore.mi}}. But this function can be used in any general ##' scenario because it only requires a vector of \eqn{\chi^2} statistics (one ##' from each imputation) and the degrees of freedom for the test statistic. ##' See Li, Meng, Raghunathan, & Rubin (1991) and Enders (2010, chapter 8) for ##' details about how it is calculated. ##' ##' @importFrom stats var pf pchisq ##' ##' @param w \code{numeric} vector of Wald \eqn{\chi^2} statistics. Can also ##' be Wald \emph{z} statistics, which will be internally squared to make ##' \eqn{\chi^2} statistics with one \emph{df} (must set \code{DF = 0L}). ##' @param DF degrees of freedom (\emph{df}) of the \eqn{\chi^2} statistics. ##' If \code{DF = 0L} (default), \code{w} is assumed to contain \emph{z} ##' statistics, which will be internally squared. ##' @param asymptotic \code{logical}. If \code{FALSE} (default), the pooled test ##' will be returned as an \emph{F}-distributed statistic with numerator ##' (\code{df1}) and denominator (\code{df2}) degrees of freedom. ##' If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its ##' \code{df1} on the assumption that its \code{df2} is sufficiently large ##' that the statistic will be asymptotically \eqn{\chi^2} distributed ##' with \code{df1}. ##' ##' @return A \code{numeric} vector containing the test statistic, \emph{df}, ##' its \emph{p} value, and 2 missing-data diagnostics: the relative increase ##' in variance (RIV, or average for multiparameter tests: ARIV) and the ##' fraction of missing information (FMI = ARIV / (1 + ARIV)). ##' ##' @seealso \code{\link{lavTestLRT.mi}}, \code{\link{lavTestWald.mi}}, ##' \code{\link{lavTestScore.mi}} ##' ##' @author Terrence D. Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' @references ##' Enders, C. K. (2010). \emph{Applied missing data analysis}. New ##' York, NY: Guilford. ##' ##' Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). ##' Significance levels from repeated \emph{p}-values with multiply-imputed ##' data. \emph{Statistica Sinica, 1}(1), 65--92. 
Retrieved from ##' https://www.jstor.org/stable/24303994 ##' ##' @examples ##' ## generate a vector of chi-squared values, just for example ##' DF <- 3 # degrees of freedom ##' M <- 20 # number of imputations ##' CHI <- rchisq(M, DF) ##' ##' ## pool the "results" ##' calculate.D2(CHI, DF) # by default, an F statistic is returned ##' calculate.D2(CHI, DF, asymptotic = TRUE) # asymptotically chi-squared ##' ##' ## generate standard-normal values, for an example of Wald z tests ##' Z <- rnorm(M) ##' calculate.D2(Z) # default DF = 0 will square Z to make chisq(DF = 1) ##' ## F test is equivalent to a t test with the denominator DF ##' ##' ##' @export calculate.D2 <- function(w, DF = 0L, asymptotic = FALSE) { if (length(w) == 0L) return(NA) w <- as.numeric(w) DF <- as.numeric(DF) nImps <- sum(!is.na(w)) if (nImps == 0) return(NA) if (DF <= 0L) { ## assume they are Z scores w <- w^2 DF <- 1L } ## pool test statistics if (length(w) > 1L) { w_bar <- mean(w, na.rm = TRUE) ariv <- (1 + 1/nImps) * var(sqrt(w), na.rm = TRUE) test.stat <- (w_bar/DF - ((nImps + 1) * ariv / (nImps - 1))) / (1 + ariv) } else { warning('There was only 1 non-missing value to pool, leading to zero ', 'variance, so D2 cannot be calculated.') test.stat <- ariv <- NA } if (test.stat < 0) test.stat <- 0 if (asymptotic) { out <- c("chisq" = test.stat * DF, df = DF, pvalue = pchisq(test.stat * DF, df = DF, lower.tail = FALSE), ariv = ariv, fmi = ariv / (1 + ariv)) } else { v3 <- DF^(-3 / nImps) * (nImps - 1) * (1 + (1 / ariv))^2 out <- c("F" = test.stat, df1 = DF, df2 = v3, pvalue = pf(test.stat, df1 = DF, df2 = v3, lower.tail = FALSE), ariv = ariv, fmi = ariv / (1 + ariv)) } class(out) <- c("lavaan.vector","numeric") out } semTools/R/compareFit.R0000644000176200001440000004232713531746307014515 0ustar liggesusers### Terrence D. Jorgensen & Sunthud Pornprasertmanit ### Last updated: 29 August 2019 ### source code for compareFit() function and FitDiff class ## ----------------- ## Class and Methods ## ----------------- ##' Class For Representing A Template of Model Fit Comparisons ##' ##' This class contains model fit measures and model fit comparisons among ##' multiple models ##' ##' ##' @name FitDiff-class ##' @aliases FitDiff-class show,FitDiff-method summary,FitDiff-method ##' @docType class ##' ##' @slot name \code{character}. The name of each model ##' @slot model.class \code{character}. One class to which each model belongs ##' @slot nested \code{data.frame}. Model fit comparisons between adjacently ##' nested models that are ordered by their degrees of freedom (\emph{df}) ##' @slot fit \code{data.frame}. Fit measures of all models specified in the ##' \code{name} slot, ordered by their \emph{df} ##' ##' @section Objects from the Class: Objects can be created via the ##' \code{\link{compareFit}} function. ##' ##' ##' @author Terrence D. 
Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' @seealso \code{\link{compareFit}}; \code{\link{clipboard}} ##' ##' @examples ##' ##' HS.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' out <- measurementInvariance(model = HS.model, data = HolzingerSwineford1939, ##' group = "school", quiet = TRUE) ##' modelDiff <- compareFit(out) ##' summary(modelDiff) ##' summary(modelDiff, fit.measures = "all") ##' summary(modelDiff, fit.measures = c("aic", "bic")) ##' ##' \dontrun{ ##' ## Save results to a file ##' saveFile(modelDiff, file = "modelDiff.txt") ##' ##' ## Copy to a clipboard ##' clipboard(modelDiff) ##' } ##' setClass("FitDiff", slots = c(name = "character", model.class = "character", nested = "data.frame", fit = "data.frame")) ##' @rdname FitDiff-class ##' @aliases show,FitDiff-method ##' @importFrom methods getMethod ##' @export setMethod("show", signature(object = "FitDiff"), function(object) { getMethod("summary", signature = "FitDiff")(object) invisible(object) }) ##' @rdname FitDiff-class ##' @aliases summary,FitDiff-method ##' ##' @param object object of class \code{FitDiff} ##' @param fit.measures \code{character} vector naming fit indices the user can ##' request from \code{\link[lavaan]{fitMeasures}}. If \code{"default"}, the ##' fit measures will be \code{c("chisq", "df", "pvalue", "cfi", "tli", ##' "rmsea", "srmr", "aic", "bic")}. If \code{"all"}, all available fit ##' measures will be returned. ##' @param nd number of digits printed ##' ##' @export setMethod("summary", signature(object = "FitDiff"), function(object, fit.measures = "default", nd = 3) { out <- list() if (nrow(object@nested) > 0L) { cat("################### Nested Model Comparison #########################\n") out$test.statistics <- object@nested if (object@model.class == "lavaan") { print(out$test.statistics, nd = nd) } else { class(out$test.statistics) <- c("lavaan.data.frame","data.frame") stats::printCoefmat(out$test.statistics, P.values = TRUE, has.Pvalue = TRUE) } cat("\n") } noFit <- ncol(object@fit) == 1L && names(object@fit)[1] == "df" if (!noFit) { cat("####################### Model Fit Indices ###########################\n") ## this is the object to return (numeric, no printed daggers) out$fit.indices <- getFitSummary(object, fit.measures, return.diff = FALSE) class(out$fit.indices) <- c("lavaan.data.frame","data.frame") ## print with daggers marking each fit index's preferred model ## (turns "numeric" vectors into "character") badness <- grepl(pattern = c("chisq|rmsea|ic|rmr|ecvi|fmin"), x = colnames(out$fit.indices)) goodness <- grepl(pattern = c("cfi|tli|rfi|nfi|ifi|rni|cn|gfi|mfi"), x = colnames(out$fit.indices)) minvalue <- badness & !goodness minvalue[!badness & !goodness] <- NA fit.integer <- grepl(pattern = c("df|npar|ntotal"), x = colnames(out$fit.indices)) suppressWarnings(fitTab <- as.data.frame(mapply(tagDagger, nd = nd, vec = out$fit.indices, minvalue = minvalue, print_integer = fit.integer), stringsAsFactors = FALSE)) rownames(fitTab) <- object@name colnames(fitTab) <- colnames(out$fit.indices) print(fitTab) cat("\n") if (nrow(object@nested) > 0L) { cat("################## Differences in Fit Indices #######################\n") out$fit.diff <- getFitSummary(object, fit.measures, return.diff = TRUE) class(out$fit.diff) <- c("lavaan.data.frame","data.frame") print(out$fit.diff, nd = nd) cat("\n") } } invisible(out) }) ## "method" for saveFile() 
function (see "clipboard.R") saveFileFitDiff <- function(object, file, what = "summary", tableFormat = FALSE, fit.measures = "default", writeArgs = list()) { if (tableFormat) { writeArgs$file <- file writeArgs$append <- TRUE if (is.null(writeArgs$sep)) writeArgs$sep <- "\t" if (is.null(writeArgs$quote)) writeArgs$quote <- FALSE if (is.null(writeArgs$row.names)) writeArgs$row.names <- FALSE if (nrow(object@nested) > 0L) { cat("Nested Model Comparison\n\n", file = file, append = TRUE) out <- object@nested #out <- data.frame(model.diff = rownames(out), out) writeArgs$x <- out do.call("write.table", writeArgs) cat("\n\n", file = file, append = TRUE) } out2 <- getFitSummary(object, fit.measures) out2 <- data.frame(model = object@name, out2) cat("Fit Indices Summaries\n\n", file = file, append = TRUE) writeArgs$x <- out2 do.call("write.table", writeArgs) } else { write(paste(utils::capture.output(lavaan::summary(object)), collapse = "\n"), file = file) } } ## -------------------- ## Constructor Function ## -------------------- ##' Build an object summarizing fit indices across multiple models ##' ##' This function will create the template to compare fit indices across ##' multiple fitted lavaan objects. The results can be exported to a clipboard ##' or a file later. ##' ##' @importFrom lavaan lavTestLRT ##' @importMethodsFrom lavaan fitMeasures ##' ##' @param ... fitted \code{lavaan} models or list(s) of \code{lavaan} objects. ##' \code{\linkS4class{lavaan.mi}} objects are also accepted, but all models ##' must belong to the same class. ##' @param nested \code{logical} indicating whether the models in \code{...} are ##' nested. See \code{\link{net}} for an empirical test of nesting. ##' @param argsLRT \code{list} of arguments to pass to ##' \code{\link[lavaan]{lavTestLRT}}, as well as to ##' \code{\link{lavTestLRT.mi}} and \code{\link{fitMeasures}} when ##' comparing \code{\linkS4class{lavaan.mi}} models. ##' @param indices \code{logical} indicating whether to return fit indices from ##' the \code{\link[lavaan]{fitMeasures}} function. ##' @param baseline.model optional fitted \code{\linkS4class{lavaan}} model ##' passed to \code{\link[lavaan]{fitMeasures}} to calculate incremental fit ##' indices. ##' ##' @return A \code{\linkS4class{FitDiff}} object that saves model fit ##' comparisons across multiple models. If the models are not nested, only ##' fit indices for each model are returned. If the models are nested, the ##' differences in fit indices are additionally returned, as well as test ##' statistics comparing each sequential pair of models (ordered by their ##' degrees of freedom). ##' ##' @author Terrence D. 
Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' @seealso \code{\linkS4class{FitDiff}}, \code{\link{clipboard}} ##' ##' @examples ##' ##' HS.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' fit1 <- cfa(HS.model, data = HolzingerSwineford1939) ##' ##' ## non-nested model ##' m2 <- ' f1 =~ x1 + x2 + x3 + x4 ##' f2 =~ x5 + x6 + x7 + x8 + x9 ' ##' fit2 <- cfa(m2, data = HolzingerSwineford1939) ##' compareFit(fit1, fit2, nested = FALSE) ##' ##' ##' ## nested model comparisons: ##' out <- measurementInvariance(model = HS.model, data = HolzingerSwineford1939, ##' group = "school", quiet = TRUE) ##' compareFit(out) ##' ##' \dontrun{ ##' ## also applies to lavaan.mi objects (fit model to multiple imputations) ##' set.seed(12345) ##' HSMiss <- HolzingerSwineford1939[ , paste("x", 1:9, sep = "")] ##' HSMiss$x5 <- ifelse(HSMiss$x1 <= quantile(HSMiss$x1, .3), NA, HSMiss$x5) ##' HSMiss$x9 <- ifelse(is.na(HSMiss$x5), NA, HSMiss$x9) ##' HSMiss$school <- HolzingerSwineford1939$school ##' HS.amelia <- amelia(HSMiss, m = 20, noms = "school") ##' imps <- HS.amelia$imputations ##' ##' ## request robust test statistics ##' mgfit2 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm") ##' mgfit1 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm", ##' group.equal = "loadings") ##' mgfit0 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm", ##' group.equal = c("loadings","intercepts")) ##' ##' ## request the strictly-positive robust test statistics ##' compareFit(scalar = mgfit0, metric = mgfit1, config = mgfit2, ##' argsLRT = list(asymptotic = TRUE, ##' method = "satorra.bentler.2010")) ##' } ##' ##' @export compareFit <- function(..., nested = TRUE, argsLRT = list(), indices = TRUE, baseline.model = NULL) { ## make sure there is something to do if (!(nested || indices)) { message('User requested neither indices nor tests of nested models.') return(NULL) } ## separate models from lists of models dots <- list(...) 
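  ## The '...' may mix individually fitted models with lists of models (e.g.,
  ## the list returned by measurementInvariance()); the two are separated here
  ## and recombined below into a single named list of fitted models.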
idx.list <- sapply(dots, is.list) modLists <- dots[ idx.list] mods <- dots[!idx.list] ## capture names of any arguments passed via dots allnames <- sapply(substitute(list(...))[-1], deparse) listnames <- allnames[ idx.list] modnames <- allnames[!idx.list] ## make sure models are named if (length(mods) && is.null(names(mods))) { names(mods) <- modnames } else for (nn in seq_along(mods)) { if (names(mods)[nn] == "") names(mods)[nn] <- modnames[nn] } ## make sure lists are named if (length(modLists) && is.null(names(modLists))) { names(modLists) <- listnames } else for (nn in seq_along(modLists)) { if (names(modLists)[nn] == "") names(modLists)[nn] <- listnames[nn] } ## within each list, make sure models are named for (i in seq_along(modLists)) { if (length(modLists[[i]]) && is.null(names(modLists[[i]]))) { names(modLists[[i]]) <- seq_along(modLists[[i]]) } else for (nn in seq_along(modLists[[i]])) { if (names(modLists[[i]])[nn] == "") names(modLists[[i]])[nn] <- nn } } ## collapse into a single list of models if (length(modLists)) mods <- c(mods, unlist(modLists)) ## check for lavaan models not.lavaan <- !sapply(mods, inherits, what = c("lavaan","lavaanList")) if (any(not.lavaan)) stop("The following are not fitted lavaan models:\n", paste0(names(which(not.lavaan)), collapse = ", ")) modClass <- unique(sapply(mods, class)) if (length(modClass) > 1L) stop('All models must be of the same class (e.g.,', ' cannot compare lavaan objects to lavaan.mi)') ## grab lavaan.mi options, if relevant if (is.null(argsLRT$pool.robust)) { pool.robust <- formals(lavTestLRT.mi)$pool.robust # default value } else { pool.robust <- argsLRT$pool.robust # user-specified value } if (is.null(argsLRT$test)) { test <- eval(formals(lavTestLRT.mi)$test) # default value } else { test <- argsLRT$test # user-specified value } ## FIT INDICES if (indices) { fitList <- lapply(mods, fitMeasures, baseline.model = baseline.model, pool.robust = pool.robust, test = test) if (length(unique(sapply(fitList, length))) > 1L) { warning('fitMeasures() returned vectors of different lengths for different', ' models, probably because certain options are not the same. Check', ' lavInspect(fit, "options")[c("estimator","test","meanstructure")]', ' for each model, or run fitMeasures() on each model to investigate.') indexList <- lapply(fitList, names) useNames <- names(which(table(unlist(indexList)) == length(fitList))) fitList <- lapply(fitList, "[", i = useNames) } fit <- as.data.frame(do.call(rbind, fitList)) } else { fitList <- lapply(mods, fitMeasures, fit.measures = "df", pool.robust = pool.robust, test = test) ## check for scaled tests nDF <- sapply(fitList, length) if (any(nDF != nDF[1])) stop('Some (but not all) models have robust tests,', ' so they cannot be compared as nested models.') fit <- data.frame(df = sapply(fitList, "[", i = if (any(nDF > 1L)) 2L else 1L)) } ## order models by increasing df (least-to-most constrained) ord <- order(fit$df) #FIXME: what if test == "mean.var.adjusted"? 
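  ## After sorting by df, the nested tests below are computed in a single
  ## lavTestLRT() call for lavaan objects, or for each adjacent pair of
  ## lavaan.mi objects via lavTestLRT.mi().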
fit <- fit[ord, , drop = FALSE] mods <- mods[ord] ## TEST STATISTICS if (nested) { if (class(mods[[1]]) == "lavaan") { argsLRT$model.names <- names(mods) argsLRT$object <- mods[[1]] nestedout <- do.call(lavTestLRT, c(mods[-1], argsLRT)) } else if (inherits(mods[[1]], "lavaan.mi")) { #FIXME: generalize to lavaan.pool modsA <- mods[-1] modsB <- mods[-length(mods)] fitDiff <- list() for (i in seq_along(modsA)) { fitA <- modsA[[i]] fitB <- modsB[[i]] if (is.null(argsLRT$asymptotic)) argsLRT$asymptotic <- any(lavListInspect(fitA, "options")$test %in% c("satorra.bentler","yuan.bentler", "yuan.bentler.mplus","scaled.shifted", "mean.var.adjusted","satterthwaite")) tempDiff <- do.call(lavTestLRT.mi, c(list(fitA, h1 = fitB), argsLRT)) if (names(tempDiff)[1] == "F") { statNames <- c("F", "df1", "df2", "pvalue") } else statNames <- c("chisq", "df", "pvalue") ## check for scaled if (any(grepl(pattern = "scaled", x = names(tempDiff)))) { statNames <- paste0(statNames, ".scaled") } diffName <- paste(names(modsA)[i], "-", names(modsB)[i]) fitDiff[[diffName]] <- tempDiff[statNames] } nestedout <- as.data.frame(do.call(rbind, fitDiff)) } ## not nested } else nestedout <- data.frame() new("FitDiff", name = names(mods), model.class = modClass, nested = nestedout, fit = fit) } ## ---------------- ## Hidden Functions ## ---------------- noLeadingZero <- function(vec, fmt, nd = 3L) { out <- sprintf(fmt, vec) upper.limit <- paste0(".", paste(rep(9, nd - 1L), collapse = ""), "5") used <- vec < as.numeric(upper.limit) & vec >= 0 used[is.na(used)] <- FALSE out[used] <- substring(out[used], 2) out } tagDagger <- function(vec, minvalue = NA, print_integer = FALSE, nd = 3L) { if (print_integer) { vec <- noLeadingZero(vec, fmt = "%.0f", nd = nd) } else if (is.na(minvalue)) { vec <- noLeadingZero(vec, fmt = paste0("%.", nd, "f"), nd = nd) } else { target <- if (minvalue) min(vec, na.rm = TRUE) else max(vec, na.rm = TRUE) tag <- rep(" ", length(vec)) tag[vec == target] <- "\u2020" vec <- noLeadingZero(vec, fmt = paste0("%.", nd, "f"), nd = nd) vec <- paste0(vec, tag) } vec } getFitSummary <- function(object, fit.measures = "default", return.diff = FALSE) { if (is.null(fit.measures)) fit.measures <- colnames(object@fit) if ("all" %in% fit.measures) fit.measures <- colnames(object@fit) if (length(fit.measures) == 1 && fit.measures == "default") { ## robust or scaled test statistics? if (is.null(object@fit$cfi.scaled)) { fit.measures <- c("chisq","df","pvalue","rmsea","cfi","tli","srmr") } else if (all(!is.na(object@fit$cfi.robust)) && !is.null(object@fit$cfi.robust)) { fit.measures <- c("chisq.scaled","df.scaled","pvalue.scaled", "rmsea.robust","cfi.robust","tli.robust","srmr") } else { fit.measures <- c("chisq.scaled","df.scaled","pvalue.scaled", "rmsea.scaled","cfi.scaled","tli.scaled","srmr") } if ("aic" %in% colnames(object@fit)) { fit.measures <- c(fit.measures, "aic", "bic") } } ## chi-squared difference test already reported, so remove (diff in p-value) if (return.diff) { fit.measures <- fit.measures[!grepl(pattern = "chisq|pvalue|ntotal", x = fit.measures)] } ## return numeric values fitTab <- object@fit[ , colnames(object@fit) %in% fit.measures] if (!return.diff) return(fitTab) ## or return differences in fit indices diffTab <- as.data.frame(do.call(cbind, lapply(fitTab, diff))) rownames(diffTab) <- paste(object@name[-1], "-", object@name[-length(object@name)]) diffTab } semTools/R/fmi.R0000644000176200001440000002546413507672475013211 0ustar liggesusers### Mauricio Garnier Villarreal & Terrence D. 
Jorgensen ### Last updated: 5 July 2019 ### This function estimates the Fraction of Missing Information for means and ### (co)variances of each variable in a partially observed data set or from ### a list of multiply imputed data sets #' Fraction of Missing Information. #' #' This function estimates the Fraction of Missing Information (FMI) for #' summary statistics of each variable, using either an incomplete data set or #' a list of imputed data sets. #' #' The function estimates a saturated model with \code{\link[lavaan]{lavaan}} #' for a single incomplete data set using FIML, or with \code{\link{lavaan.mi}} #' for a list of imputed data sets. If method = \code{"saturated"}, FMI will be #' estimated for all summary statistics, which could take a lot of time with #' big data sets. If method = \code{"null"}, FMI will only be estimated for #' univariate statistics (e.g., means, variances, thresholds). The saturated #' model gives more reliable estimates, so it could also help to request a #' subset of variables from a large data set. #' #' #' @importFrom lavaan lavListInspect lavInspect #' #' @param data Either a single \code{data.frame} with incomplete observations, #' or a \code{list} of imputed data sets. #' @param method character. If \code{"saturated"} or \code{"sat"} (default), #' the model used to estimate FMI is a freely estimated covariance matrix and #' mean vector for numeric variables, and/or polychoric correlations and #' thresholds for ordered categorical variables, for each group (if #' applicable). If \code{"null"}, only means and variances are estimated for #' numeric variables, and/or thresholds for ordered categorical variables #' (i.e., covariances and/or polychoric correlations are constrained to zero). #' See Details for more information. #' @param group character. The optional name of a grouping variable, to request #' FMI in each group. #' @param ords character. Optional vector of names of ordered-categorical #' variables, which are not already stored as class \code{ordered} in #' \code{data}. #' @param varnames character. Optional vector of variable names, to calculate #' FMI for a subset of variables in \code{data}. By default, all numeric and #' ordered variables will be included, unless \code{data} is a single #' incomplete \code{data.frame}, in which case only numeric variables can be #' used with FIML estimation. Other variable types will be removed. #' @param exclude character. Optional vector of variable names to exclude from #' the analysis. #' @param fewImps logical. If \code{TRUE}, use the estimate of FMI that applies #' a correction to the estimated between-imputation variance. Recommended when #' there are few imputations; makes little difference when there are many #' imputations. Ignored when \code{data} is not a list of imputed data sets. #' @return \code{fmi} returns a list with at least 2 of the following: #' \item{Covariances}{A list of symmetric matrices: (1) the estimated/pooled #' covariance matrix, or a list of group-specific matrices (if applicable) and #' (2) a matrix of FMI, or a list of group-specific matrices (if applicable). #' Only available if \code{method = "saturated"}.} \item{Variances}{The #' estimated/pooled variance for each numeric variable. 
Only available if #' \code{method = "null"} (otherwise, it is on the diagonal of Covariances).} #' \item{Means}{The estimated/pooled mean for each numeric variable.} #' \item{Thresholds}{The estimated/pooled threshold(s) for each #' ordered-categorical variable.} \item{message}{A message indicating caution #' when the null model is used.} #' @author Mauricio Garnier Villarreal (University of Kansas; #' \email{mauricio.garniervillarreal@@marquette.edu}) Terrence Jorgensen #' (University of Amsterdam; \email{TJorgensen314@@gmail.com}) #' @references Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse #' in surveys}. New York, NY: Wiley. #' #' Savalei, V. & Rhemtulla, M. (2012). On obtaining estimates of the fraction #' of missing information from full information maximum likelihood. #' \emph{Structural Equation Modeling, 19}(3), 477--494. #' doi:10.1080/10705511.2012.687669 #' #' Wagner, J. (2010). The fraction of missing information as a tool for #' monitoring the quality of survey data. \emph{Public Opinion Quarterly, #' 74}(2), 223--243. doi:10.1093/poq/nfq007 #' @examples #' #' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), #' "ageyr","agemo","school")] #' set.seed(12345) #' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) #' age <- HSMiss$ageyr + HSMiss$agemo/12 #' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) #' #' ## calculate FMI (using FIML, provide partially observed data set) #' (out1 <- fmi(HSMiss, exclude = "school")) #' (out2 <- fmi(HSMiss, exclude = "school", method = "null")) #' (out3 <- fmi(HSMiss, varnames = c("x5","x6","x7","x8","x9"))) #' (out4 <- fmi(HSMiss, group = "school")) #' #' \dontrun{ #' ## ordered-categorical data #' data(datCat) #' lapply(datCat, class) #' ## impose missing values #' set.seed(123) #' for (i in 1:8) datCat[sample(1:nrow(datCat), size = .1*nrow(datCat)), i] <- NA #' ## impute data m = 3 times #' library(Amelia) #' set.seed(456) #' impout <- amelia(datCat, m = 3, noms = "g", ords = paste0("u", 1:8), p2s = FALSE) #' imps <- impout$imputations #' ## calculate FMI, using list of imputed data sets #' fmi(imps, group = "g") #' } #' #' @export fmi <- function(data, method = "saturated", group = NULL, ords = NULL, varnames = NULL, exclude = NULL, fewImps = FALSE) { fiml <- is.data.frame(data) ## check for single data set or list of imputed data sets data1 <- if (fiml) data else data[[1]] ## select user-specified variables vars <- if (is.character(varnames)) varnames else colnames(data1) ## remove grouping variable and user-specified exclusions, if applicable vars <- setdiff(vars, c(group, exclude)) ## check classes ordvars <- vars[sapply(data1[vars], is.ordered)] if (!is.null(ords)) ordvars <- c(ordvars, ords) numvars <- vars[sapply(data1[vars], is.numeric)] vars <- union(numvars, ordvars) numvars <- setdiff(vars, ordvars) if (fiml) { if (length(ordvars)) message(c("By providing a single data set, only the ", "FIML option is available to calculate FMI,", " which requires continuous variables. 
The ", "following variables were removed: ", paste(ordvars, collapse = ", "))) if (!length(numvars)) stop("No numeric variables were provided.") vars <- numvars } ## construct model covstruc <- outer(vars, vars, function(x, y) paste(x, "~~", y)) if (method == "saturated" | method == "sat") { diag(covstruc)[which(vars %in% ordvars)] <- "" ## no freely estimated variances for ordered variables model <- covstruc[lower.tri(covstruc, diag = TRUE)] } else if (method == "null") model <- diag(covstruc) if (length(numvars)) model <- c(model, paste(numvars, "~1")) ## fit model if (fiml) { fit <- lavaan::lavaan(model, data = data, missing = "fiml", group = group) comb.results <- lavaan::parameterEstimates(fit, fmi = TRUE, zstat = FALSE, pvalue = FALSE, ci = FALSE) nG <- lavInspect(fit, "ngroups") if (nG == 1L) comb.results$group <- 1L group.label <- lavInspect(fit, "group.label") } else { fit <- lavaan.mi(model, data, group = group, ordered = ordvars, auto.th = TRUE) comb.results <- getMethod("summary","lavaan.mi")(fit, fmi = TRUE, ci = FALSE, output = "data.frame") nG <- lavListInspect(fit, "ngroups") if (nG == 1L) comb.results$group <- 1L group.label <- lavListInspect(fit, "group.label") if (fewImps) { comb.results["fmi1"] <- NULL names(comb.results)[names(comb.results) == "fmi2"] <- "fmi" } else { comb.results["fmi2"] <- NULL names(comb.results)[names(comb.results) == "fmi1"] <- "fmi" } for (i in c("t","df","pvalue","riv")) comb.results[i] <- NULL } ## Variances from null model, if applicable if (method == "null") { if (length(numvars)) { Variances <- comb.results[comb.results$lhs == comb.results$rhs, c("lhs","group","est","fmi")] colnames(Variances)[c(1, 3)] <- c("variable","coef") if (nG > 1L) Variances$group <- group.label[Variances$group] class(Variances) <- c("lavaan.data.frame","data.frame") ## start list of results results <- list(Variances = Variances) } else results <- list() } else { ## covariances from saturated model, including polychorics (if applicable) if (fiml) { covmat <- lavInspect(fit, "theta") if (nG == 1L) covmat <- list(covmat) } else { useImps <- sapply(fit@convergence, "[[", "converged") m <- sum(useImps) if (nG == 1L) { ThetaList <- lapply(fit@coefList[useImps], function(x) x$theta) covmat <- list(Reduce("+", ThetaList) / m) } else { covmat <- list() for (i in group.label) { groupList <- lapply(fit@coefList[useImps],"[[", i) ThetaList <- lapply(groupList, function(x) x$theta) covmat[[i]] <- Reduce("+", ThetaList) / m } } } fmimat <- covmat covars <- comb.results[comb.results$op == "~~", c("lhs","rhs","group","est","fmi")] for (i in 1:nG) { fmimat[[i]][as.matrix(covars[covars$group == i, 1:2])] <- covars$fmi[covars$group == i] fmimat[[i]][as.matrix(covars[covars$group == i, 2:1])] <- covars$fmi[covars$group == i] } if (nG == 1L) { Covariances <- list(coef = covmat[[1]], fmi = fmimat[[1]]) } else Covariances <- list(coef = covmat, fmi = fmimat) ## start list of results results <- list(Covariances = Covariances) } ## Means, if applicable if (length(numvars)) { results$Means <- comb.results[comb.results$op == "~1" & comb.results$lhs %in% numvars, c("lhs","group","est","fmi")] colnames(results$Means)[c(1, 3)] <- c("variable","coef") if (nG > 1L) results$Means$group <- group.label[results$Means$group] class(results$Means) <- c("lavaan.data.frame","data.frame") } ## Thresholds, if applicable if (length(ordvars)) { results$Thresholds <- comb.results[comb.results$op == "|", c("lhs","rhs","group","est","fmi")] colnames(results$Thresholds)[c(1, 2, 4)] <- c("variable","threshold","coef") if (nG > 1L) results$Thresholds$group <- group.label[results$Thresholds$group]
class(results$Thresholds) <- c("lavaan.data.frame","data.frame") } ## return results, with message if using null model if (method == "null") results$message <- paste("Null-model estimates may not be as", "precise as saturated-model estimates.") results } semTools/R/miPowerFit.R0000644000176200001440000003557313432435606014503 0ustar liggesusers### Sunthud Pornprasertmanit ### Last updated: 9 March 2018 #' Modification indices and their power approach for model fit evaluation #' #' The model fit evaluation approach using modification indices and expected #' parameter changes. #' #' In the lavaan object, one can inspect the modification indices and expected #' parameter changes. Those values can be used to evaluate model fit by two #' methods. #' #' First, Saris, Satorra, and van der Veld (2009, pp. 570-573) used the power #' to detect modification indices and expected parameter changes to evaluate #' model fit. First, one should evaluate whether the modification index of each #' parameter is significant. Second, one should evaluate whether the power to #' detect a target expected parameter change is high enough. If the #' modification index is not significant and the power is high, there is no #' misspecification. If the modification index is significant and the power is #' low, the fixed parameter is misspecified. If the modification index is #' significant and the power is high, the expected parameter change is #' investigated. If the expected parameter change is large (greater than the #' target expected parameter change), the parameter is misspecified. If the #' expected parameter change is low (lower than the target expected parameter #' change), the parameter is not misspecified. If the modification index is #' not significant and the power is low, the decision is inconclusive. #' #' Second, the confidence intervals of the expected parameter changes are #' formed. These confidence intervals are compared with the range of trivial #' misspecification, which could be (-\code{delta}, \code{delta}) or (0, #' \code{delta}) for nonnegative parameters. If the confidence intervals are #' outside of the range of trivial misspecification, the fixed parameters are #' severely misspecified. If the confidence intervals are inside the range of #' trivial misspecification, the fixed parameters are trivially misspecified. #' If the confidence intervals overlap with the range of trivial #' misspecification, the decision is inconclusive. #' #' @aliases miPowerFit #' @importFrom lavaan lavInspect #' @importFrom stats qnorm qchisq pchisq #' #' @param lavaanObj The lavaan model object used to evaluate model fit #' @param stdLoad The amount of standardized factor loading that one would like #' to be detected (rejected). The default value is 0.4, which is suggested by #' Saris and colleagues (2009, p. 571). #' @param cor The amount of factor or error correlations that one would like to #' be detected (rejected). The default value is 0.1, which is suggested by #' Saris and colleagues (2009, p. 571). #' @param stdBeta The amount of standardized regression coefficients that one #' would like to be detected (rejected). The default value is 0.1, which is #' suggested by Saris and colleagues (2009, p. 571). #' @param intcept The amount of standardized intercept (similar to Cohen's #' \emph{d}) that one would like to be detected (rejected). The default value is #' 0.2, which is equivalent to a small effect size proposed by Cohen (1988, #' 1992).
#' @param stdDelta The vector of the standardized parameters that one would #' like to be detected (rejected). If this argument is specified, the value #' here will overwrite the other arguments above. The order of the vector must #' be the same as the row order from modification indices from the #' \code{lavaan} object. If a single value is specified, the value will be #' applied to all parameters. #' @param delta The vector of the unstandardized parameters that one would like #' to be detected (rejected). If this argument is specified, the value here #' will overwrite the other arguments above. The order of the vector must be #' the same as the row order from modification indices from the \code{lavaan} #' object. If a single value is specified, the value will be applied to all #' parameters. #' @param cilevel The confidence level of the confidence interval of expected #' parameter changes. The confidence intervals are used in the equivalence #' testing. #' @return A data frame with these variables: #' \enumerate{ #' \item lhs: The left-hand side variable, with respect to the operator in #' in the lavaan \code{\link[lavaan]{model.syntax}} #' \item op: The lavaan syntax operator: "~~" represents covariance, #' "=~" represents factor loading, "~" represents regression, and #' "~1" represents intercept. #' \item rhs: The right-hand side variable #' \item group: The level of the group variable for the parameter in question #' \item mi: The modification index of the fixed parameter #' \item epc: The expected parameter change if the parameter is freely #' estimated #' \item target.epc: The target expected parameter change that represents #' the minimum size of misspecification that one would like to be detected #' by the test with a high power #' \item std.epc: The standardized expected parameter change if the parameter #' is freely estimated #' \item std.target.epc: The standardized target expected parameter change #' \item significant.mi: Represents whether the modification index value is #' significant #' \item high.power: Represents whether the power is enough to detect the #' target expected parameter change #' \item decision.pow: The decision whether the parameter is misspecified #' or not based on Saris et al's method: \code{"M"} represents the parameter #' is misspecified, \code{"NM"} represents the parameter is not misspecified, #' \code{"EPC:M"} represents the parameter is misspecified decided by #' checking the expected parameter change value, \code{"EPC:NM"} represents #' the parameter is not misspecified decided by checking the expected #' parameter change value, and \code{"I"} represents the decision is #' inconclusive. #' \item se.epc: The standard errors of the expected parameter changes. #' \item lower.epc: The lower bound of the confidence interval of expected #' parameter changes. #' \item upper.epc: The upper bound of the confidence interval of expected #' parameter changes. #' \item lower.std.epc: The lower bound of the confidence interval of #' standardized expected parameter changes. #' \item upper.std.epc: The upper bound of the confidence interval of #' standardized expected parameter changes. #' \item decision.ci: The decision whether the parameter is misspecified or #' not based on the confidence interval method: \code{"M"} represents the #' parameter is misspecified, \code{"NM"} represents the parameter is not #' misspecified, and \code{"I"} represents the decision is inconclusive. 
#' } #' #' The row numbers match the results obtained from the #' \code{inspect(object, "mi")} function. #' #' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) #' @seealso \code{\link{moreFitIndices}} for additional fit indices #' information #' @references Cohen, J. (1988). \emph{Statistical power analysis for the #' behavioral sciences} (2nd ed.). Hillsdale, NJ: Erlbaum. #' #' Cohen, J. (1992). A power primer. \emph{Psychological Bulletin, 112}(1), #' 155--159. doi:10.1037/0033-2909.112.1.155 #' #' Saris, W. E., Satorra, A., & van der Veld, W. M. (2009). Testing structural #' equation models or detection of misspecifications? \emph{Structural Equation #' Modeling, 16}(4), 561--582. doi:10.1080/10705510903203433 #' @examples #' #' library(lavaan) #' #' HS.model <- ' visual =~ x1 + x2 + x3 #' textual =~ x4 + x5 + x6 #' speed =~ x7 + x8 + x9 ' #' #' fit <- cfa(HS.model, data = HolzingerSwineford1939, #' group = "sex", meanstructure = TRUE) #' miPowerFit(fit) #' #' model <- ' #' # latent variable definitions #' ind60 =~ x1 + x2 + x3 #' dem60 =~ y1 + a*y2 + b*y3 + c*y4 #' dem65 =~ y5 + a*y6 + b*y7 + c*y8 #' #' # regressions #' dem60 ~ ind60 #' dem65 ~ ind60 + dem60 #' #' # residual correlations #' y1 ~~ y5 #' y2 ~~ y4 + y6 #' y3 ~~ y7 #' y4 ~~ y8 #' y6 ~~ y8 #' ' #' fit2 <- sem(model, data = PoliticalDemocracy, meanstructure = TRUE) #' miPowerFit(fit2, stdLoad = 0.3, cor = 0.2, stdBeta = 0.2, intcept = 0.5) #' #' @export miPowerFit <- function(lavaanObj, stdLoad = 0.4, cor = 0.1, stdBeta = 0.1, intcept = 0.2, stdDelta = NULL, delta = NULL, cilevel = 0.90) { mi <- lavInspect(lavaanObj, "mi") mi <- mi[mi$op != "==",] sigma <- mi[,"epc"] / sqrt(mi[,"mi"]) if (is.null(delta)) { if (is.null(stdDelta)) stdDelta <- getTrivialEpc(mi, stdLoad = stdLoad, cor = cor, stdBeta = stdBeta, intcept = intcept) if (length(stdDelta) == 1) stdDelta <- rep(stdDelta, nrow(mi)) delta <- unstandardizeEpc(mi, stdDelta, findTotalVar(lavaanObj)) } if (length(delta) == 1) delta <- rep(delta, nrow(mi)) ncp <- (delta / sigma)^2 alpha <- 0.05 desiredPow <- 0.80 cutoff <- qchisq(1 - alpha, df = 1) pow <- 1 - pchisq(cutoff, df = 1, ncp = ncp) sigMI <- mi[,"mi"] > cutoff highPow <- pow > desiredPow group <- rep(1, nrow(mi)) if ("group" %in% colnames(mi)) group <- mi[ , "group"] decision <- mapply(decisionMIPow, sigMI = sigMI, highPow = highPow, epc = mi[ , "epc"], trivialEpc = delta) if (is.null(stdDelta)) stdDelta <- standardizeEpc(mi, findTotalVar(lavaanObj), delta = delta) result <- cbind(mi[ , 1:3], group, as.numeric(mi[ , "mi"]), mi[ , "epc"], delta, standardizeEpc(mi, findTotalVar(lavaanObj)), stdDelta, sigMI, highPow, decision) # Second method: equivalence testing using confidence intervals for the EPCs crit <- abs(qnorm((1 - cilevel)/2)) seepc <- abs(result[,6]) / sqrt(abs(result[,5])) lowerepc <- result[,6] - crit * seepc upperepc <- result[,6] + crit * seepc stdlowerepc <- standardizeEpc(mi, findTotalVar(lavaanObj), delta = lowerepc) stdupperepc <- standardizeEpc(mi, findTotalVar(lavaanObj), delta = upperepc) isVar <- mi[,"op"] == "~~" & mi[,"lhs"] == mi[,"rhs"] decisionci <- mapply(decisionCIEpc, targetval = as.numeric(stdDelta), lower = stdlowerepc, upper = stdupperepc, positiveonly = isVar) result <- cbind(result, seepc, lowerepc, upperepc, stdlowerepc, stdupperepc, decisionci) result <- result[!is.na(decision), ] colnames(result) <- c("lhs","op","rhs","group","mi","epc","target.epc", "std.epc","std.target.epc","significant.mi", "high.power","decision.pow","se.epc","lower.epc", "upper.epc","lower.std.epc","upper.std.epc","decision.ci") result <- format(result, scientific = FALSE, digits = 4)
return(result) } ## ---------------- ## Hidden Functions ## ---------------- ## totalFacVar: Find total factor variances when regression coefficient matrix ## and factor residual covariance matrix are specified totalFacVar <- function(beta, psi) { ID <- diag(nrow(psi)) total <- solve(ID - beta) %*% psi %*% t(solve(ID - beta)) return(diag(total)) } # findTotalVar: find the total indicator and factor variances #' @importFrom lavaan lavInspect findTotalVar <- function(lavaanObj) { result <- list() nGroups <- lavInspect(lavaanObj, "ngroups") cov.all <- lavInspect(lavaanObj, "cov.all") if (nGroups == 1) cov.all <- list(cov.all) for (i in 1:nGroups) { temp <- diag(cov.all[[i]]) names(temp) <- rownames(cov.all[[i]]) result[[i]] <- temp } return(result) } ## getTrivialEpc: find the trivial misspecified expected parameter changes ## given the type of parameters in each row of modification indices getTrivialEpc <- function(mi, stdLoad=0.4, cor=0.1, stdBeta=0.1, intcept=0.2) { op <- mi[,"op"] result <- gsub("=~", stdLoad, op) result <- gsub("~~", cor, result) result <- gsub("~1", intcept, result) result <- gsub("~", stdBeta, result) return(result) } ## unstandardizeEpc: Transform from standardized EPC to unstandardized EPC unstandardizeEpc <- function(mi, delta, totalVar) { name <- names(totalVar[[1]]) lhsPos <- match(mi[,"lhs"], name) rhsPos <- match(mi[,"rhs"], name) group <- rep(1, nrow(mi)) if("group" %in% colnames(mi)) group <- mi[,"group"] getVar <- function(pos, group) totalVar[[group]][pos] lhsVar <- mapply(getVar, pos=lhsPos, group=group) rhsVar <- mapply(getVar, pos=rhsPos, group=group) FUN <- function(op, lhsVar, rhsVar, delta) { if(op == "|") return(NA) lhsSD <- sqrt(lhsVar) rhsSD <- sqrt(rhsVar) if(!is.numeric(delta)) delta <- as.numeric(delta) if(op == "=~") { return((rhsSD * delta) / lhsSD) } else if (op == "~~") { return(lhsSD * delta * rhsSD) } else if (op == "~1") { return(lhsSD * delta) } else if (op == "~") { return((lhsSD * delta) / rhsSD) } else { return(NA) } } unstdDelta <- mapply(FUN, op=mi[,"op"], lhsVar=lhsVar, rhsVar=rhsVar, delta=delta) return(unstdDelta) } ## standardizeEpc: Transform from unstandardized EPC to standardized EPC.
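## (A quick numeric check of this transformation, using hypothetical values not
## taken from any fitted model: a factor-loading EPC of 0.25, with total latent
## (lhs) variance 1.5 and indicator (rhs) variance 2.0, standardizes to
## 0.25 * sqrt(1.5) / sqrt(2.0) = 0.217, per the "=~" branch below.)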
## If delta is null, the unstandardized epc from the modification indices ## data.frame are used standardizeEpc <- function(mi, totalVar, delta = NULL) { if(is.null(delta)) delta <- mi[,"epc"] name <- names(totalVar[[1]]) lhsPos <- match(mi[,"lhs"], name) rhsPos <- match(mi[,"rhs"], name) group <- rep(1, nrow(mi)) if("group" %in% colnames(mi)) group <- mi[,"group"] getVar <- function(pos, group) totalVar[[group]][pos] lhsVar <- mapply(getVar, pos=lhsPos, group=group) rhsVar <- mapply(getVar, pos=rhsPos, group=group) FUN <- function(op, lhsVar, rhsVar, delta) { lhsSD <- sqrt(lhsVar) rhsSD <- sqrt(rhsVar) if(!is.numeric(delta)) delta <- as.numeric(delta) if(op == "=~") { #stdload = beta * sdlatent / sdindicator = beta * lhs / rhs return((delta / rhsSD) * lhsSD) } else if (op == "~~") { #r = cov / (sd1 * sd2) return(delta / (lhsSD * rhsSD)) } else if (op == "~1") { #d = meanDiff/sd return(delta / lhsSD) } else if (op == "~") { #beta = b * sdX / sdY = b * rhs / lhs return((delta / lhsSD) * rhsSD) } else { return(NA) } } stdDelta <- mapply(FUN, op=mi[,"op"], lhsVar=lhsVar, rhsVar=rhsVar, delta=delta) return(stdDelta) } ## decisionMIPow: provide the decision given the significance of modification ## indices and power to detect trivial misspecification decisionMIPow <- function(sigMI, highPow, epc, trivialEpc) { if(is.na(sigMI) | is.na(highPow)) return(NA) if(sigMI & highPow) { if(abs(epc) > abs(trivialEpc)) { return("EPC:M") } else { return("EPC:NM") } } else if (sigMI & !highPow) { return("M") } else if (!sigMI & highPow) { return("NM") } else if (!sigMI & !highPow) { return("I") } else { return(NA) } } decisionCIEpc <- function(targetval, lower, upper, positiveonly = FALSE) { if(is.na(lower) | is.na(upper)) return(NA) if(positiveonly) { if(lower > targetval) { return("M") } else if (upper < targetval) { return("NM") } else { return("I") } } else { negtargetval <- -targetval if(lower > targetval | upper < negtargetval) { return("M") } else if (upper < targetval & negtargetval < lower) { return("NM") } else { return("I") } } } semTools/R/poolMAlloc.R0000644000176200001440000013720013531465623014457 0ustar liggesusers### Authors: ### Jason D. Rights (Vanderbilt University; jason.d.rights@vanderbilt.edu) ### - based on research from/with Sonya Sterba ### - adapted from OLD parcelAllocation() by Corbin Quick and Alexander Schoemann ### - additional "indices" argument added by Terrence D. Jorgensen ### Last updated: 28 August 2019 ##' Pooled estimates and standard errors across M parcel-allocations: Combining ##' sampling variability and parcel-allocation variability. ##' ##' This function employs an iterative algorithm to pick the number of random ##' item-to-parcel allocations needed to meet user-defined stability criteria ##' for a fitted structural equation model (SEM) (see \bold{Details} below for ##' more information). Pooled point and standard-error estimates from this SEM ##' can be outputted at this final selected number of allocations (however, it ##' is more efficient to save the allocations and treat them as multiple ##' imputations using \code{\link{runMI}}; see \bold{See Also} for links with ##' examples). Additionally, new indices (see Sterba & Rights, 2016) are ##' outputted for assessing the relative contributions of parcel-allocation ##' variability vs. sampling variability in each estimate. 
At each iteration, ##' this function generates a given number of random item-to-parcel allocations, ##' fits a SEM to each allocation, pools estimates across allocations from that ##' iteration, and then assesses whether stopping criteria are met. If stopping ##' criteria are not met, the algorithm increments the number of allocations ##' used (generating all new allocations). ##' ##' For further details on the benefits of the random allocation of items to ##' parcels, see Sterba (2011) and Sterba & MacCallum (2010). ##' ##' This function implements an algorithm for choosing the number of allocations ##' (\emph{M}; described in Sterba & Rights, 2016), pools point and ##' standard-error estimates across these \emph{M} allocations, and produces ##' indices for assessing the relative contributions of parcel-allocation ##' variability vs. sampling variability in each estimate. ##' ##' To obtain pooled test statistics for model fit or model comparison, the ##' \code{list} of parcel allocations can be passed to \code{\link{runMI}} ##' (find \bold{Examples} on the help pages for \code{\link{parcelAllocation}} ##' and \code{\link{PAVranking}}). ##' ##' This function randomly generates a given number (\code{nAllocStart}) of ##' item-to-parcel allocations, fits a SEM to each allocation, and then ##' increments the number of allocations used (by \code{nAllocAdd}) until the ##' pooled point and standard-error estimates fulfill stopping criteria ##' (\code{stopProp} and \code{stopValue}, defined above). A summary of results ##' from the model that was fit to the \emph{M} allocations is returned. ##' ##' Additionally, this function outputs the proportion of allocations with ##' solutions that converged (using a maximum likelihood estimator) as well as ##' the proportion of allocations with solutions that were converged and proper. ##' The converged and proper solutions among the final \emph{M} allocations are ##' used in computing pooled results. ##' ##' Additionally, after each iteration of the algorithm, information useful in ##' monitoring the algorithm is outputted: the number of allocations used at ##' that iteration, the proportion of pooled parameter estimates meeting ##' stopping criteria at the previous iteration, the proportion of pooled ##' standard errors meeting stopping criteria at the previous iteration, and the ##' runtime of that iteration. When stopping criteria are ##' satisfied, the full set of results is outputted. ##' ##' @importFrom stats sd pnorm pt qt runif pchisq ##' @importFrom lavaan lavInspect ##' ##' @param nPerPar A list in which each element is a vector, corresponding to ##' each factor, indicating sizes of parcels. If variables are left out of ##' parceling, they should not be accounted for here (i.e., there should not be ##' parcels of size "1"). ##' @param facPlc A list of vectors, each corresponding to a factor, specifying ##' the item indicators of that factor (whether included in parceling or not). ##' Either variable names or column numbers. Variables not listed will not be ##' modeled or included in output datasets. ##' @param nAllocStart The number of random allocations of items to parcels to ##' generate in the first iteration of the algorithm. ##' @param nAllocAdd The number of allocations to add with each iteration of the ##' algorithm. Note that if only one iteration is desired, \code{nAllocAdd} can ##' be set to \eqn{0} and results will be output for \code{nAllocStart} ##' allocations only.
##' @param syntax lavaan syntax that defines the model. ##' @param dataset Item-level dataset ##' @param parceloutput Optional \code{character}. Path (folder/directory) where ##' \emph{M} (the final selected number of allocations) parceled data sets will ##' be outputted from the iteration where the algorithm met stopping criteria. ##' Note for Windows users: file path must be specified using forward slashes ##' (\code{/}), not backslashes (\code{\\}). See \code{\link[base]{path.expand}} ##' for details. If \code{NULL} (default), nothing is saved to disk. ##' @param stopProp Value used in defining stopping criteria of the algorithm ##' (\eqn{\delta_a} in Sterba & Rights, 2016). This is the minimum proportion of ##' change (in any pooled parameter or pooled standard error estimate listed in ##' \code{selectParam}) that is allowable from one iteration of the algorithm to ##' the next. That is, change in pooled estimates and pooled standard errors ##' from one iteration to the next must all be less than (\code{stopProp}) x ##' (value from former iteration). Note that \code{stopValue} can override this ##' criterion (see below). Also note that values less than .01 are unlikely to ##' lead to more substantively meaningful precision. Also note that if only ##' \code{stopValue} is a desired criterion, \code{stopProp} can be set to 0. ##' @param stopValue Value used in defining stopping criteria of the algorithm ##' (\eqn{\delta_b} in Sterba & Rights, 2016). \code{stopValue} is a minimum ##' allowable amount of absolute change (in any pooled parameter or pooled ##' standard error estimate listed in \code{selectParam}) from one iteration of ##' the algorithm to the next. For a given pooled estimate or pooled standard ##' error, \code{stopValue} is only invoked as a stopping criteria when the ##' minimum change required by \code{stopProp} is less than \code{stopValue}. ##' Note that values less than .01 are unlikely to lead to more substantively ##' meaningful precision. Also note that if only \code{stopProp} is a desired ##' criterion, \code{stopValue} can be set to 0. ##' @param selectParam (Optional) A list of the pooled parameters to be used in ##' defining stopping criteria (i.e., \code{stopProp} and \code{stopValue}). ##' These parameters should appear in the order they are listed in the lavaan ##' syntax. By default, all pooled parameters are used. Note that ##' \code{selectParam} should only contain freely-estimated parameters. In one ##' example from Sterba & Rights (2016) \code{selectParam} included all free ##' parameters except item intercepts and in another example \code{selectParam} ##' included only structural parameters. ##' @param indices Optional \code{character} vector indicating the names of ##' available \code{\link[lavaan]{fitMeasures}} to be included in the output. ##' The first and second elements should be a chi-squared test statistic and its ##' associated degrees of freedom, both of which will be added if missing. If ##' \code{"default"}, the indices will be \code{c("chisq", "df", "cfi", "tli", ##' "rmsea","srmr")}. If a robust test statistic is requested (see ##' \code{\link[lavaan]{lavOptions}}), \code{c("chisq","df")} will be replaced ##' by \code{c("chisq.scaled","df.scaled")}. 
For the output to include both the ##' naive and robust test statistics, \code{indices} should include both, but ##' put the scaled test statistics first, as in \code{indices = ##' c("chisq.scaled", "df.scaled", "chisq", "df")} ##' @param double (Optional) If set to \code{TRUE}, requires stopping criteria ##' (\code{stopProp} and \code{stopValue}) to be met for all parameters (in ##' \code{selectParam}) for two consecutive iterations of the algorithm. By ##' default, this is set to \code{FALSE}, meaning stopping criteria need only be ##' met at one iteration of the algorithm. ##' @param names (Optional) A character vector containing the names of parceled ##' variables. ##' @param leaveout (Optional) A vector of variables to be left out of ##' randomized parceling. Either variable names or column numbers are allowed. ##' @param useTotalAlloc (Optional) If set to \code{TRUE}, function will output ##' a separate set of results that uses all allocations created by the ##' algorithm, rather than \emph{M} allocations (see "Allocations needed for ##' stability" below). This distinction is further discussed in Sterba and ##' Rights (2016). ##' @param checkConv (Optional) If set to TRUE, function will output pooled ##' estimates and standard errors from 10 iterations post-convergence. ##' @param \dots Additional arguments to be passed to ##' \code{\link[lavaan]{lavaan}}. See also \code{\link[lavaan]{lavOptions}} ##' ##' @return ##' \item{Estimates}{A table containing pooled results across \emph{M} ##' allocations at the iteration where stopping criteria were met. Columns ##' correspond to individual parameter name, pooled estimate, pooled standard ##' error, \emph{p}-value for a \emph{z}-test of the parameter, \emph{z}-based ##' 95\% confidence interval, \emph{p}-value for a \emph{t}-test of the ##' parameter (using degrees of freedom described in Sterba & Rights, 2016), and ##' \emph{t}-based 95\% confidence interval for the parameter.} ##' \item{Fit}{A table containing results related to model fit from the \emph{M} ##' allocations at the iteration where stopping criteria were met. Columns ##' correspond to fit index names, the average of each index across allocations, ##' the standard deviation of each fit index across allocations, the maximum of ##' each fit index across allocations, the minimum of each fit index across ##' allocations, the range of each fit index across allocations, and the percent ##' of the \emph{M} allocations where the chi-square test of absolute fit was ##' significant.} ##' \item{Proportion of converged and proper allocations}{A table ##' containing the proportion of the final \emph{M} allocations that converged ##' (using a maximum likelihood estimator) and the proportion of allocations ##' that converged to proper solutions. Note that pooled estimates, pooled ##' standard errors, and other results are computed using only the converged, ##' proper allocations.} ##' \item{Allocations needed for stability (M)}{The number of allocations ##' (\emph{M}) at which the algorithm's stopping criteria (defined above) were ##' met.} ##' \item{Indices used to quantify uncertainty in estimates due to sample vs. ##' allocation variability}{A table containing individual parameter names, an ##' estimate of the proportion of total variance of a pooled parameter estimate ##' that is attributable to parcel-allocation variability (PPAV), and an estimate ##' of the ratio of the between-allocation variance of a pooled parameter ##' estimate to the within-allocation variance (RPAV). 
See Sterba & Rights (2016) ##' for more detail.} ##' \item{Total runtime (minutes)}{The total runtime of the function, in minutes. ##' Note that the total runtime will be greater when the specified model ##' encounters convergence problems for some allocations, as is the case with the ##' \code{\link{simParcel}} dataset used below.} ##' ##' @author ##' Jason D. Rights (Vanderbilt University; \email{jason.d.rights@@vanderbilt.edu}) ##' ##' The author would also like to credit Corbin Quick and Alexander Schoemann ##' for providing the original parcelAllocation function on which this function ##' is based. ##' ##' @seealso ##' \code{\link{runMI}} for treating allocations as multiple imputations to ##' pool results across allocations. See \bold{Examples} on help pages for: ##' \itemize{ ##' \item{\code{\link{parcelAllocation}} for fitting a single model} ##' \item{\code{\link{PAVranking}} for comparing 2 models} ##' } ##' ##' @references ##' ##' Sterba, S. K. (2011). Implications of parcel-allocation ##' variability for comparing fit of item-solutions and parcel-solutions. ##' \emph{Structural Equation Modeling, 18}(4), 554--577. ##' doi:10.1080/10705511.2011.607073 ##' ##' Sterba, S. K., & MacCallum, R. C. (2010). Variability in parameter estimates ##' and model fit across random allocations of items to parcels. ##' \emph{Multivariate Behavioral Research, 45}(2), 322--358. ##' doi:10.1080/00273171003680302 ##' ##' Sterba, S. K., & Rights, J. D. (2016). Accounting for parcel-allocation ##' variability in practice: Combining sources of uncertainty and choosing the ##' number of allocations. \emph{Multivariate Behavioral Research, 51}(2--3), ##' 296--313. doi:10.1080/00273171.2016.1144502 ##' ##' Sterba, S. K., & Rights, J. D. (2017). Effects of parceling on model ##' selection: Parcel-allocation variability in model ranking. ##' \emph{Psychological Methods, 22}(1), 47--68. doi:10.1037/met0000067 ##' ##' @examples ##' ##' \dontrun{ ##' ## lavaan syntax: A 2 Correlated ##' ## factor CFA model to be fit to parceled data ##' ##' parmodel <- ' ##' f1 =~ NA*p1f1 + p2f1 + p3f1 ##' f2 =~ NA*p1f2 + p2f2 + p3f2 ##' p1f1 ~ 1 ##' p2f1 ~ 1 ##' p3f1 ~ 1 ##' p1f2 ~ 1 ##' p2f2 ~ 1 ##' p3f2 ~ 1 ##' p1f1 ~~ p1f1 ##' p2f1 ~~ p2f1 ##' p3f1 ~~ p3f1 ##' p1f2 ~~ p1f2 ##' p2f2 ~~ p2f2 ##' p3f2 ~~ p3f2 ##' f1 ~~ 1*f1 ##' f2 ~~ 1*f2 ##' f1 ~~ f2 ##' ' ##' ##' ## specify items for each factor ##' f1name <- colnames(simParcel)[1:9] ##' f2name <- colnames(simParcel)[10:18] ##' ##' ## run function ##' poolMAlloc(nPerPar = list(c(3,3,3), c(3,3,3)), ##' facPlc = list(f1name, f2name), nAllocStart = 10, nAllocAdd = 10, ##' syntax = parmodel, dataset = simParcel, stopProp = .03, ##' stopValue = .03, selectParam = c(1:6, 13:18, 21), ##' names = list("p1f1","p2f1","p3f1","p1f2","p2f2","p3f2"), ##' double = FALSE, useTotalAlloc = FALSE) ##' } ##' ##' ## See examples on ?parcelAllocation and ?PAVranking for how to obtain ##' ## pooled test statistics and other pooled lavaan output. ##' ## Details provided in Sterba & Rights (2016). ##' ##' @export poolMAlloc <- function(nPerPar, facPlc, nAllocStart, nAllocAdd = 0, parceloutput = NULL, syntax, dataset, stopProp, stopValue, selectParam = NULL, indices = "default", double = FALSE, checkConv = FALSE, names = "default", leaveout = 0, useTotalAlloc = FALSE, ...) 
{ message('Note that more options for pooled results are available using the ', 'runMI() function (see Examples on ?parcelAllocation and ?PAVranking)') if (!is.null(parceloutput)) { if (!dir.exists(parceloutput)) stop('invalid directory:\n', paste(parceloutput), "\n\n") } StartTimeFull <- proc.time() if (is.character(dataset)) dataset <- utils::read.csv(dataset) if (indices[1] == "default") indices <- c("chisq", "df", "cfi", "tli", "rmsea","srmr") ## make sure chi-squared and df are the first and second elements requestedChiSq <- grep(pattern = "chisq", indices, value = TRUE) if (length(requestedChiSq) == 0L) { indices <- unique(c("chisq", indices)) } else { indices <- unique(c(requestedChiSq[1], indices)) } requestedDF <- grep(pattern = "df", indices, value = TRUE) if (length(requestedDF) == 0L) { indices <- unique(c(indices[1], "df", indices[-1])) } else { indices <- unique(c(indices[1], requestedDF[1], indices[-1])) } isProperSolution <- function(object) { lavpartable <- object@ParTable lavfit <- object@Fit lavdata <- object@Data lavmodel <- object@Model var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs) if (length(var.idx) > 0L && any(lavfit@est[var.idx] < 0)) return(FALSE) if (length(lavaan::lavaanNames(lavpartable, type = "lv.regular")) > 0L) { ETA <- list(lavInspect(object, "cov.lv")) for (g in 1:lavdata@ngroups) { eigvals <- eigen(ETA[[g]], symmetric = TRUE, only.values = TRUE)$values if (any(eigvals < -1 * .Machine$double.eps^(3/4))) return(FALSE) } } THETA <- list(lavInspect(object, "theta")) for (g in 1:lavdata@ngroups) { num.idx <- lavmodel@num.idx[[g]] if (length(num.idx) > 0L) { eigvals <- eigen(THETA[[g]][unlist(num.idx), unlist(num.idx), drop = FALSE], symmetric = TRUE, only.values = TRUE)$values if (any(eigvals < -1 * .Machine$double.eps^(3/4))) return(FALSE) } } TRUE } nloop <- 0 nAllocStarttemp <- nAllocStart options(max.print = 1e+06) BreakCounter <- NA repeat { StartTime <- proc.time() nloop <- nloop + 1 if (double == TRUE & is.na(BreakCounter) == FALSE) BreakCounter <- BreakCounter + 1 if (checkConv == TRUE & is.na(BreakCounter) == FALSE) BreakCounter <- BreakCounter + 1 if (nloop > 1) { if (is.na(BreakCounter) == TRUE) { Parmn_revFinal <- Parmn_rev[[nloop - 1]] nConvergedOutput <- nConverged nConvergedProperOutput <- nConvergedProper PooledSEwithinvarFinal <- PooledSEwithinvar PooledSEbetweenvarFinal <- PooledSEbetweenvar PooledSEFinal <- PooledSE FitsumOutput <- Fitsum nAllocOutput <- nAllocStart - nAllocAdd AllocationsOutput <- Allocations #ParamFinal <- Param # defined, but never used } ParamPooledSE_temp <- ParamPooledSE ParamTest_temp <- ParamTest PooledSE_temp <- PooledSE ParamPoolSEdiffmin <- abs(ParamPooledSE_temp * stopProp) ParamPoolSEdiffmin[ParamPoolSEdiffmin < stopValue] <- stopValue ParamDiffMin <- abs(ParamTest * stopProp) ParamDiffMin[ParamDiffMin < stopValue] <- stopValue PooledSEmin <- abs(PooledSE * stopProp) PooledSEmin[PooledSEmin < stopValue] <- stopValue } dataset <- as.matrix(dataset) if (nAllocStart < 2) stop("Minimum of two allocations required.") if (is.list(facPlc)) { if (is.numeric(facPlc[[1]][1]) == FALSE) { facPlcb <- facPlc Namesv <- colnames(dataset) for (i in 1:length(facPlc)) { for (j in 1:length(facPlc[[i]])) { facPlcb[[i]][j] <- match(facPlc[[i]][j], Namesv) } facPlcb[[i]] <- as.numeric(facPlcb[[i]]) } facPlc <- facPlcb } facPlc2 <- rep(0, ncol(dataset)) for (i in 1:length(facPlc)) { for (j in 1:length(facPlc[[i]])) { facPlc2[facPlc[[i]][j]] <- i } } facPlc <- facPlc2 } if (leaveout != 0) { if 
(is.numeric(leaveout) == FALSE) { leaveoutb <- rep(0, length(leaveout)) Namesv <- colnames(dataset) for (i in 1:length(leaveout)) { leaveoutb[i] <- match(leaveout[i], Namesv) } leaveout <- as.numeric(leaveoutb) } k1 <- 0.001 for (i in 1:length(leaveout)) { facPlc[leaveout[i]] <- facPlc[leaveout[i]] + k1 k1 <- k1 + 0.001 } } if (0 %in% facPlc == TRUE) { Zfreq <- sum(facPlc == 0) for (i in 1:Zfreq) { Zplc <- match(0, facPlc) dataset <- dataset[, -Zplc] facPlc <- facPlc[-Zplc] } } if (is.list(nPerPar)) { nPerPar2 <- c() for (i in 1:length(nPerPar)) { Onesp <- sum(facPlc > i & facPlc < i + 1) nPerPar2 <- c(nPerPar2, nPerPar[i], rep(1, Onesp), recursive = TRUE) } nPerPar <- nPerPar2 } Npp <- c() for (i in 1:length(nPerPar)) { Npp <- c(Npp, rep(i, nPerPar[i])) } Locate <- sort(round(facPlc)) Maxv <- max(Locate) - 1 if (length(Locate) != length(Npp)) { stop("** ERROR! ** Parcels incorrectly specified. Check input!") } if (Maxv > 0) { for (i in 1:Maxv) { Mat <- match(i + 1, Locate) if (Npp[Mat] == Npp[Mat - 1]) { stop("** ERROR! ** Parcels incorrectly specified. Check input!") } } } Onevec <- facPlc - round(facPlc) NleaveA <- length(Onevec) - sum(Onevec == 0) NleaveP <- sum(nPerPar == 1) if (NleaveA < NleaveP) { warning("** WARNING! ** Single-variable parcels have been requested.", " Check input!") } if (NleaveA > NleaveP) warning("** WARNING! ** More non-parceled variables have been requested", " than provided for in parcel vector. Check input!") if (length(names) > 1) { if (length(names) != length(nPerPar)) { warning("** WARNING! ** Number of parcel names provided not equal to", " number of parcels requested. Check input!") } } Data <- c(1:ncol(dataset)) # Nfactors <- max(facPlc) # defined but never used Nindicators <- length(Data) Npar <- length(nPerPar) Rmize <- runif(Nindicators, 1, Nindicators) Data <- rbind(facPlc, Rmize, Data) Results <- matrix(numeric(0), nAllocStart, Nindicators) Pin <- nPerPar[1] for (i in 2:length(nPerPar)) { Pin <- c(Pin, nPerPar[i] + Pin[i - 1]) } for (i in 1:nAllocStart) { Data[2, ] <- runif(Nindicators, 1, Nindicators) Data <- Data[, order(Data[2, ])] Data <- Data[, order(Data[1, ])] Results[i, ] <- Data[3, ] } Alpha <- rbind(Results[1, ], dataset) Allocations <- list() for (i in 1:nAllocStart) { Ineff <- rep(NA, ncol(Results)) Ineff2 <- c(1:ncol(Results)) for (inefficient in 1:ncol(Results)) { Ineff[Results[i, inefficient]] <- Ineff2[inefficient] } Alpha[1, ] <- Ineff Beta <- Alpha[, order(Alpha[1, ])] Temp <- matrix(NA, nrow(dataset), Npar) TempAA <- if (length(1:Pin[1]) > 1) { Beta[2:nrow(Beta), 1:Pin[1]] } else cbind(Beta[2:nrow(Beta), 1:Pin[1]], Beta[2:nrow(Beta), 1:Pin[1]]) Temp[, 1] <- rowMeans(TempAA, na.rm = TRUE) for (al in 2:Npar) { Plc <- Pin[al - 1] + 1 TempBB <- if (length(Plc:Pin[al]) > 1) { Beta[2:nrow(Beta), Plc:Pin[al]] } else cbind(Beta[2:nrow(Beta), Plc:Pin[al]], Beta[2:nrow(Beta), Plc:Pin[al]]) Temp[, al] <- rowMeans(TempBB, na.rm = TRUE) } if (length(names) > 1) { colnames(Temp) <- names } Allocations[[i]] <- Temp } Param <- list() Fitind <- list() Converged <- list() ProperSolution <- list() ConvergedProper <- list() for (i in 1:(nAllocStart)) { data_parcel <- as.data.frame(Allocations[[i]], row.names = NULL, optional = FALSE) fit <- lavaan::sem(syntax, data = data_parcel, ...) 
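## NOTE: the '...' above forwards any additional lavaan arguments (e.g.,
## estimator = "MLR") to sem(), so the requested estimator determines whether
## the scaled (robust) test statistics are detected in the next block. Only
## allocations whose solutions are both converged and proper (checked below)
## contribute to the pooled estimates and fit summaries.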
## if a robust estimator was requested, update fit indices accordingly requestedTest <- lavInspect(fit, "options")$test if (any(requestedTest %in% c("satorra.bentler","yuan.bentler", "yuan.bentler.mplus","scaled.shifted", "mean.var.adjusted","satterthwaite"))) { indices[1:2] <- c("chisq.scaled","df.scaled") } else indices[1:2] <- c("chisq","df") ## check convergence and solution if (lavInspect(fit, "converged") == TRUE) { Converged[[i]] <- 1 } else Converged[[i]] <- 0 Param[[i]] <- lavaan::parameterEstimates(fit)[, c("lhs", "op", "rhs", "est", "se", "z", "pvalue", "ci.lower", "ci.upper")] if (isProperSolution(fit) == TRUE & Converged[[i]] == 1) { ProperSolution[[i]] <- 1 } else ProperSolution[[i]] <- 0 if (any(is.na(Param[[i]][, 5] == TRUE))) ProperSolution[[i]] <- 0 if (Converged[[i]] == 1 & ProperSolution[[i]] == 1) { ConvergedProper[[i]] <- 1 } else ConvergedProper[[i]] <- 0 if (ConvergedProper[[i]] == 0) Param[[i]][, 4:9] <- matrix(data = NA, nrow(Param[[i]]), 6) if (ConvergedProper[[i]] == 1) { Fitind[[i]] <- lavaan::fitMeasures(fit, indices) if (!all(indices %in% names(Fitind[[i]]))) { invalidIndices <- setdiff(indices, names(Fitind[[i]])) Fitind[[i]][invalidIndices] <- NA } } else Fitind[[i]] <- rep(NA, length(indices)) } nConverged <- Reduce("+", Converged) nProperSolution <- Reduce("+", ProperSolution) nConvergedProper <- Reduce("+", ConvergedProper) if (nConvergedProper == 0) stop("All allocations failed to converge and/or", " yielded improper solutions for a given loop.") Parmn <- Param[[1]] if (is.null(selectParam)) selectParam <- 1:nrow(Parmn) ParSE <- matrix(NA, nrow(Parmn), nAllocStart) ParSEmn <- Parmn[, 5] Parsd <- matrix(NA, nrow(Parmn), nAllocStart) Fitmn <- Fitind[[1]] Fitsd <- matrix(NA, length(Fitmn), nAllocStart) Sigp <- matrix(NA, nrow(Parmn), nAllocStart) Fitind <- data.frame(Fitind) ParamSEsquared <- list() for (i in 1:nAllocStart) { ParamSEsquared[[i]] <- cbind(Param[[i]][, 5], Param[[i]][, 5]) if (any(is.na(ParamSEsquared[[i]]) == TRUE)) ParamSEsquared[[i]] <- 0 ParamSEsquared[[i]] <- apply(as.data.frame(ParamSEsquared[[i]]), 1, prod) Parsd[, i] <- Param[[i]][, 4] ParSE[, i] <- Param[[i]][, 5] Sigp[, ncol(Sigp) - i + 1] <- Param[[i]][, 7] Fitsd[, i] <- Fitind[[i]] } Sigp <- Sigp + 0.45 Sigp <- apply(Sigp, c(1, 2), round) Sigp <- 1 - as.vector(rowMeans(Sigp, na.rm = TRUE)) Parsum <- cbind(apply(Parsd, 1, mean, na.rm = TRUE), apply(Parsd, 1, sd, na.rm = TRUE), apply(Parsd, 1, max, na.rm = TRUE), apply(Parsd, 1, min, na.rm = TRUE), apply(Parsd, 1, max, na.rm = TRUE) - apply(Parsd, 1, min, na.rm = TRUE), Sigp) colnames(Parsum) <- c("Avg Est.", "S.D.", "MAX", "MIN", "Range", "% Sig") ParSEmn <- Parmn[, 1:3] ParSEfn <- cbind(ParSEmn, apply(ParSE, 1, mean, na.rm = TRUE), apply(ParSE, 1, sd, na.rm = TRUE), apply(ParSE, 1, max, na.rm = TRUE), apply(ParSE, 1, min, na.rm = TRUE), apply(ParSE, 1, max, na.rm = TRUE) - apply(ParSE, 1, min, na.rm = TRUE)) colnames(ParSEfn) <- c("lhs", "op", "rhs", "Avg SE", "S.D.", "MAX", "MIN", "Range") Fitsum <- cbind(apply(Fitsd, 1, mean, na.rm = TRUE), apply(Fitsd, 1, sd, na.rm = TRUE), apply(Fitsd, 1, max, na.rm = TRUE), apply(Fitsd, 1, min, na.rm = TRUE), apply(Fitsd, 1, max, na.rm = TRUE) - apply(Fitsd, 1, min, na.rm = TRUE)) rownames(Fitsum) <- indices Parmn[, 4:ncol(Parmn)] <- Parmn[, 4:ncol(Parmn)]/nConvergedProper Parmn <- Parmn[, 1:3] Parmn <- cbind(Parmn, Parsum) Fitmn <- Fitmn/nConvergedProper pChisq <- list() sigChisq <- list() for (i in 1:nAllocStart) { pChisq[[i]] <- (1 - pchisq(Fitsd[1, i], Fitsd[2, i])) if 
(is.na(pChisq[[i]]) == FALSE & pChisq[[i]] < 0.05) { sigChisq[[i]] <- 1 } else sigChisq[[i]] <- 0 } PerSigChisq <- (Reduce("+", sigChisq))/nConvergedProper * 100 PerSigChisq <- round(PerSigChisq, 4) PerSigChisqCol <- c(PerSigChisq, # however many indices != chisq(.scaled) rep("n/a", sum(!grepl(pattern = "chisq", x = indices)))) options(stringsAsFactors = FALSE) Fitsum <- data.frame(Fitsum, PerSigChisqCol) colnames(Fitsum) <- c("Avg Ind", "S.D.", "MAX", "MIN", "Range", "% Sig") options(stringsAsFactors = TRUE) PooledSEwithinvar <- Reduce("+", ParamSEsquared)/nConvergedProper PooledSEbetweenvar <- Parmn[, 5]^2 PooledSE <- sqrt(PooledSEwithinvar + PooledSEbetweenvar + PooledSEbetweenvar/nConvergedProper) ParamPooledSE <- c(Parmn[, 4], PooledSE) ParamTest <- Parmn[, 4] if (nloop > 1) { ParamPoolSEdiff <- abs(ParamPooledSE_temp - ParamPooledSE) Paramdiff <- abs(ParamTest_temp - ParamTest) PooledSEdiff <- abs(PooledSE - PooledSE_temp) ParamPoolSEdifftest <- ParamPoolSEdiff - ParamPoolSEdiffmin ParamPoolSEdifftest[ParamPoolSEdifftest <= 0] <- 0 ParamPoolSEdifftest[ParamPoolSEdifftest > 0] <- 1 Paramdifftest <- Paramdiff - ParamDiffMin Paramdifftest[Paramdifftest <= 0] <- 0 Paramdifftest[Paramdifftest > 0] <- 1 PooledSEdifftest <- PooledSEdiff - PooledSEmin PooledSEdifftest[PooledSEdifftest <= 0] <- 0 PooledSEdifftest[PooledSEdifftest > 0] <- 1 if (nloop == 2) { ParamPoolSEdifftesttable <- cbind(ParamPoolSEdifftest) Paramdifftesttable <- cbind(Paramdifftest) PooledSEdifftesttable <- cbind(PooledSEdifftest) } if (nloop > 2) { ParamPoolSEdifftesttable <- cbind(ParamPoolSEdifftesttable, ParamPoolSEdifftest) Paramdifftesttable <- cbind(Paramdifftesttable, Paramdifftest) PooledSEdifftesttable <- cbind(PooledSEdifftesttable, PooledSEdifftest) } PropStopParam <- 1 - (Reduce("+", Paramdifftesttable[selectParam, nloop - 1])/length(selectParam)) PropStopPooled <- 1 - (Reduce("+", PooledSEdifftesttable[selectParam, nloop - 1])/length(selectParam)) PropStopParamPooled <- 1 - (Reduce("+", ParamPoolSEdifftesttable[c(selectParam, selectParam + nrow(Parmn)), nloop - 1]) / (2 * length(selectParam))) if (checkConv == TRUE & is.na(BreakCounter) == TRUE) { print(nAllocStart) print("Proportion of pooled estimates meeting stop criteria:") print(PropStopParam) print("Proportion of pooled SE meeting stop criteria:") print(PropStopPooled) } if (checkConv == FALSE) { print(nAllocStart) print("Proportion of pooled estimates meeting stop criteria:") print(PropStopParam) print("Proportion of pooled SE meeting stop criteria:") print(PropStopPooled) } } nAllocStart <- nAllocStart + nAllocAdd StopTime <- proc.time() - StartTime print("Runtime:") print(StopTime) Parmn_rev <- list() Parmn_rev[[nloop]] <- cbind(Parmn[, 1:4], PooledSE) Parmn_rev[[nloop]][, 4:5] <- sapply(Parmn_rev[[nloop]][,4:5], as.numeric) colnames(Parmn_rev[[nloop]]) <- c("lhs", "op", "rhs","Estimate", "Pooled SE") if (nloop == 1) { Param_revTemp <- cbind(Parmn[, 1:3], Parmn_rev[[nloop]][,4]) Param_revTemp[, 4] <- as.numeric(Param_revTemp[,4]) Param_revTotal <- cbind(Param_revTemp) PooledSE_revTemp <- cbind(Parmn[, 1:3], Parmn_rev[[nloop]][,5]) PooledSE_revTemp[, 4] <- as.numeric(PooledSE_revTemp[,4]) PooledSE_revTotal <- cbind(PooledSE_revTemp) } if (nloop > 1) { Param_revTemp <- cbind(Parmn_rev[[nloop]][, 4]) Param_revTemp <- as.numeric(Param_revTemp) Param_revTotal <- cbind(Param_revTotal, Param_revTemp) PooledSE_revTemp <- cbind(Parmn_rev[[nloop]][, 5]) PooledSE_revTemp <- as.numeric(PooledSE_revTemp) PooledSE_revTotal <- cbind(PooledSE_revTotal, 
PooledSE_revTemp) } if (nloop == 1) { ParamTotal <- Param FitindTotal <- Fitind AllocationsTotal <- Allocations nAllocTotal <- nAllocStart - nAllocAdd nConvergedTotal <- nConverged nProperSolutionTotal <- nProperSolution nConvergedProperTotal <- nConvergedProper } if (nloop > 1) { ParamTotal <- c(ParamTotal, Param) FitindTotal <- c(FitindTotal, Fitind) AllocationsTotal <- c(AllocationsTotal, Allocations) nAllocTotal <- nAllocTotal + nAllocStart - nAllocAdd nConvergedTotal <- nConverged + nConvergedTotal nProperSolution <- nProperSolution + nProperSolutionTotal nConvergedProperTotal <- nConvergedProper + nConvergedProperTotal } if (nloop > 1 & double == TRUE & is.na(BreakCounter) == FALSE & BreakCounter == 2) { if (Reduce("+", ParamPoolSEdifftesttable[c(selectParam, selectParam + nrow(Parmn_rev[[nloop]])), nloop - 1]) == 0) break } if (nloop > 1 & double == TRUE) { if (Reduce("+", ParamPoolSEdifftesttable[c(selectParam, selectParam + nrow(Parmn_rev[[nloop]])), nloop - 1]) == 0) { BreakCounter <- 1 } else BreakCounter <- NA } if (nloop > 1 & checkConv == TRUE & is.na(BreakCounter) == TRUE) { if (Reduce("+", ParamPoolSEdifftesttable[c(selectParam, selectParam + nrow(Parmn_rev[[nloop]])), nloop - 1]) == 0) BreakCounter <- 0 } if (nloop > 1 & double == FALSE & checkConv == FALSE) { if (Reduce("+", ParamPoolSEdifftesttable[c(selectParam, selectParam + nrow(Parmn_rev[[nloop]])), nloop - 1]) == 0) break } if (nAllocAdd == 0) break if (checkConv == TRUE & is.na(BreakCounter) == FALSE & BreakCounter == 9) break } if (nAllocAdd == 0) { Parmn_revFinal <- Parmn_rev[[nloop]] nConvergedOutput <- nConverged nConvergedProperOutput <- nConvergedProper PooledSEwithinvarFinal <- PooledSEwithinvar PooledSEbetweenvarFinal <- PooledSEbetweenvar PooledSEFinal <- PooledSE FitsumOutput <- Fitsum nAllocOutput <- nAllocStart - nAllocAdd AllocationsOutput <- Allocations } if (!is.null(parceloutput)) { replist <- matrix(NA, nAllocOutput, 1) for (i in 1:(nAllocOutput)) { colnames(AllocationsOutput[[i]]) <- names utils::write.table(AllocationsOutput[[i]], file = paste(parceloutput, "/parcelruns", i, ".dat", sep = ""), row.names = FALSE, col.names = TRUE) replist[i, 1] <- paste("parcelruns", i, ".dat", sep = "") } utils:: write.table(replist, paste(parceloutput, "/parcelrunsreplist.dat", sep = ""), quote = FALSE, row.names = FALSE, col.names = FALSE) } if (useTotalAlloc == TRUE) { ParmnTotal <- ParamTotal[[1]] ParSETotal <- matrix(NA, nrow(ParmnTotal), nAllocTotal) ParSEmnTotal <- ParmnTotal[, 5] ParsdTotal <- matrix(NA, nrow(ParmnTotal), nAllocTotal) FitmnTotal <- FitindTotal[[1]] FitsdTotal <- matrix(NA, length(FitmnTotal), nAllocTotal) SigpTotal <- matrix(NA, nrow(ParmnTotal), nAllocTotal) FitindTotal <- data.frame(FitindTotal) ParamSEsquaredTotal <- list() for (i in 1:nAllocTotal) { ParamSEsquaredTotal[[i]] <- cbind(ParamTotal[[i]][,5], ParamTotal[[i]][, 5]) if (any(is.na(ParamSEsquaredTotal[[i]]) == TRUE)) ParamSEsquaredTotal[[i]] <- 0 ParamSEsquaredTotal[[i]] <- apply(as.data.frame(ParamSEsquaredTotal[[i]]),1, prod) ParsdTotal[, i] <- ParamTotal[[i]][, 4] ParSETotal[, i] <- ParamTotal[[i]][, 5] SigpTotal[, ncol(Sigp) - i + 1] <- ParamTotal[[i]][,7] FitsdTotal[, i] <- FitindTotal[[i]] } SigpTotal <- SigpTotal + 0.45 SigpTotal <- apply(SigpTotal, c(1, 2), round) SigpTotal <- 1 - as.vector(rowMeans(SigpTotal, na.rm = TRUE)) ParsumTotal <- cbind(apply(ParsdTotal, 1, mean, na.rm = TRUE), apply(ParsdTotal, 1, sd, na.rm = TRUE), apply(ParsdTotal, 1, max, na.rm = TRUE), apply(ParsdTotal, 1, min, na.rm = TRUE), 
apply(ParsdTotal, 1, max, na.rm = TRUE) - apply(ParsdTotal, 1, min, na.rm = TRUE), SigpTotal) colnames(ParsumTotal) <- c("Avg Est.", "S.D.", "MAX", "MIN", "Range", "% Sig") ParSEmnTotal <- ParmnTotal[, 1:3] ParSEfnTotal <- cbind(ParSEmnTotal, apply(ParSETotal, 1, mean, na.rm = TRUE), apply(ParSETotal, 1, sd, na.rm = TRUE), apply(ParSETotal, 1, max, na.rm = TRUE), apply(ParSETotal, 1, min, na.rm = TRUE), apply(ParSETotal, 1, max, na.rm = TRUE) - apply(ParSETotal, 1, min, na.rm = TRUE)) colnames(ParSEfnTotal) <- c("lhs", "op", "rhs", "Avg SE", "S.D.", "MAX", "MIN", "Range") FitsumTotal <- cbind(apply(FitsdTotal, 1, mean, na.rm = TRUE), apply(FitsdTotal, 1, sd, na.rm = TRUE), apply(FitsdTotal, 1, max, na.rm = TRUE), apply(FitsdTotal, 1, min, na.rm = TRUE), apply(FitsdTotal, 1, max, na.rm = TRUE) - apply(FitsdTotal, 1, min, na.rm = TRUE)) rownames(FitsumTotal) <- indices ParmnTotal[, 4:ncol(ParmnTotal)] <- ParmnTotal[,4:ncol(Parmn)]/nConvergedProperTotal ParmnTotal <- ParmnTotal[, 1:3] ParmnTotal <- cbind(ParmnTotal, ParsumTotal) FitmnTotal <- FitmnTotal/nConvergedProperTotal pChisqTotal <- list() sigChisqTotal <- list() for (i in 1:nAllocTotal) { pChisqTotal[[i]] <- (1 - pchisq(FitsdTotal[1,i], FitsdTotal[2, i])) if (is.na(pChisqTotal[[i]]) == FALSE & pChisqTotal[[i]] < 0.05) { sigChisqTotal[[i]] <- 1 } else sigChisqTotal[[i]] <- 0 } PerSigChisqTotal <- (Reduce("+", sigChisqTotal))/nConvergedProperTotal * 100 PerSigChisqTotal <- round(PerSigChisqTotal, 4) PerSigChisqColTotal <- c(PerSigChisqTotal, "n/a", "n/a", "n/a", "n/a") options(stringsAsFactors = FALSE) FitsumTotal <- data.frame(FitsumTotal, PerSigChisqColTotal) colnames(FitsumTotal) <- c("Avg Ind", "S.D.", "MAX", "MIN", "Range", "% Sig") options(stringsAsFactors = TRUE) PooledSEwithinvarTotal <- Reduce("+", ParamSEsquaredTotal)/nConvergedProperTotal PooledSEbetweenvarTotal <- ParmnTotal[, 5]^2 PooledSETotal <- sqrt(PooledSEwithinvarTotal + PooledSEbetweenvarTotal + PooledSEbetweenvarTotal/nConvergedProperTotal) ParamPooledSETotal <- c(ParmnTotal[, 4], PooledSETotal) ParamTestTotal <- ParmnTotal[, 4] Parmn_revTotal <- cbind(ParmnTotal[, 1:4], PooledSETotal) Parmn_revTotal[, 4:5] <- sapply(Parmn_revTotal[,4:5], as.numeric) colnames(Parmn_revTotal) <- c("lhs", "op", "rhs", "Estimate", "Pooled SE") df_tTotal <- (nConvergedProperTotal - 1) * (1 + (nConvergedProperTotal * PooledSEwithinvarTotal)/(nConvergedProperTotal * PooledSEbetweenvarTotal + PooledSEbetweenvarTotal))^2 crit_tTotal <- abs(qt(0.05/2, df_tTotal)) pval_zTotal <- 2 * (1 - pnorm(abs(Parmn_revTotal[, 4]/PooledSETotal))) pval_tTotal <- 2 * (1 - pt(abs(Parmn_revTotal[, 4]/PooledSETotal), df = df_tTotal)) CI95_Lower_zTotal <- Parmn_revTotal[, 4] - 1.959963985 * PooledSETotal CI95_Upper_zTotal <- Parmn_revTotal[, 4] + 1.959963985 * PooledSETotal CI95_Lower_tTotal <- Parmn_revTotal[, 4] - crit_tTotal * PooledSETotal CI95_Upper_tTotal <- Parmn_revTotal[, 4] + crit_tTotal * PooledSETotal Parmn_revTotal <- cbind(Parmn_revTotal, pval_zTotal, CI95_Lower_zTotal, CI95_Upper_zTotal, pval_tTotal, CI95_Lower_tTotal, CI95_Upper_tTotal) colnames(Parmn_revTotal) <- c("lhs", "op", "rhs", "Pooled Est", "Pooled SE", "pval_z", "CI95_LB_z", "CI95_UB_z", "pval_t", "CI95_LB_t", "CI95_UB_t") for (i in 1:nrow(Parmn_revTotal)) { if (Parmn_revTotal[i, 5] == 0) Parmn_revTotal[i, 6:11] <- NA } RPAVTotal <- (PooledSEbetweenvarTotal + (PooledSEbetweenvarTotal/(nConvergedProperTotal)))/PooledSEwithinvarTotal PPAVTotal <- (((nConvergedProperTotal + 1)/(nConvergedProperTotal)) * 
PooledSEbetweenvarTotal)/(PooledSEwithinvarTotal + (((nConvergedProperTotal + 1)/(nConvergedProperTotal)) * PooledSEbetweenvarTotal)) PAVtableTotal <- cbind(ParmnTotal[1:3], RPAVTotal, PPAVTotal) Parmn_revTotal[, 4:11] <- apply(Parmn_revTotal[, 4:11], 2, round, digits = 4) FitsumTotal[, 1:5] <- apply(FitsumTotal[, 1:5], 2, round, digits = 4) PAVtableTotal[, 4:5] <- apply(PAVtableTotal[, 4:5], 2, round, digits = 4) FitsumTotal[2, 2:5] <- c("n/a", "n/a", "n/a", "n/a") ConvergedProperSumTotal <- rbind((nConvergedTotal)/(nAllocTotal), (nConvergedProperTotal)/(nAllocTotal)) rownames(ConvergedProperSumTotal) <- c("Converged", "Converged and Proper") colnames(ConvergedProperSumTotal) <- "Proportion of Allocations" } if (nAllocAdd != 0) { if (nloop == 2) { PropParamMet <- matrix(data = 1, nrow(Parmn), 1) PropPooledSEMet <- matrix(data = 1, nrow(Parmn), 1) } if (nloop != 2) { PropParamMet <- (1 - apply(Paramdifftesttable[, 1:nloop - 1], 1, mean)) * 100 PropPooledSEMet <- (1 - apply(PooledSEdifftesttable[,1:nloop - 1], 1, mean)) * 100 } FirstParamMet <- apply(Paramdifftesttable == 0, 1, which.max) FirstPooledSEMet <- apply(PooledSEdifftesttable == 0, 1, which.max) } if (nAllocAdd == 0) { PropParamMet <- matrix(data = NA, nrow(Parmn), 1) PropPooledSEMet <- matrix(data = NA, nrow(Parmn), 1) FirstParamMet <- matrix(data = NA, nrow(Parmn), 1) FirstPooledSEMet <- matrix(data = NA, nrow(Parmn), 1) } PerLoops <- cbind(Parmn[, 1:3], PropParamMet, PropPooledSEMet) colnames(PerLoops) <- c("lhs", "op", "rhs", "Param Criteria Met", "PooledSE Criteria Met") FirstLoops <- cbind(Parmn[, 1:3], FirstParamMet, FirstPooledSEMet) colnames(FirstLoops) <- c("lhs", "op", "rhs", "Param Criteria Met", "PooledSE Criteria Met") NumbAllocations <- cbind(Parmn[, 1:3], (FirstParamMet - 1) * nAllocAdd + nAllocStarttemp, (FirstPooledSEMet - 1) * nAllocAdd + nAllocStarttemp) colnames(NumbAllocations) <- c("lhs", "op", "rhs", "Param Criteria Met", "PooledSE Criteria Met") if (nAllocAdd != 0) { for (i in 1:nrow(Parmn)) { if ((i %in% selectParam) == FALSE) PerLoops[i, 4:5] <- NA if ((i %in% selectParam) == FALSE) FirstLoops[i, 4:5] <- NA if ((i %in% selectParam) == FALSE) NumbAllocations[i, 4:5] <- NA } } df_t <- (nConvergedProperOutput - 1) * (1 + (nConvergedProperOutput * PooledSEwithinvarFinal) / (nConvergedProperOutput * PooledSEbetweenvarFinal + PooledSEbetweenvarFinal))^2 crit_t <- abs(qt(0.05/2, df_t)) pval_z <- 2 * (1 - pnorm(abs(Parmn_revFinal[, 4]/PooledSEFinal))) pval_t <- 2 * (1 - pt(abs(Parmn_revFinal[, 4]/PooledSEFinal), df = df_t)) CI95_Lower_z <- Parmn_revFinal[, 4] - 1.959963985 * PooledSEFinal CI95_Upper_z <- Parmn_revFinal[, 4] + 1.959963985 * PooledSEFinal CI95_Lower_t <- Parmn_revFinal[, 4] - crit_t * PooledSEFinal CI95_Upper_t <- Parmn_revFinal[, 4] + crit_t * PooledSEFinal Parmn_revFinal <- cbind(Parmn_revFinal, pval_z, CI95_Lower_z, CI95_Upper_z, pval_t, CI95_Lower_t, CI95_Upper_t) colnames(Parmn_revFinal) <- c("lhs", "op", "rhs", "Pooled Est", "Pooled SE", "pval_z", "CI95_LB_z", "CI95_UB_z", "pval_t", "CI95_LB_t", "CI95_UB_t") for (i in 1:nrow(Parmn_revFinal)) { if (Parmn_revFinal[i, 5] == 0 | is.na(Parmn_revFinal[i, 5]) == TRUE) Parmn_revFinal[i, 6:11] <- NA } RPAV <- (PooledSEbetweenvarFinal + (PooledSEbetweenvarFinal/(nConvergedProperOutput)))/PooledSEwithinvarFinal PPAV <- (((nConvergedProperOutput + 1)/(nConvergedProperOutput)) * PooledSEbetweenvarFinal)/(PooledSEwithinvarFinal + (((nConvergedProperOutput + 1)/(nConvergedProperOutput)) * PooledSEbetweenvarFinal)) PAVtable <- cbind(Parmn[1:3], RPAV, PPAV) 
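## The pooling above applies Rubin's (1987) rules, treating each random
## allocation like an imputation: pooled sampling variance = mean squared SE
## (within) + variance of the estimates (between) + between/m. A minimal
## standalone sketch of the same arithmetic, using hypothetical estimates
## and SEs from m = 3 allocations (illustration only, not run):
# est <- c(0.50, 0.54, 0.46)  # parameter estimate from each allocation
# se  <- c(0.10, 0.11, 0.09)  # its standard error from each allocation
# m   <- length(est)
# W   <- mean(se^2)           # within-allocation variance
# B   <- var(est)             # between-allocation variance
# sqrt(W + B + B/m)                       # pooled SE, as in PooledSE above
# (m - 1) * (1 + (m * W)/(m * B + B))^2   # df for the t-based CI, as in df_t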
colnames(Param_revTotal) <- c("lhs", "op", "rhs", c(1:nloop)) colnames(PooledSE_revTotal) <- c("lhs", "op", "rhs", c(1:nloop)) Param_revTotal[, 4:(nloop + 3)] <- sapply(Param_revTotal[, 4:(nloop + 3)], as.numeric) PooledSE_revTotal[, 4:(nloop + 3)] <- sapply(PooledSE_revTotal[, 4:(nloop + 3)], as.numeric) Parmn_revFinal[, 4:11] <- apply(Parmn_revFinal[, 4:11], 2, round, digits = 4) FitsumOutput[, 1:5] <- apply(FitsumOutput[, 1:5], 2, round, digits = 4) if (nAllocAdd != 0) Param_revTotal[, 4:(nloop + 3)] <- apply(Param_revTotal[, 4:(nloop + 3)], 2, round, digits = 8) if (nAllocAdd == 0) Param_revTotal[, 4] <- round(Param_revTotal[, 4], 8) if (nAllocAdd != 0) PooledSE_revTotal[, 4:(nloop + 3)] <- apply(PooledSE_revTotal[, 4:(nloop + 3)], 2, round, digits = 8) if (nAllocAdd == 0) PooledSE_revTotal[, 4] <- round(PooledSE_revTotal[, 4], 8) PAVtable[, 4:5] <- apply(PAVtable[, 4:5], 2, round, digits = 4) FitsumOutput[2, 2:5] <- c("n/a", "n/a", "n/a", "n/a") ConvergedProperSum <- rbind((nConvergedOutput)/(nAllocOutput), (nConvergedProperOutput)/(nAllocOutput)) rownames(ConvergedProperSum) <- c("Converged", "Converged and Proper") colnames(ConvergedProperSum) <- "Proportion of Allocations" StopTimeFull <- proc.time() - StartTimeFull if (useTotalAlloc == FALSE) { Output_mod <- list(Parmn_revFinal, FitsumOutput, ConvergedProperSum, nAllocOutput, PAVtable, StopTimeFull[[3]]/60) names(Output_mod) <- c("Estimates", "Fit", "Proportion of Converged and Proper Allocations", "Allocations needed for stability (M)", "Indices to quantify uncertainty in estimates due to sampling vs. allocation variability", "Total runtime (minutes)") } if (useTotalAlloc == TRUE) { Output_mod <- list(Parmn_revFinal, FitsumOutput, ConvergedProperSum, nAllocOutput, PAVtable, Parmn_revTotal, FitsumTotal, ConvergedProperSumTotal, nAllocTotal, PAVtableTotal, StopTimeFull[[3]]/60) names(Output_mod) <- c("Estimates (using M allocations)", "Fit (using M allocations)", "Proportion of Converged and Proper Allocations (using M allocations)", "Allocations needed for stability (M)", "Indices to quantify uncertainty in estimates due to sampling vs. allocation variability (using M allocations)", "Estimates (using all allocations)", "Fit (using all allocations)", "Proportion of Converged and Proper Allocations (using all allocations)", "Total Allocations used by algorithm", "Indices to quantify uncertainty in estimates due to sampling vs. allocation variability (using all allocations)", "Total runtime (minutes)") } if (exists("invalidIndices")) { if (length(invalidIndices)) message('\n\nInvalid fit indices requested: ', paste(invalidIndices, collapse = ", "), "\n\n") } return(Output_mod) } semTools/R/longInvariance.R0000644000176200001440000006204213432435606015354 0ustar liggesusers### Sunthud Pornprasertmanit & Yves Rosseel ### Last updated: 1 September 2018 ##' Measurement Invariance Tests Within Person ##' ##' Testing measurement invariance across timepoints (longitudinal) or any ##' context involving the use of the same scale in one case (e.g., a dyad case ##' with husband and wife answering the same scale). The measurement invariance ##' uses a typical sequence of model comparison tests. This function currently ##' works with only one scale, and only with continuous indicators. ##' ##' If \code{strict = FALSE}, the following four models are tested in order: ##' \enumerate{ ##' \item Model 1: configural invariance. The same factor structure is ##' imposed on all units. ##' \item Model 2: weak invariance. 
The factor loadings are constrained to be ##' equal across units. ##' \item Model 3: strong invariance. The factor loadings and intercepts are ##' constrained to be equal across units. ##' \item Model 4: The factor loadings, intercepts and means are constrained to ##' be equal across units. ##' } ##' ##' Each time a more restricted model is fitted, a \eqn{\Delta\chi^2} test is ##' reported, comparing the current model with the previous one, and comparing ##' the current model to the baseline model (Model 1). In addition, the ##' difference in CFI is also reported (\eqn{\Delta}CFI). ##' ##' If \code{strict = TRUE}, the following five models are tested in order: ##' ##' \enumerate{ ##' \item Model 1: configural invariance. The same factor structure is imposed ##' on all units. ##' \item Model 2: weak invariance. The factor loadings are constrained to be ##' equal across units. ##' \item Model 3: strong invariance. The factor loadings and intercepts are ##' constrained to be equal across units. ##' \item Model 4: strict invariance. The factor loadings, intercepts and ##' residual variances are constrained to be equal across units. ##' \item Model 5: The factor loadings, intercepts, residual variances and ##' means are constrained to be equal across units. ##' } ##' ##' Note that if the \eqn{\chi^2} test statistic is scaled (e.g., a Satorra-Bentler ##' or Yuan-Bentler test statistic), a special version of the \eqn{\Delta\chi^2} ##' test is used, as described at \url{http://www.statmodel.com/chidiff.shtml} ##' ##' ##' @param model lavaan syntax or parameter table ##' @param varList A list containing indicator names of the factors used in the ##' invariance testing, such that the first element is the vector of ##' indicator names at the first timepoint and the second element is the ##' vector of indicator names at the second timepoint. The order of indicator ##' names should be the same (but measured at different times or in different ##' units). ##' @param auto The order of autocorrelation among the measurement errors of ##' similar items across factors (e.g., Item 1 in Time 1 and Time 2). If 0 is ##' specified, no autocorrelation is imposed. If 1 is specified, ##' autocorrelation is imposed for adjacent factors listed in ##' \code{varList}. The maximum order that can be specified is the number of ##' factors specified minus 1. If \code{"all"} is specified, the maximum ##' order is used. ##' @param constrainAuto If \code{TRUE}, the function will constrain the ##' auto-\emph{covariances} to be equal within the same item across factors. ##' For example, the covariance of item 1 in time 1 and time 2 is equal to ##' the covariance of item 1 in time 2 and time 3. ##' @param fixed.x See \code{\link[lavaan]{lavaan}.} ##' @param std.lv See \code{\link[lavaan]{lavaan}.} ##' @param group See \code{\link[lavaan]{lavaan}.} ##' @param group.equal See \code{\link[lavaan]{lavaan}.} ##' @param group.partial See \code{\link[lavaan]{lavaan}.} ##' @param strict If \code{TRUE}, the sequence requires strict invariance. See ##' details for more information. ##' @param warn See \code{\link[lavaan]{lavaan}.} ##' @param debug See \code{\link[lavaan]{lavaan}.} ##' @param quiet If \code{FALSE} (default), a summary is printed out containing ##' an overview of the different models that are fitted, together with some ##' model comparison tests. If \code{TRUE}, no summary is printed. ##' @param fit.measures Fit measures used to calculate the differences between ##' nested models.
##' @param baseline.model custom baseline model passed to ##' \code{\link[lavaan]{fitMeasures}} ##' @param method The method used to calculate the likelihood ratio test. See ##' \code{\link[lavaan]{lavTestLRT}} for available options ##' @param ... Additional arguments in the \code{\link[lavaan]{lavaan}} ##' function. See also \code{\link[lavaan]{lavOptions}} ##' ##' @return Invisibly, all model fits in the sequence are returned as a list. ##' ##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @references Vandenberg, R. J., and Lance, C. E. (2000). A review and ##' synthesis of the measurement invariance literature: Suggestions, ##' practices, and recommendations for organizational research. ##' \emph{Organizational Research Methods, 3}(1), 4--70. ##' doi:10.1177/109442810031002 ##' ##' @examples ##' ##' model <- ' f1t1 =~ y1t1 + y2t1 + y3t1 ##' f1t2 =~ y1t2 + y2t2 + y3t2 ##' f1t3 =~ y1t3 + y2t3 + y3t3 ' ##' ##' ## Create list of variables ##' var1 <- c("y1t1", "y2t1", "y3t1") ##' var2 <- c("y1t2", "y2t2", "y3t2") ##' var3 <- c("y1t3", "y2t3", "y3t3") ##' constrainedVar <- list(var1, var2, var3) ##' ##' ## Invariance of the same factor across timepoints ##' longInvariance(model, auto = 1, constrainAuto = TRUE, ##' varList = constrainedVar, data = exLong) ##' ##' ## Invariance of the same factor across timepoints and groups ##' longInvariance(model, auto = 1, constrainAuto = TRUE, ##' varList = constrainedVar, data = exLong, group = "sex", ##' group.equal = c("loadings", "intercepts")) ##' ##' @name longInvariance-deprecated ##' @usage ##' longInvariance(model, varList, auto = "all", constrainAuto = FALSE, ##' fixed.x = TRUE, std.lv = FALSE, group = NULL, ##' group.equal = "", group.partial = "", strict = FALSE, ##' warn = TRUE, debug = FALSE, quiet = FALSE, ##' fit.measures = "default", baseline.model = NULL, ##' method = "satorra.bentler.2001", ...) ##' @seealso \code{\link{semTools-deprecated}} ##' @keywords internal NULL ##' @rdname semTools-deprecated ##' ##' @export longInvariance <- function(model, varList, auto = "all", constrainAuto = FALSE, fixed.x = TRUE, std.lv = FALSE, group = NULL, group.equal = "", group.partial = "", strict = FALSE, warn = TRUE, debug = FALSE, quiet = FALSE, fit.measures = "default", baseline.model = NULL, method = "satorra.bentler.2001", ...) { .Deprecated(msg = c("The longInvariance function is deprecated, and ", "it will cease to be included in future versions of ", "semTools. See help('semTools-deprecated') for details.")) List <- list(...) # Find the number of groups ngroups <- 1 if (!is.null(group)) { if (!is.null(List$data)) { ngroups <- length(unique(List$data[,group])) } else if (!is.null(List$sample.cov)) { ngroups <- length(List$sample.cov) } else { stop("Cannot find the variable name specified in the 'group' argument.") } } # Get the lavaan parameter table if (is.character(model)) { lavaanParTable <- lavaan::lavaanify(model = model, meanstructure = TRUE, int.ov.free = TRUE, int.lv.free = FALSE, orthogonal = FALSE, fixed.x = fixed.x, std.lv = std.lv, auto.fix.first = ifelse(std.lv, FALSE, TRUE), auto.fix.single = TRUE, auto.var = TRUE, auto.cov.lv.x = TRUE, auto.cov.y = TRUE, ngroups = ngroups, group.equal = group.equal, group.partial = group.partial, debug = debug, warn = warn, as.data.frame.
= TRUE) } else if (is.list(model)) { if (!is.null(model$lhs) && !is.null(model$op) && !is.null(model$rhs) && !is.null(model$free)) { lavaanParTable <- model } else if (is.character(model[[1]])) { stop("lavaan ERROR: model is a list, but not a parameterTable?") } } else { cat("model type: ", class(model), "\n") stop("lavaan ERROR: model is not of type character or list") } # Error checking on the varList argument and get the factor name corresponding to each elements of the list facName <- lapply(varList, function(vec, pt) pt$lhs[(pt$op == "=~") & (pt$rhs %in% vec)], pt = lavaanParTable) if (any(sapply(facName, function(x) length(unique(x)) > 1))) stop("The factor names of the same element of the 'varList' are not the same.") if (length(unique(sapply(facName, function(x) length(x)))) > 1) stop("The numbers of variables in each element are not equal.") facName <- unlist(lapply(facName, unique)) # Impose the autocorrelation in the parameter table if (auto != 0) { if (is.numeric(auto) && auto >= length(varList)) stop("The number of lag in auto-correlation is not possible in the current number of timepoints.") if (auto == "all") auto <- length(varList) - 1 for (k in 1:ngroups) { for (i in 1:length(varList[[1]])) { name <- sapply(varList, function(x, element) x[element], element = i) for (j in 1:auto) { vec <- 1:(length(varList) - j) lavaanParTable <- freeParTable(lavaanParTable, name[vec], "~~", name[vec + j], k, ustart = NA) if (constrainAuto & (length(vec) > 1)) lavaanParTable <- constrainParTable(lavaanParTable, name[vec], "~~", name[vec + j], k) } } } } # Fit configural invariance fitConfigural <- try(lavaan::lavaan(lavaanParTable, ..., group = group, group.equal = group.equal, group.partial = group.partial, warn = TRUE, debug = FALSE), silent = TRUE) # Create the parameter table for metric invariance ptMetric <- lavaanParTable if (std.lv) { for (k in 1:ngroups) { # Free variances of factor 2, 3, ... ptMetric <- freeParTable(ptMetric, facName[-1], "~~", facName[-1], k, ustart = NA) # Constrain factor loadings for (i in 1:length(varList[[1]])) { ptMetric <- constrainParTable(ptMetric, facName, "=~", sapply(varList, function(x, element) x[element], element = i), k) } } ptMetric$ustart[(ptMetric$op == "=~") & (ptMetric$rhs %in% sapply(varList, function(x, element) x[element], element = 1))] <- 1 } else { for (k in 1:ngroups) { # Constrain factor loadings but keep marker variables for (i in 2:length(varList[[1]])) { ptMetric <- constrainParTable(ptMetric, facName, "=~", sapply(varList, function(x, element) x[element], element = i), k) } } } fitMetric <- try(lavaan::lavaan(ptMetric, ..., group = group, group.equal = group.equal, group.partial = group.partial, warn = TRUE, debug = FALSE), silent = TRUE) # Create the parameter table for scalar invariance ptScalar <- ptMetric for (k in 1:ngroups) { # Free means of factors 2, 3, ... 
ptScalar <- freeParTable(ptScalar, facName[-1], "~1", "", k, ustart = NA) # Constrain measurement intercepts for (i in 1:length(varList[[1]])) { ptScalar <- constrainParTable(ptScalar, sapply(varList, function(x, element) x[element], element = i), "~1", "", k) } } ptScalar$ustart[(ptMetric$op == "~1") & (ptMetric$rhs %in% facName)] <- 0 fitScalar <- try(lavaan::lavaan(ptScalar, ..., group = group, group.equal = group.equal, group.partial = group.partial, warn = TRUE, debug = FALSE), silent = TRUE) ptMeans <- ptScalar # Create the parameter table for strict invariance if specified ptStrict <- ptScalar fitStrict <- NULL if (strict) { ptStrict <- ptScalar for (k in 1:ngroups) { # Constrain measurement error variances for (i in 1:length(varList[[1]])) { name <- sapply(varList, function(x, element) x[element], element = i) ptStrict <- constrainParTable(ptStrict, name, "~~", name, k) } } fitStrict <- try(lavaan::lavaan(ptStrict, ..., group = group, group.equal = group.equal, group.partial = group.partial, warn = TRUE, debug = FALSE), silent = TRUE) ptMeans <- ptStrict } # Create the parameter table for mean equality # Constrain factor means to be equal for (k in 1:ngroups) { ptMeans <- fixParTable(ptMeans, facName[-1], "~1", "", k, ustart = 0) } fitMeans <- try(lavaan::lavaan(ptMeans, ..., group = group, group.equal = group.equal, group.partial = group.partial, warn = TRUE, debug = FALSE), silent = TRUE) FIT <- invisible(list(fit.configural = fitConfigural, fit.loadings = fitMetric, fit.intercepts = fitScalar, fit.residuals = fitStrict, fit.means = fitMeans)) FIT <- FIT[!sapply(FIT, is.null)] if (!quiet) printInvarianceResult(FIT, fit.measures, baseline.model, method) # Modify these functions from measurementInvariance function # if(!quiet) { # cat("\n#################### Measurement invariance tests ####################\n") # cat("\nThe order of autocorrelation: ", auto, "\n") # cat("\n#################### Model 1: configural invariance:\n") # printFitLine(fitConfigural) # cat("\n#################### Model 2: weak invariance (equal loadings):\n") # printFitLine(fitMetric) # cat("\n[Model 1 versus model 2]\n") # difftest(fitConfigural, fitMetric) # cat("\n#################### Model 3: strong invariance (equal loadings + intercepts):\n") # printFitLine(fitScalar) # cat("\n[Model 1 versus model 3]\n") # difftest(fitConfigural, fitScalar) # cat("\n[Model 2 versus model 3]\n") # difftest(fitMetric, fitScalar) # if(strict) { # cat("\n#################### Model 4: strict invariance (equal loadings + intercepts + residuals):\n") # printFitLine(fitStrict) # cat("\n[Model 1 versus model 4]\n") # difftest(fitConfigural, fitStrict) # cat("\n[Model 2 versus model 4]\n") # difftest(fitMetric, fitStrict) # cat("\n[Model 3 versus model 4]\n") # difftest(fitScalar, fitStrict) # cat("\n#################### Model 5: equal loadings + intercepts + residuals + means:\n") # printFitLine(fitMeans, horizontal=TRUE) # cat("\n[Model 1 versus model 5]\n") # difftest(fitConfigural, fitMeans) # cat("\n[Model 2 versus model 5]\n") # difftest(fitMetric, fitMeans) # cat("\n[Model 3 versus model 5]\n") # difftest(fitScalar, fitMeans) # cat("\n[Model 4 versus model 5]\n") # difftest(fitStrict, fitMeans) # } else { # cat("\n#################### Model 4: equal loadings + intercepts + means:\n") # printFitLine(fitMeans) # cat("\n[Model 1 versus model 4]\n") # difftest(fitConfigural, fitMeans) # cat("\n[Model 2 versus model 4]\n") # difftest(fitMetric, fitMeans) # cat("\n[Model 3 versus model 4]\n") # difftest(fitScalar, fitMeans) # 
} # } # return(invisible(list(fit.configural = fitConfigural, fit.loadings = fitMetric, fit.intercepts = fitScalar, fit.residuals = fitStrict, fit.means = fitMeans))) invisible(FIT) } ## ---------------- ## Hidden Functions ## ---------------- # rearrangeFreeElement: Rearrange the number listed in 'free' in parameter tables rearrangeFreeElement <- function(vec) { vec2 <- vec vec <- vec[vec != 0] uvec <- unique(vec) newvec <- 1:length(unique(vec)) vec2[vec2 != 0] <- newvec[match(vec, uvec)] class(vec2) <- "integer" vec2 } # rearrangept: Rearrange parameter table and plabel rearrangept <- function(pt) { createplabel <- function(num) { result <- paste0(".p", num, ".") result[num == 0] <- "" result } oldfree <- pt$free newfree <- rearrangeFreeElement(oldfree) oldplabel <- pt$plabel newplabel <- createplabel(seq_along(pt$op)) eqpos <- which(pt$op == "==") newplabel[eqpos] <- "" if (length(eqpos) > 0) { eqlhs <- pt$lhs[eqpos] eqrhs <- pt$rhs[eqpos] matchlhs <- match(eqlhs, oldplabel) matchrhs <- match(eqrhs, oldplabel) neweqlhs <- newplabel[matchlhs] neweqrhs <- newplabel[matchrhs] neweqlhs[is.na(matchlhs)] <- eqlhs[is.na(matchlhs)] neweqrhs[is.na(matchrhs)] <- eqrhs[is.na(matchrhs)] pt$lhs[eqpos] <- neweqlhs pt$rhs[eqpos] <- neweqrhs } pt$free <- newfree pt$plabel <- newplabel pt } # freeParTable: Free elements in parameter table #FIXME: used in singleParamTest and partialInvariance freeParTable <- function(parTable, lhs, op, rhs, group, ustart = NA) { parTable$start <- parTable$est <- parTable$se <- NULL target <- cbind(lhs, op, rhs, group) for (i in 1:nrow(target)) { targetElem <- matchElement(parTable = parTable, vec = target[i,]) ptargetElem <- parTable$plabel[targetElem] if ((length(targetElem) == 0) || is.na(targetElem)) { newline <- list(lhs = as.character(target[i, 1]), op = as.character(target[i, 2]), rhs = as.character(target[i, 3]), group = as.integer(target[i, 4]), free = as.integer(max(parTable$free) + 1), ustart = as.numeric(NA)) parTable <- patMerge(pt1 = parTable, pt2 = newline) } else { if (parTable$free[targetElem] == 0) { parTable$ustart[targetElem] <- ustart parTable$user[targetElem] <- 1 parTable$free[targetElem] <- max(parTable$free) + 1 } equalelement <- which(parTable$op == "==") rmelem <- intersect(union(match(ptargetElem, parTable$lhs), match(ptargetElem, parTable$rhs)), equalelement) if (length(rmelem) > 0) parTable <- removeEqCon(parTable, rmelem) } } parTable <- rearrangept(parTable) parTable } # fixParTable: Fix elements in parameter table #FIXME: used in singleParamTest and partialInvariance fixParTable <- function(parTable, lhs, op, rhs, group, ustart = NA) { parTable$start <- parTable$est <- parTable$se <- NULL target <- cbind(lhs, op, rhs, group) element <- apply(target, 1, matchElement, parTable=parTable) for (i in 1:nrow(target)) { ## Why was Sunthud printing warnings? (originally used warnings(), not warning()...) 
# if (parTable$free[element[i]] == 0) warning('The parameter ', lhs, op, rhs, # ' in group ', group, # ' is already fixed.') # equalelement <- which(parTable$op == "==") # targetElem <- matchElement(parTable = parTable, vec = target[i,]) # ptargetElem <- parTable$plabel[targetElem] # rmelem <- intersect(union(match(ptargetElem, parTable$lhs), match(ptargetElem, parTable$rhs)), equalelement) # if(length(rmelem) > 0) parTable <- removeEqCon(parTable, rmelem) parTable$ustart[element[i]] <- ustart parTable$user[element[i]] <- 1 parTable$free[element[i]] <- 0 } parTable <- rearrangept(parTable) # rearrangePlabel with change all equality constraints parTable } # constrainParTable: Impose equality constraints in any set of elements in the parameter table #FIXME: used in partialInvariance constrainParTable <- function(parTable, lhs, op, rhs, group) { parTable$start <- parTable$est <- parTable$se <- NULL target <- cbind(lhs, op, rhs, group) element <- apply(target, 1, matchElement, parTable=parTable) # id lhs op rhs user group free ustart exo label plabel start for (i in 2:length(element)) { len <- length(parTable$id) newline <- list(lhs = parTable$plabel[element[1]], op = "==", rhs = parTable$plabel[element[i]]) if (!any(parTable$lhs == newline$lhs & parTable$op == newline$op & parTable$rhs == newline$rhs)) parTable <- patMerge(pt1 = parTable, pt2 = newline) } parTable } # matchElement: Find the number of row that have the specification in vec (lhs, op, rhs, group) #FIXME: used in partialInvariance matchElement <- function(parTable, vec) { if (is.null(parTable$group)) { return(which((parTable$lhs == vec[1]) & (parTable$op == vec[2]) & (parTable$rhs == vec[3]))) } else { return(which((parTable$lhs == vec[1]) & (parTable$op == vec[2]) & (parTable$rhs == vec[3]) & (parTable$group == vec[4]))) } } #FIXME: used in partialInvariance getValue <- function(parTable, est, lhs, op, rhs, group) { target <- cbind(lhs, op, rhs, group) element <- apply(target, 1, matchElement, parTable = parTable) free <- parTable$free[element] out <- parTable$ustart[element] out[free != 0] <- est[free[free != 0]] out } # removeEqCon: Remove equality constraints #FIXME: used in singleParamTest removeEqCon <- function(pt, element) { pt <- lapply(pt, "[", -element) pt$id <- seq_along(pt$id) pt } #FIXME: used in singleParamTest patMerge <- function (pt1 = NULL, pt2 = NULL, remove.duplicated = FALSE, fromLast = FALSE, warn = TRUE) { pt1 <- as.data.frame(pt1, stringsAsFactors = FALSE) pt2 <- as.data.frame(pt2, stringsAsFactors = FALSE) stopifnot(!is.null(pt1$lhs), !is.null(pt1$op), !is.null(pt1$rhs), !is.null(pt2$lhs), !is.null(pt2$op), !is.null(pt2$rhs)) if (is.null(pt1$group) && is.null(pt2$group)) { TMP <- rbind(pt1[, c("lhs", "op", "rhs", "group")], pt2[, c("lhs", "op", "rhs", "group")]) } else { if (is.null(pt1$group) && !is.null(pt2$group)) { pt1$group <- rep(1L, length(pt1$lhs)) } else if (is.null(pt2$group) && !is.null(pt1$group)) { pt2$group <- rep(1L, length(pt2$lhs)) } TMP <- rbind(pt1[, c("lhs", "op", "rhs", "group")], pt2[, c("lhs", "op", "rhs", "group")]) } if (is.null(pt1$user) && !is.null(pt2$user)) { pt1$user <- rep(0L, length(pt1$lhs)) } else if (is.null(pt2$user) && !is.null(pt1$user)) { pt2$user <- rep(0L, length(pt2$lhs)) } if (is.null(pt1$free) && !is.null(pt2$free)) { pt1$free <- rep(0L, length(pt1$lhs)) } else if (is.null(pt2$free) && !is.null(pt1$free)) { pt2$free <- rep(0L, length(pt2$lhs)) } if (is.null(pt1$ustart) && !is.null(pt2$ustart)) { pt1$ustart <- rep(0, length(pt1$lhs)) } else if 
(is.null(pt2$ustart) && !is.null(pt1$ustart)) { pt2$ustart <- rep(0, length(pt2$lhs)) } if (is.null(pt1$exo) && !is.null(pt2$exo)) { pt1$exo <- rep(0L, length(pt1$lhs)) } else if (is.null(pt2$exo) && !is.null(pt1$exo)) { pt2$exo <- rep(0L, length(pt2$lhs)) } if (is.null(pt1$label) && !is.null(pt2$label)) { pt1$label <- rep("", length(pt1$lhs)) } else if (is.null(pt2$label) && !is.null(pt1$label)) { pt2$label <- rep("", length(pt2$lhs)) } if (is.null(pt1$plabel) && !is.null(pt2$plabel)) { pt1$plabel <- rep("", length(pt1$lhs)) } else if (is.null(pt2$plabel) && !is.null(pt1$plabel)) { pt2$plabel <- rep("", length(pt2$lhs)) } if (is.null(pt1$start) && !is.null(pt2$start)) { pt1$start <- rep(as.numeric(NA), length(pt1$lhs)) } else if (is.null(pt2$start) && !is.null(pt1$start)) { pt2$start <- rep(as.numeric(NA), length(pt2$lhs)) } if (!is.null(pt1$est)) pt1$est <- NULL if (!is.null(pt2$est)) pt2$est <- NULL if (!is.null(pt1$se)) pt1$se <- NULL if (!is.null(pt2$se)) pt2$se <- NULL if (remove.duplicated) { idx <- which(duplicated(TMP, fromLast = fromLast)) if (length(idx)) { if (warn) { warning("lavaan WARNING: duplicated parameters are ignored:\n", paste(apply(pt1[idx, c("lhs", "op", "rhs")], 1, paste, collapse = " "), collapse = "\n")) } if (fromLast) { pt1 <- pt1[-idx, ] } else { idx <- idx - nrow(pt1) pt2 <- pt2[-idx, ] } } } else if (!is.null(pt1$start) && !is.null(pt2$start)) { for (i in 1:length(pt1$lhs)) { idx <- which(pt2$lhs == pt1$lhs[i] & pt2$op == pt1$op[i] & pt2$rhs == pt1$rhs[i] & pt2$group == pt1$group[i]) pt2$start[idx] <- pt1$start[i] } } if (is.null(pt1$id) && !is.null(pt2$id)) { nid <- max(pt2$id) pt1$id <- (nid + 1L):(nid + nrow(pt1)) } else if (is.null(pt2$id) && !is.null(pt1$id)) { nid <- max(pt1$id) pt2$id <- (nid + 1L):(nid + nrow(pt2)) } NEW <- base::merge(pt1, pt2, all = TRUE, sort = FALSE) NEW } semTools/R/runMI-Wald.R0000644000176200001440000003070113532030064014317 0ustar liggesusers### Terrence D. Jorgensen & Yves Rosseel ### Last updated: 29 August 2019 ### Pooled Wald test for multiple imputations ### Borrowed source code from lavaan/R/lav_test_Wald.R ##' Wald Test for Multiple Imputations ##' ##' Wald test for testing a linear hypothesis about the parameters of lavaan ##' models fitted to multiple imputed data sets. Statistics for constraining ##' one or more free parameters in a model can be calculated from the pooled ##' point estimates and asymptotic covariance matrix of model parameters ##' using Rubin's (1987) rules, or by pooling the Wald test statistics ##' across imputed data sets (Li, Meng, Raghunathan, & Rubin, 1991). ##' ##' The constraints are specified using the \code{"=="} operator. ##' Both the left-hand side and the right-hand side of the equality can contain ##' a linear combination of model parameters, or a constant (like zero). ##' The model parameters must be specified by their user-specified labels from ##' the \code{link[lavaan]{model.syntax}}. Names of defined parameters ##' (using the ":=" operator) can be included too. ##' ##' @aliases lavTestWald.mi ##' @importFrom lavaan parTable lavListInspect ##' @importFrom stats pchisq pf ##' @importFrom methods getMethod ##' ##' @param object An object of class \code{\linkS4class{lavaan.mi}}. ##' @param constraints A \code{character} string (typically between single ##' quotes) containing one or more equality constraints. ##' See examples for more details ##' @param test \code{character} indicating which pooling method to use. 
##' \code{"D1"} or \code{"Rubin"} (default) indicates Rubin's (1987) rules ##' will be applied to the point estimates and the asymptotic covariance ##' matrix of model parameters, and those pooled values will be used to ##' calculate the Wald test in the usual manner. \code{"D2"}, \code{"LMRR"}, ##' or \code{"Li.et.al"} indicate that the complete-data Wald test statistic ##' should be calculated using each imputed data set, which will then be ##' pooled across imputations, as described in Li, Meng, Raghunathan, & Rubin ##' (1991) and Enders (2010, chapter 8). ##' @param asymptotic \code{logical}. If \code{FALSE} (default), the pooled test ##' will be returned as an \emph{F}-distributed statistic with numerator ##' (\code{df1}) and denominator (\code{df2}) degrees of freedom. ##' If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its ##' \code{df1} on the assumption that its \code{df2} is sufficiently large ##' that the statistic will be asymptotically \eqn{\chi^2} distributed ##' with \code{df1}. ##' @param scale.W \code{logical}. If \code{FALSE}, the pooled ##' asymptotic covariance matrix of model parameters is calculated as the ##' weighted sum of the within-imputation and between-imputation components. ##' Otherwise, the pooled asymptotic covariance matrix of model parameters is ##' calculated by scaling the within-imputation component by the ##' average relative increase in variance (ARIV; see Enders, 2010, p. 235), ##' which is \emph{only} consistent when requesting the \emph{F} test (i.e., ##' \code{asymptotic = FALSE}). Ignored (irrelevant) if \code{test = "D2"}. ##' @param omit.imps \code{character} vector specifying criteria for omitting ##' imputations from pooled results. Can include any of ##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the ##' default setting, which excludes any imputations that did not ##' converge or for which standard errors could not be computed. The ##' last option (\code{"no.npd"}) would exclude any imputations which ##' yielded a nonpositive definite covariance matrix for observed or ##' latent variables, which would include any "improper solutions" such ##' as Heywood cases. ##' @param verbose \code{logical}. If \code{TRUE}, print the restriction ##' matrix and the estimated restricted values. ##' @param warn \code{logical}. If \code{TRUE}, print warnings if they occur. ##' ##' @return ##' A vector containing the Wald test statistic (either an \code{F} or ##' \eqn{\chi^2} statistic, depending on the \code{asymptotic} argument), ##' the degrees of freedom (numerator and denominator, if ##' \code{asymptotic = FALSE}), and a \emph{p} value. If ##' \code{asymptotic = FALSE}, the relative increase in variance (RIV, or ##' average for multiparameter tests: ARIV) used to calculate the denominator ##' \emph{df} is also returned as a missing-data diagnostic, along with the ##' fraction of missing information (FMI = ARIV / (1 + ARIV)). ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' Adapted from \pkg{lavaan} source code, written by ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' @references ##' Enders, C. K. (2010). \emph{Applied missing data analysis}. ##' New York, NY: Guilford. ##' ##' Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). ##' Significance levels from repeated \emph{p}-values with multiply-imputed ##' data. \emph{Statistica Sinica, 1}(1), 65--92.
Retrieved from ##' https://www.jstor.org/stable/24303994 ##' ##' Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}. ##' New York, NY: Wiley. ##' ##' @seealso \code{\link[lavaan]{lavTestWald}} ##' ##' @examples ##' \dontrun{ ##' ## impose missing data for example ##' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), ##' "ageyr","agemo","school")] ##' set.seed(12345) ##' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) ##' age <- HSMiss$ageyr + HSMiss$agemo/12 ##' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ##' ##' ## impute missing data ##' library(Amelia) ##' set.seed(12345) ##' HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) ##' imps <- HS.amelia$imputations ##' ##' ## specify CFA model from lavaan's ?cfa help page ##' HS.model <- ' ##' visual =~ x1 + b1*x2 + x3 ##' textual =~ x4 + b2*x5 + x6 ##' speed =~ x7 + b3*x8 + x9 ##' ' ##' ##' fit <- cfa.mi(HS.model, data = imps) ##' ##' ## Testing whether a single parameter equals zero yields the 'chi-square' ##' ## version of the Wald z statistic from the summary() output, or the ##' ## 'F' version of the t statistic from the summary() output, depending ##' ## whether asymptotic = TRUE or FALSE ##' lavTestWald.mi(fit, constraints = "b1 == 0") # default D1 statistic ##' lavTestWald.mi(fit, constraints = "b1 == 0", test = "D2") # D2 statistic ##' ##' ## The real advantage is simultaneously testing several equality ##' ## constraints, or testing more complex constraints: ##' con <- ' ##' 2*b1 == b3 ##' b2 - b3 == 0 ##' ' ##' lavTestWald.mi(fit, constraints = con) # default F statistic ##' lavTestWald.mi(fit, constraints = con, asymptotic = TRUE) # chi-squared ##' ##' } ##' ##' @export lavTestWald.mi <- function(object, constraints = NULL, test = c("D1","D2"), asymptotic = FALSE, scale.W = !asymptotic, omit.imps = c("no.conv","no.se"), verbose = FALSE, warn = TRUE) { stopifnot(inherits(object, "lavaan.mi")) useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) test <- tolower(test[1]) if (test %in% c("d2", "lmrr", "li.et.al")) test <- "D2" if (test %in% c("d1", "rubin")) test <- "D1" if (!test %in% c("D1","D2")) stop('Invalid choice of "test" argument.') message('\nWald test calculated using se = "', lavListInspect(object, "options")$se, '"\n') if (test == "D2") { oldCall <- object@lavListCall if (!is.null(oldCall$parallel)) { if (oldCall$parallel == "snow") { oldCall$parallel <- "no" oldCall$ncpus <- 1L if (warn) warning("Unable to pass lavaan::lavTestWald() arguments ", "when parallel='snow'. 
Switching to parallel='no'.", " Unless using Windows, parallel='multicore' works.") } } ## call lavaanList() again to run lavTestWald() on each imputation oldCall$FUN <- function(obj) { out <- try(lavaan::lavTestWald(object = obj, constraints = constraints, verbose = FALSE), silent = TRUE) if (inherits(out, "try-error")) return(NULL) do.call(c, out[1:2]) } FIT <- eval(as.call(oldCall)) ## check if there are any results noStats <- sapply(FIT@funList, is.null) if (all(noStats)) stop("No success using lavTestWald() on any imputations.") ## template to fill in pooled values ## at a minimum, pool the total score test chiList <- sapply(FIT@funList[ intersect(useImps, which(!noStats)) ], "[[", i = 1) DF <- FIT@funList[[ intersect(useImps, which(!noStats))[1] ]][[2]] out <- calculate.D2(chiList, DF = DF, asymptotic) class(out) <- c("lavaan.vector","numeric") return(out) } # else test == "D1", making 'scale.W=' relevant ## "borrowed" lavTestWald() if (is.null(constraints) || nchar(constraints) == 0L) stop("constraints are empty") # remove == constraints from parTable, save as list PT <- parTable(object) partable <- as.list(PT[PT$op != "==", ]) # parse constraints FLAT <- lavaan::lavParseModelString( constraints ) CON <- attr(FLAT, "constraints") LIST <- list() if (length(CON) > 0L) { lhs <- unlist(lapply(CON, "[[", i = "lhs")) op <- unlist(lapply(CON, "[[", i = "op")) rhs <- unlist(lapply(CON, "[[", i = "rhs")) LIST$lhs <- c(LIST$lhs, lhs) # FIXME: why concatenate with NULL? LIST$op <- c(LIST$op, op) LIST$rhs <- c(LIST$rhs, rhs) } else stop("no equality constraints found in constraints argument") # theta = free parameters only (equality-constrained allowed) theta <- getMethod("coef", "lavaan.mi")(object, omit.imps = omit.imps) #object@optim$x # build constraint function ceq.function <- lavaan::lav_partable_constraints_ceq(partable = partable, con = LIST, debug = FALSE) # compute jacobian restrictions JAC <- try(lavaan::lav_func_jacobian_complex(func = ceq.function, x = theta), silent = TRUE) if (inherits(JAC, "try-error")) { # eg. pnorm() JAC <- lavaan::lav_func_jacobian_simple(func = ceq.function, x = theta) } if (verbose) {cat("Restriction matrix (jacobian):\n"); print(JAC); cat("\n")} # linear restriction theta.r <- ceq.function( theta ) if (verbose) {cat("Restricted theta values:\n"); print(theta.r); cat("\n")} # get VCOV VCOV <- getMethod("vcov","lavaan.mi")(object, scale.W = scale.W, omit.imps = omit.imps) # restricted vcov info.r <- JAC %*% VCOV %*% t(JAC) # Wald test statistic test.stat <- as.numeric(t(theta.r) %*% solve( info.r ) %*% theta.r) # number of constraints (k in Enders (2010, p. 235) eqs. 8.23-25) DF <- nrow(JAC) if (asymptotic) { out <- c("chisq" = test.stat, df = DF, pvalue = pchisq(test.stat, df = DF, lower.tail = FALSE)) } else { W <- getMethod("vcov", "lavaan.mi")(object, type = "within", omit.imps = omit.imps) B <- getMethod("vcov", "lavaan.mi")(object, type = "between", omit.imps = omit.imps) #FIXME: only valid for linear constraints? ## restricted B & W components of VCOV W.r <- JAC %*% W %*% t(JAC) B.r <- JAC %*% B %*% t(JAC) ## relative increase in variance due to missing data W.inv <- MASS::ginv(W.r) ariv <- (1 + 1/m) * sum(diag(B.r %*% W.inv)) / DF ## calculate denominator DF for F statistic a <- DF*(m - 1) if (a > 4) { v2 <- 4 + (a - 4) * (1 + (1 - 2/a)*(1 / ariv))^2 # Enders (eq. 8.24) } else { v2 <- a*(1 + 1/DF) * (1 + 1/ariv)^2 / 2 # Enders (eq. 
8.25) } out <- c("F" = test.stat / DF, df1 = DF, df2 = v2, pvalue = pf(test.stat / DF, df1 = DF, df2 = v2, lower.tail = FALSE), ariv = ariv, fmi = ariv / (1 + ariv)) } class(out) <- c("lavaan.vector","numeric") out } semTools/R/parcelAllocation.R0000644000176200001440000004306013433602242015661 0ustar liggesusers### Terrence D. Jorgensen ### Last updated: 21 February 2019 ##' Random Allocation of Items to Parcels in a Structural Equation Model ##' ##' This function generates a given number of random item-to-parcel ##' allocations, fits a model to each allocation, and provides averaged results ##' over all allocations. ##' ##' This function implements the random item-to-parcel allocation procedure ##' described in Sterba (2011) and Sterba and MacCallum (2010). The function ##' takes a single data set with item-level data, randomly assigns items to ##' parcels, fits a structural equation model to the parceled data (using ##' \link[lavaan]{lavaanList}), and repeats this process for a user-specified ##' number of random allocations. Results from all fitted models are summarized ##' in the output. For further details on the benefits of randomly allocating ##' items to parcels, see Sterba (2011) and Sterba and MacCallum (2010). ##' ##' @importFrom stats sd qnorm ##' @importFrom lavaan parTable lavInspect lavaanList lavaanify lavNames ##' ##' @param model \code{\link[lavaan]{lavaan}} model syntax specifying the model ##' fit to (at least some) parceled data. Note that there can be a mixture of ##' items and parcels (even within the same factor), in case certain items ##' should never be parceled. Can be a character string or parameter table. ##' Also see \code{\link[lavaan]{lavaanify}} for more details. ##' @param data A \code{data.frame} containing all observed variables appearing ##' in the \code{model}, as well as those in the \code{item.syntax} used to ##' create parcels. If the data have missing values, multiple imputation ##' before parceling is recommended: submit a stacked data set (with a variable ##' for the imputation number, so they can be separated later) and set ##' \code{do.fit = FALSE} to return the list of \code{data.frame}s (one per ##' allocation), each of which is a stacked, imputed data set with parcels. ##' @param parcel.names \code{character} vector containing names of all parcels ##' appearing as indicators in \code{model}. ##' @param item.syntax \link[lavaan]{lavaan} model syntax specifying the model ##' that would be fit to all of the unparceled items, including items that ##' should be randomly allocated to parcels appearing in \code{model}. ##' @param nAlloc The number of random items-to-parcels allocations to generate. ##' @param fun \code{character} string indicating the name of the ##' \code{\link[lavaan]{lavaan}} function used to fit \code{model} to ##' \code{data}. Can only take the values \code{"lavaan"}, \code{"sem"}, ##' \code{"cfa"}, or \code{"growth"}. ##' @param alpha Alpha level used as criterion for significance. ##' @param fit.measures \code{character} vector containing names of fit measures ##' to request from each fitted \code{\link[lavaan]{lavaan}} model. See the ##' output of \code{\link[lavaan]{fitMeasures}} for a list of available measures. ##' @param \dots Additional arguments to be passed to ##' \code{\link[lavaan]{lavaanList}}. See also \code{\link[lavaan]{lavOptions}} ##' @param show.progress If \code{TRUE}, show a \code{\link[utils]{txtProgressBar}} ##' indicating how fast the model-fitting iterates over allocations.
##' @param iseed (Optional) Random seed used for parceling items. When the same ##' random seed is specified and the program is re-run, the same allocations ##' will be generated. Using the same \code{iseed} argument will ensure that ##' any model is fit to the same parcel allocations. \emph{Note}: When using ##' \pkg{parallel} options, you must first type \code{RNGkind("L'Ecuyer-CMRG")} ##' into the R Console, so that the seed will be controlled across cores. ##' @param do.fit If \code{TRUE} (default), the \code{model} is fitted to each ##' parceled data set, and the summary of results is returned (see the Value ##' section below). If \code{FALSE}, the items are randomly parceled, but the ##' model is not fit; instead, the \code{list} of \code{data.frame}s is ##' returned (so assign it to an object). ##' @param return.fit If \code{TRUE}, a \code{\link[lavaan]{lavaanList}} object ##' is returned with the \code{list} of results across allocations. ##' @param warn Whether to print warnings when fitting \code{model} to each ##' allocation. ##' ##' @return ##' \item{Estimates}{A \code{data.frame} containing results related to ##' parameter estimates with columns corresponding to their names; average ##' and standard deviation across allocations; minimum, maximum, and range ##' across allocations; and the proportion of allocations in which each ##' parameter estimate was significant.} ##' \item{SE}{A \code{data.frame} containing results similar to ##' \code{Estimates}, but related to the standard errors of parameter ##' estimates.} ##' \item{Fit}{A \code{data.frame} containing results related to model fit, ##' with columns corresponding to fit index names; their average and ##' standard deviation across allocations; the minimum, maximum, and range ##' across allocations; and (if the test statistic or RMSEA is included in ##' \code{fit.measures}) the proportion of allocations in which each ##' test of (exact or close) fit was significant.} ##' \item{Model}{A \code{\link[lavaan]{lavaanList}} object containing results ##' of the \code{model} fitted to each parcel allocation. Only returned if ##' \code{return.fit = TRUE}.} ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\link{PAVranking}} for comparing 2 models, ##' \code{\link{poolMAlloc}} for choosing the number of allocations ##' ##' @references ##' ##' Sterba, S. K. (2011). Implications of parcel-allocation ##' variability for comparing fit of item-solutions and parcel-solutions. ##' \emph{Structural Equation Modeling, 18}(4), 554--577. ##' doi:10.1080/10705511.2011.607073 ##' ##' Sterba, S. K. & MacCallum, R. C. (2010). Variability in parameter estimates ##' and model fit across random allocations of items to parcels. ##' \emph{Multivariate Behavioral Research, 45}(2), 322--358. ##' doi:10.1080/00273171003680302 ##' ##' Sterba, S. K., & Rights, J. D. (2016). Accounting for parcel-allocation ##' variability in practice: Combining sources of uncertainty and choosing the ##' number of allocations. \emph{Multivariate Behavioral Research, 51}(2--3), ##' 296--313. doi:10.1080/00273171.2016.1144502 ##' ##' Sterba, S. K., & Rights, J. D. (2017). Effects of parceling on model ##' selection: Parcel-allocation variability in model ranking. ##' \emph{Psychological Methods, 22}(1), 47--68. doi:10.1037/met0000067 ##' ##' @examples ##' ##' ## Fit 2-factor CFA to simulated data. Each factor has 9 indicators.
##' ##' ## Specify the item-level model (if NO parcels were created) ##' item.syntax <- c(paste0("f1 =~ f1item", 1:9), ##' paste0("f2 =~ f2item", 1:9)) ##' cat(item.syntax, sep = "\n") ##' ## Below, we reduce the size of this same model by ##' ## applying different parceling schemes ##' ##' ##' ## 3-indicator parcels ##' mod.parcels <- ' ##' f1 =~ par1 + par2 + par3 ##' f2 =~ par4 + par5 + par6 ##' ' ##' ## names of parcels ##' (parcel.names <- paste0("par", 1:6)) ##' ##' \dontrun{ ##' ## override default random-number generator to use parallel options ##' RNGkind("L'Ecuyer-CMRG") ##' ##' parcelAllocation(mod.parcels, data = simParcel, nAlloc = 100, ##' parcel.names = parcel.names, item.syntax = item.syntax, ##' std.lv = TRUE, # any addition lavaan arguments ##' parallel = "snow") # parallel options ##' ##' ##' ##' ## POOL RESULTS by treating parcel allocations as multiple imputations ##' ## Details provided in Sterba & Rights (2016); see ?poolMAlloc. ##' ##' ## save list of data sets instead of fitting model yet ##' dataList <- parcelAllocation(mod.parcels, data = simParcel, nAlloc = 100, ##' parcel.names = parcel.names, ##' item.syntax = item.syntax, ##' do.fit = FALSE) ##' ## now fit the model to each data set ##' fit.parcels <- cfa.mi(mod.parcels, data = dataList, std.lv = TRUE) ##' summary(fit.parcels) # uses Rubin's rules ##' anova(fit.parcels) # pooled test statistic ##' class?lavaan.mi # find more methods for pooling results ##' } ##' ##' ##' ## multigroup example ##' simParcel$group <- 0:1 # arbitrary groups for example ##' mod.mg <- ' ##' f1 =~ par1 + c(L2, L2)*par2 + par3 ##' f2 =~ par4 + par5 + par6 ##' ' ##' ## names of parcels ##' (parcel.names <- paste0("par", 1:6)) ##' ##' parcelAllocation(mod.mg, data = simParcel, parcel.names, item.syntax, ##' std.lv = TRUE, group = "group", group.equal = "loadings", ##' nAlloc = 20, show.progress = TRUE) ##' ##' ##' ##' ## parcels for first factor, items for second factor ##' mod.items <- ' ##' f1 =~ par1 + par2 + par3 ##' f2 =~ f2item2 + f2item7 + f2item8 ##' ' ##' ## names of parcels ##' (parcel.names <- paste0("par", 1:3)) ##' ##' parcelAllocation(mod.items, data = simParcel, parcel.names, item.syntax, ##' nAlloc = 20, std.lv = TRUE) ##' ##' ##' ##' ## mixture of 1- and 3-indicator parcels for second factor ##' mod.mix <- ' ##' f1 =~ par1 + par2 + par3 ##' f2 =~ f2item2 + f2item7 + f2item8 + par4 + par5 + par6 ##' ' ##' ## names of parcels ##' (parcel.names <- paste0("par", 1:6)) ##' ##' parcelAllocation(mod.mix, data = simParcel, parcel.names, item.syntax, ##' nAlloc = 20, std.lv = TRUE) ##' ##' @export parcelAllocation <- function(model, data, parcel.names, item.syntax, nAlloc = 100, fun = "sem", alpha = .05, fit.measures = c("chisq","df","cfi", "tli","rmsea","srmr"), ..., show.progress = FALSE, iseed = 12345, do.fit = TRUE, return.fit = FALSE, warn = FALSE) { if (nAlloc < 2) stop("Minimum of two allocations required.") if (!fun %in% c("sem","cfa","growth","lavaan")) stop("'fun' argument must be either 'lavaan', 'cfa', 'sem', or 'growth'") lavArgs <- list(...) 
lavArgs$model <- item.syntax lavArgs$data <- data lavArgs$do.fit <- FALSE ## fit item-level model to data item.fit <- do.call(fun, lavArgs) item.PT <- parTable(item.fit) ## construct parameter table for parcel-level model if (is.character(model)) { ## default lavaanify arguments ptArgs <- formals(lavaanify) ## arguments passed to lavaan by user fitArgs <- lavInspect(item.fit, "call")[-1] ## overwrite defaults with user's values sameArgs <- intersect(names(ptArgs), names(fitArgs)) ptArgs[sameArgs] <- fitArgs[sameArgs] ptArgs$model <- model if (is.null(ptArgs$model.type)) ptArgs$model.type <- "sem" if (ptArgs$model.type != "growth") ptArgs$model.type <- "sem" ptArgs$ngroups <- lavInspect(item.fit, "ngroups") PT <- do.call("lavaanify", ptArgs) } else if (is.data.frame(model)) { PT <- model } else stop("'model' argument must be a character string of lavaan model", " syntax or a lavaan parameter table. See ?lavaanify help page.") ## check that both models specify the same factors factorNames <- lavNames(PT, type = "lv") if (!all(sort(lavNames(item.PT, type = "lv")) == sort(factorNames))) { stop("'model' and 'item.syntax' arguments specify different factors.\n", "'model' specifies: ", paste(sort(factorNames), collapse = ", "), "\n", "'item.syntax' specifies: ", paste(sort(lavNames(item.PT, type = "lv")), collapse = ", ")) } ## for each factor, assign item sets to parcel sets assignments <- list() for (i in factorNames) { ## all indicators from parcel-level model parcels <- PT$rhs[PT$lhs == i & PT$op == "=~"] ## all indicators from item-level model items <- item.PT$rhs[item.PT$lhs == i & item.PT$op == "=~"] ## exclude observed indicators from parceling scheme if specified ## in parcel-level model assignments[[i]]$parcels <- setdiff(parcels, names(data)) assignments[[i]]$items <- setdiff(items, parcels) ## Does this factor have parcels? If not, omit this factor from next loop if (length(assignments[[i]]$parcels) == 0L) { factorNames <- factorNames[-which(factorNames == i)] next } ## how many items per parcel? nItems <- length(assignments[[i]]$items) nParcels <- length(assignments[[i]]$parcels) assignments[[i]]$nPerParcel <- rep(nItems %/% nParcels, nParcels) if (nItems %% nParcels > 0) for (j in 1:(nItems %% nParcels)) { assignments[[i]]$nPerParcel[j] <- assignments[[i]]$nPerParcel[j] + 1 } names(assignments[[i]]$nPerParcel) <- assignments[[i]]$parcels } ## for each allocation, create parcels from items dataList <- list() for (i in 1:nAlloc) { dataList[[i]] <- data for (j in factorNames) { ## create a random assignment pattern ranAss <- sample(rep(names(assignments[[j]]$nPerParcel), times = assignments[[j]]$nPerParcel)) ## add each parcel to a copy of the original data set for (k in assignments[[j]]$parcels) { ## which items were selected for this parcel? ranVars <- assignments[[j]]$items[ranAss == k] ## calculate row means of those items, save as parcel dataList[[i]][ , k] <- rowMeans(data[ , ranVars]) } } } if (!do.fit) return(dataList) ## fit parcel-level model to list of data sets set.seed(iseed) # in case not using parallel fitList <- lavaanList(model, dataList, cmd = fun, ..., warn = warn, iseed = iseed, FUN = lavaan::fitMeasures, show.progress = show.progress) ## for which data sets did the model converge? 
conv <- fitList@meta$ok if (!any(conv)) stop("The model did not converge for any allocations.") if (!all(conv)) message("The model did not converge for the following ", "allocations: ", paste(which(!conv), collapse = ", ")) ## tools to extract output getOutput <- function(x, sig = FALSE) { c(Avg = mean(x, na.rm = TRUE), SD = sd(x, na.rm = TRUE), Min = min(x, na.rm = TRUE), Max = max(x, na.rm = TRUE), Range = max(x, na.rm = TRUE) - min(x, na.rm = TRUE)) } out <- list() myCols <- c("lhs","op","rhs","group", "block","label") template <- data.frame(fitList@ParTableList[[which(conv)[1]]][myCols]) ## parameter estimates Est <- sapply(fitList@ParTableList[conv], function(x) x$est) out$Estimates <- cbind(template, t(apply(Est, 1, getOutput))) ## standard errors SE <- sapply(fitList@ParTableList[conv], function(x) x$se) ## Any for which SE could not be calculated? missingSE <- apply(SE, 2, function(x) any(is.na(x))) if (!all(missingSE)) { if (any(missingSE)) message("Standard errors could not be computed for ", "the following allocations: ", paste(which(missingSE), collapse = ", ")) out$SE <- cbind(template, t(apply(SE[ , !missingSE], 1, getOutput))) ## add significance test results to $Estimates Sig <- abs(Est[, !missingSE] / SE[, !missingSE]) > qnorm(alpha / 2, lower.tail = FALSE) out$Estimates$Percent_Sig <- rowMeans(Sig) out$Estimates$Percent_Sig[fitList@ParTableList[[which(conv)[1]]]$free == 0L] <- NA } else { message("Standard errors could not be calculated for any converged", " data sets, so no significance tests could be conducted.") out$SE <- NULL } ## fit measures Fit <- do.call(cbind, fitList@funList[conv])[fit.measures, ] out$Fit <- data.frame(t(apply(Fit, 1, getOutput))) if (any(grepl(pattern = "chisq", fit.measures))) { out$Fit$Percent_Sig <- NA if ("chisq" %in% fit.measures) { pvalues <- sapply(fitList@funList[conv], "[", i = "pvalue") out$Fit["chisq", "Percent_Sig"] <- mean(pvalues < alpha, na.rm = TRUE) } if ("chisq.scaled" %in% fit.measures) { pvalues <- sapply(fitList@funList[conv], "[", i = "pvalue.scaled") out$Fit["chisq.scaled", "Percent_Sig"] <- mean(pvalues < alpha, na.rm = TRUE) } } if (any(grepl(pattern = "rmsea", fit.measures))) { if (is.null(out$Fit$Percent_Sig)) out$Fit$Percent_Sig <- NA if ("rmsea" %in% fit.measures) { pvalues <- sapply(fitList@funList[conv], "[", i = "rmsea.pvalue") out$Fit["rmsea", "Percent_Sig"] <- mean(pvalues < alpha, na.rm = TRUE) } if ("rmsea.scaled" %in% fit.measures) { pvalues <- sapply(fitList@funList[conv], "[", i = "rmsea.pvalue.scaled") out$Fit["rmsea.scaled", "Percent_Sig"] <- mean(pvalues < alpha, na.rm = TRUE) } } ## check for robust test if (any(grepl(pattern = "scaled", names(fitList@funList[conv][[1]]))) & !any(grepl(pattern = "scaled", fit.measures))) { warning('Robust test requested, but "fit.measures" argument does not', ' include any scaled measures (e.g., "chisq.scaled", ', '"rmsea.scaled", or "rmsea.robust").') } ## remove rows that do not correspond to estimates out$Estimates <- out$Estimates[fitList@ParTableList[[which(conv)[1]]]$group > 0L, ] if (!is.null(out$SE)) out$SE <- out$SE[fitList@ParTableList[[which(conv)[1]]]$group > 0L, ] ## assign class for lavaan's print method class(out$Estimates) <- c("lavaan.data.frame","data.frame") if (!is.null(out$SE)) class(out$SE) <- c("lavaan.data.frame","data.frame") class(out$Fit) <- c("lavaan.data.frame","data.frame") ## return output if (return.fit) { out$Model <- fitList out$Model@external$dataList <- dataList } out } 
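## The allocation step above reduces to: randomly shuffle item-to-parcel
## assignments, then average the items within each parcel. A minimal sketch
## of that step with hypothetical item and parcel names, using the package's
## simParcel example data (mirrors the loop over 'assignments' above;
## illustration only, not run):
# items <- paste0("f1item", 1:9)                  # 9 items for one factor
# nPerParcel <- c(par1 = 3, par2 = 3, par3 = 3)   # 3 items per parcel
# ranAss <- sample(rep(names(nPerParcel), times = nPerParcel))
# for (k in names(nPerParcel)) {
#   ranVars <- items[ranAss == k]                 # items drawn for parcel k
#   simParcel[ , k] <- rowMeans(simParcel[ , ranVars])  # parcel = item mean
# }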
semTools/R/imposeStart.R0000644000176200001440000001045313432435606014726 0ustar liggesusers### Sunthud Pornprasertmanit
### Last updated: 2 April 2017

#' Specify starting values from a lavaan output
#'
#' This function will save the parameter estimates of a lavaan output and
#' impose those parameter estimates as starting values for another analysis
#' model. The free parameters with the same names or the same labels across two
#' models will be assigned the new starting values. This function may help to
#' increase the chance of convergence in a complex model (e.g.,
#' multitrait-multimethod model or complex longitudinal invariance model).
#'
#'
#' @param out The \code{lavaan} output whose parameter estimates users wish to
#' use as starting values for an analysis model
#' @param expr The original code that users use to run a lavaan model
#' @param silent Logical. If \code{FALSE}, print the parameter table with the
#' new starting values
#' @return A fitted lavaan model
#' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
#' @examples
#'
#' ## The following example shows that the longitudinal weak invariance model
#' ## using effect coding did not converge with three time points but converged
#' ## with two time points. Thus, the parameter estimates from the model with
#' ## two time points are used as starting values for the model with three
#' ## time points. The model with the new starting values converges properly.
#'
#' weak2time <- '
#' # Loadings
#' f1t1 =~ LOAD1*y1t1 + LOAD2*y2t1 + LOAD3*y3t1
#' f1t2 =~ LOAD1*y1t2 + LOAD2*y2t2 + LOAD3*y3t2
#'
#' # Factor Variances
#' f1t1 ~~ f1t1
#' f1t2 ~~ f1t2
#'
#' # Factor Covariances
#' f1t1 ~~ f1t2
#'
#' # Error Variances
#' y1t1 ~~ y1t1
#' y2t1 ~~ y2t1
#' y3t1 ~~ y3t1
#' y1t2 ~~ y1t2
#' y2t2 ~~ y2t2
#' y3t2 ~~ y3t2
#'
#' # Error Covariances
#' y1t1 ~~ y1t2
#' y2t1 ~~ y2t2
#' y3t1 ~~ y3t2
#'
#' # Factor Means
#' f1t1 ~ NA*1
#' f1t2 ~ NA*1
#'
#' # Measurement Intercepts
#' y1t1 ~ INT1*1
#' y2t1 ~ INT2*1
#' y3t1 ~ INT3*1
#' y1t2 ~ INT4*1
#' y2t2 ~ INT5*1
#' y3t2 ~ INT6*1
#'
#' # Constraints for Effect-coding Identification
#' LOAD1 == 3 - LOAD2 - LOAD3
#' INT1 == 0 - INT2 - INT3
#' INT4 == 0 - INT5 - INT6
#' '
#' model2time <- lavaan(weak2time, data = exLong)
#'
#' weak3time <- '
#' # Loadings
#' f1t1 =~ LOAD1*y1t1 + LOAD2*y2t1 + LOAD3*y3t1
#' f1t2 =~ LOAD1*y1t2 + LOAD2*y2t2 + LOAD3*y3t2
#' f1t3 =~ LOAD1*y1t3 + LOAD2*y2t3 + LOAD3*y3t3
#'
#' # Factor Variances
#' f1t1 ~~ f1t1
#' f1t2 ~~ f1t2
#' f1t3 ~~ f1t3
#'
#' # Factor Covariances
#' f1t1 ~~ f1t2 + f1t3
#' f1t2 ~~ f1t3
#'
#' # Error Variances
#' y1t1 ~~ y1t1
#' y2t1 ~~ y2t1
#' y3t1 ~~ y3t1
#' y1t2 ~~ y1t2
#' y2t2 ~~ y2t2
#' y3t2 ~~ y3t2
#' y1t3 ~~ y1t3
#' y2t3 ~~ y2t3
#' y3t3 ~~ y3t3
#'
#' # Error Covariances
#' y1t1 ~~ y1t2
#' y2t1 ~~ y2t2
#' y3t1 ~~ y3t2
#' y1t1 ~~ y1t3
#' y2t1 ~~ y2t3
#' y3t1 ~~ y3t3
#' y1t2 ~~ y1t3
#' y2t2 ~~ y2t3
#' y3t2 ~~ y3t3
#'
#' # Factor Means
#' f1t1 ~ NA*1
#' f1t2 ~ NA*1
#' f1t3 ~ NA*1
#'
#' # Measurement Intercepts
#' y1t1 ~ INT1*1
#' y2t1 ~ INT2*1
#' y3t1 ~ INT3*1
#' y1t2 ~ INT4*1
#' y2t2 ~ INT5*1
#' y3t2 ~ INT6*1
#' y1t3 ~ INT7*1
#' y2t3 ~ INT8*1
#' y3t3 ~ INT9*1
#'
#' # Constraints for Effect-coding Identification
#' LOAD1 == 3 - LOAD2 - LOAD3
#' INT1 == 0 - INT2 - INT3
#' INT4 == 0 - INT5 - INT6
#' INT7 == 0 - INT8 - INT9
#' '
#' ### The following command does not provide a convergent result
#' # model3time <- lavaan(weak3time, data = exLong)
#'
#' ### Use starting values from the model with two time points
#' model3time <- imposeStart(model2time, lavaan(weak3time, data =
exLong))
#' summary(model3time)
#'
#' @export
imposeStart <- function(out, expr, silent = TRUE) {
	if(!is(out, "lavaan"))
		stop("The first argument of the function must be a lavaan output.")
	template2 <- template <- substitute(expr)
	template2$do.fit <- FALSE
	model <- eval(expr = template2, enclos = parent.frame())
	ptmodel <- parTable(model)
	coefmodel <- lavaan::coef(model)
	coefout <- lavaan::coef(out)
	start <- coefout[match(names(coefmodel), names(coefout))]
	ptmodel$start[ptmodel$free != 0] <- start[ptmodel$free[ptmodel$free != 0]]
	ptmodel$est <- NULL
	ptmodel$se <- NULL
	if(!silent) {
		cat("########## Model with imposed starting values #########\n")
		print(ptmodel)
	}
	if("model" %in% names(template)) {
		template$model <- ptmodel
	} else {
		template[[2]] <- ptmodel
	}
	eval(expr = template, enclos = parent.frame())
}
semTools/R/indProd.R0000644000176200001440000003251013432435606014011 0ustar liggesusers### Sunthud Pornprasertmanit and Alexander M. Schoemann
### Last updated: 9 March 2018
### prepare product indicators for 2-way and 3-way interactions in SEM

#' Make products of indicators using no centering, mean centering, double-mean
#' centering, or residual centering
#'
#' The \code{indProd} function will make products of indicators using no
#' centering, mean centering, double-mean centering, or residual centering. The
#' \code{orthogonalize} function is a shortcut for the \code{indProd} function
#' that makes residual-centered indicator products.
#'
#'
#' @aliases indProd orthogonalize
#' @importFrom stats lm
#'
#' @param data The desired data to be transformed.
#' @param var1 Names or indices of the variables loaded on the first factor
#' @param var2 Names or indices of the variables loaded on the second factor
#' @param var3 Names or indices of the variables loaded on the third factor
#' (for three-way interaction)
#' @param match Specify \code{TRUE} to use the match-paired approach (Marsh, Wen, &
#' Hau, 2004). If \code{FALSE}, the resulting products are all possible
#' products.
#' @param meanC Specify \code{TRUE} for mean centering the main effect
#' indicators before making the products
#' @param residualC Specify \code{TRUE} for residual centering the products by
#' the main effect indicators (Little, Bovaird, & Widaman, 2006).
#' @param doubleMC Specify \code{TRUE} for centering the resulting products
#' (Lin et al., 2010)
#' @param namesProd The names of the resulting products
#' @return The original data attached with the products.
#' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) Alexander
#' Schoemann (East Carolina University; \email{schoemanna@@ecu.edu})
#' @seealso \itemize{ \item \code{\link{probe2WayMC}} For probing the two-way
#' latent interaction when the results are obtained from mean-centering, or
#' double-mean centering. \item \code{\link{probe3WayMC}} For probing the
#' three-way latent interaction when the results are obtained from
#' mean-centering, or double-mean centering. \item \code{\link{probe2WayRC}}
#' For probing the two-way latent interaction when the results are obtained
#' from the residual-centering approach. \item \code{\link{probe3WayRC}} For
#' probing the three-way latent interaction when the results are obtained from
#' the residual-centering approach. \item \code{\link{plotProbe}} Plot the simple
#' intercepts and slopes of the latent interaction. }
#' @references Marsh, H. W., Wen, Z. & Hau, K. T. (2004). Structural equation
#' models of latent interactions: Evaluation of alternative estimation
#' strategies and indicator construction.
\emph{Psychological Methods, 9}(3),
#' 275--300. doi:10.1037/1082-989X.9.3.275
#'
#' Lin, G. C., Wen, Z., Marsh, H. W., & Lin, H. S. (2010). Structural equation
#' models of latent interactions: Clarification of orthogonalizing and
#' double-mean-centering strategies. \emph{Structural Equation Modeling, 17}(3),
#' 374--391. doi:10.1080/10705511.2010.488999
#'
#' Little, T. D., Bovaird, J. A., & Widaman, K. F. (2006). On the merits of
#' orthogonalizing powered and product terms: Implications for modeling
#' interactions among latent variables. \emph{Structural Equation Modeling,
#' 13}(4), 497--519. doi:10.1207/s15328007sem1304_1
#' @examples
#'
#' ## Mean centering / two-way interaction / match-paired
#' dat <- indProd(attitude[ , -1], var1 = 1:3, var2 = 4:6)
#'
#' ## Residual centering / two-way interaction / all possible products
#' dat2 <- indProd(attitude[ , -1], var1 = 1:3, var2 = 4:6, match = FALSE,
#'                 meanC = FALSE, residualC = TRUE, doubleMC = FALSE)
#'
#' ## Double-mean centering / two-way interaction / all possible products
#' dat3 <- indProd(attitude[ , -1], var1 = 1:3, var2 = 4:6, match = FALSE,
#'                 meanC = TRUE, residualC = FALSE, doubleMC = TRUE)
#'
#' ## Mean centering / three-way interaction / match-paired
#' dat4 <- indProd(attitude[ , -1], var1 = 1:2, var2 = 3:4, var3 = 5:6)
#'
#' ## Residual centering / three-way interaction / all possible products
#' dat5 <- orthogonalize(attitude[ , -1], var1 = 1:2, var2 = 3:4, var3 = 5:6,
#'                       match = FALSE)
#'
#' ## Double-mean centering / three-way interaction / all possible products
#' dat6 <- indProd(attitude[ , -1], var1 = 1:2, var2 = 3:4, var3 = 5:6,
#'                 match = FALSE, meanC = TRUE, residualC = TRUE,
#'                 doubleMC = TRUE)
#'
#' @export
indProd <- function(data, var1, var2, var3 = NULL, match = TRUE, meanC = TRUE,
                    residualC = FALSE, doubleMC = TRUE, namesProd = NULL) {
  # Get all variable names
  if (all(is.numeric(var1))) var1 <- colnames(data)[var1]
  if (all(is.numeric(var2))) var2 <- colnames(data)[var2]
  if (!is.null(var3) && all(is.numeric(var3))) var3 <- colnames(data)[var3]
  dat1 <- data[, var1]
  dat2 <- data[, var2]
  dat3 <- NULL
  if (!is.null(var3)) dat3 <- data[, var3]
  # Mean centering on the original indicators
  if (meanC) {
    dat1 <- scale(dat1, scale = FALSE)
    dat2 <- scale(dat2, scale = FALSE)
    if (!is.null(dat3)) dat3 <- scale(dat3, scale = FALSE)
  }
  if (match) {
    # Check whether the numbers of variables are equal across variable sets
    if (length(var1) != length(var2))
      stop("If the match-paired approach is used, the number of",
           " variables in all sets must be equal.")
    if (!is.null(var3) && (length(var1) != length(var3)))
      stop("If the match-paired approach is used, the number of",
           " variables in all three sets must be equal.")
    datProd <- NULL
    if (is.null(var3)) {
      # Two-way interaction
      datProd <- dat1 * dat2
      if (residualC) {
        notmissing <- which(!apply(datProd, 1, function(x) any(is.na(x))))
        colnames(datProd) <- paste("interactionProduct", 1:ncol(datProd), sep = "")
        # Write the expression for linear model and residualize the products
        temp <- data.frame(datProd, dat1, dat2)
        express <- paste("cbind(", paste(colnames(datProd), collapse = ", "),
                         ") ~ ", paste(c(colnames(dat1), colnames(dat2)),
                                       collapse = " + "), sep = "")
        datProd[notmissing,] <- lm(express, data = temp)$residuals
      }
    } else {
      # Three-way interaction
      datProd2way <- cbind(dat1 * dat2, dat1 * dat3, dat2 * dat3)
      datProd3way <- dat1 * dat2 * dat3
      if (residualC) {
        notmissing2way <- which(!apply(datProd2way, 1, function(x) any(is.na(x))))
        colnames(datProd2way) <- paste("interaction2Product", 1:ncol(datProd2way), sep = "")
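        ## Residual centering: each product indicator is regressed on all
        ## first-order indicators, and the lm() residuals replace the raw
        ## products, making the product terms orthogonal to the main effects
        ## (Little, Bovaird, & Widaman, 2006).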
# Write the expression for linear model and residualize the two-way products temp2 <- data.frame(datProd2way, dat1, dat2, dat3) express2 <- paste("cbind(", paste(colnames(datProd2way), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3)), collapse = " + "), sep = "") datProd2way[notmissing2way,] <- lm(express2, data = temp2)$residuals # Making all possible products to residualize the 3-way interaction datProd2wayFull <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) datProd2wayFull <- data.frame(datProd2wayFull, matrix(rep(dat1[, i], length(var2)), ncol = length(var2)) * dat2) for (i in 1:length(var1)) datProd2wayFull <- data.frame(datProd2wayFull, matrix(rep(dat1[, i], length(var3)), ncol = length(var3)) * dat3) for (i in 1:length(var2)) datProd2wayFull <- data.frame(datProd2wayFull, matrix(rep(dat2[, i], length(var3)), ncol = length(var3)) * dat3) datProd2wayFull <- datProd2wayFull[, -1] colnames(datProd2wayFull) <- paste("interaction2Product", 1:ncol(datProd2wayFull), sep = "") notmissing3way <- which(!apply(datProd3way, 1, function(x) any(is.na(x)))) colnames(datProd3way) <- paste("interaction3Product", 1:ncol(datProd3way), sep = "") # Write the expression for linear model and residualize the three-way products temp3 <- data.frame(datProd3way, dat1, dat2, dat3, datProd2wayFull) express3 <- paste("cbind(", paste(colnames(datProd3way), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3), colnames(datProd2wayFull)), collapse = " + "), sep = "") datProd3way[notmissing3way,] <- lm(express3, data = temp3)$residuals } datProd <- cbind(datProd2way, datProd3way) } ## Mean-centering the final product if (doubleMC) datProd <- scale(datProd, scale = FALSE) ## Rename the obtained product terms if (is.null(namesProd)) { if (is.null(var3)) { colnames(datProd) <- paste(var1, var2, sep = ".") } else { colnames(datProd) <- c(paste(var1, var2, sep = "."), paste(var1, var3, sep = "."), paste(var2, var3, sep = "."), paste(var1, var2, var3, sep = ".")) } } else { colnames(datProd) <- namesProd } } else { datProd <- NULL if (is.null(var3)) { # Create all possible combinations of the products of indicators datProd <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) datProd <- data.frame(datProd, matrix(rep(dat1[, i], length(var2)), ncol = length(var2)) * dat2) datProd <- datProd[, -1] if (residualC) { notmissing <- which(!apply(datProd, 1, function(x) any(is.na(x)))) colnames(datProd) <- paste("interactionProduct", 1:ncol(datProd), sep = "") # Write the expression for linear model and residualize the two-way products temp <- data.frame(datProd, dat1, dat2) express <- paste("cbind(", paste(colnames(datProd), collapse = ", "), ") ~ ", paste(c(colnames(dat1), colnames(dat2)), collapse = " + "), sep = "") datProd[notmissing,] <- lm(express, data = temp)$residuals } } else { # Create all possible combinations of the products of indicators datProd2way <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) datProd2way <- data.frame(datProd2way, matrix(rep(dat1[, i], length(var2)), ncol = length(var2)) * dat2) for (i in 1:length(var1)) datProd2way <- data.frame(datProd2way, matrix(rep(dat1[, i], length(var3)), ncol = length(var3)) * dat3) for (i in 1:length(var2)) datProd2way <- data.frame(datProd2way, matrix(rep(dat2[, i], length(var3)), ncol = length(var3)) * dat3) datProd3way <- matrix(0, nrow(data), 1) for (i in 1:length(var1)) { for(j in 1:length(var2)) { datProd3way <- data.frame(datProd3way, matrix(rep(dat1[, i], length(var3)), ncol = length(var3)) 
* matrix(rep(dat2[, j], length(var3)), ncol = length(var3)) * dat3)
        }
      }
      datProd2way <- datProd2way[, -1]
      datProd3way <- datProd3way[, -1]
      if (residualC) {
        notmissing2way <- which(!apply(datProd2way, 1, function(x) any(is.na(x))))
        colnames(datProd2way) <- paste("interaction2Product", 1:ncol(datProd2way), sep = "")
        # Write the expression for linear model and residualize the two-way products
        temp2 <- data.frame(datProd2way, dat1, dat2, dat3)
        express2 <- paste("cbind(", paste(colnames(datProd2way), collapse = ", "),
                          ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3)),
                                        collapse = " + "), sep = "")
        datProd2way[notmissing2way,] <- lm(express2, data = temp2)$residuals
        notmissing3way <- which(!apply(datProd3way, 1, function(x) any(is.na(x))))
        colnames(datProd3way) <- paste("interaction3Product", 1:ncol(datProd3way), sep = "")
        # Write the expression for linear model and residualize the three-way products
        temp3 <- data.frame(datProd3way, dat1, dat2, dat3, datProd2way)
        express3 <- paste("cbind(", paste(colnames(datProd3way), collapse = ", "),
                          ") ~ ", paste(c(colnames(dat1), colnames(dat2), colnames(dat3),
                                          colnames(datProd2way)), collapse = " + "), sep = "")
        datProd3way[notmissing3way,] <- lm(express3, data = temp3)$residuals
      }
      datProd <- cbind(datProd2way, datProd3way)
    }
    ## Double-mean centering
    if (doubleMC) datProd <- scale(datProd, scale = FALSE)
    ## Name the resulting product terms
    if (is.null(namesProd)) {
      temp <- NULL
      if (is.null(var3)) {
        for (i in 1:length(var1)) temp <- c(temp, paste(var1[i], var2, sep = "."))
      } else {
        for (i in 1:length(var1)) temp <- c(temp, paste(var1[i], var2, sep = "."))
        for (i in 1:length(var1)) temp <- c(temp, paste(var1[i], var3, sep = "."))
        for (i in 1:length(var2)) temp <- c(temp, paste(var2[i], var3, sep = "."))
        for (i in 1:length(var1)) {
          for(j in 1:length(var2)) {
            temp <- c(temp, paste(var1[i], var2[j], var3, sep = "."))
          }
        }
      }
      colnames(datProd) <- temp
    } else {
      colnames(datProd) <- namesProd
    }
  }
  ## Bind the products back to the original data
  data.frame(data, datProd)
}

#' @rdname indProd
#' @export
orthogonalize <- function(data, var1, var2, var3 = NULL, match = TRUE,
                          namesProd = NULL) {
  indProd(data = data, var1 = var1, var2 = var2, var3 = var3, match = match,
          meanC = FALSE, residualC = TRUE, doubleMC = FALSE,
          namesProd = namesProd)
}
semTools/R/htmt.R0000644000176200001440000001173713476433166013373 0ustar liggesusers### Ylenio Longo
### Last updated: 7 June 2019

##' Assessing Discriminant Validity using Heterotrait-Monotrait Ratio
##'
##' This function assesses discriminant validity through the
##' heterotrait-monotrait ratio (HTMT) of the correlations (Henseler, Ringle, &
##' Sarstedt, 2015). Specifically, it assesses the geometric-mean correlation
##' among indicators across constructs (i.e., heterotrait-heteromethod
##' correlations) relative to the geometric-mean correlation among indicators
##' within the same construct (i.e., monotrait-heteromethod correlations).
##' The resulting HTMT values are interpreted as estimates of inter-construct
##' correlations. Absolute values of the correlations are recommended to
##' calculate the HTMT matrix. Correlations are estimated using the
##' \code{\link[lavaan]{lavCor}} function in the \pkg{lavaan} package.
##'
##'
##' @importFrom stats cov2cor
##'
##' @param model lavaan \link[lavaan]{model.syntax} of a confirmatory factor
##' analysis model where at least two factors are required for indicators
##' measuring the same construct.
##' @param data A \code{data.frame} or data \code{matrix} ##' @param sample.cov A covariance or correlation matrix can be used, instead of ##' \code{data}, to estimate the HTMT values. ##' @param missing If "listwise", cases with missing values are removed listwise ##' from the data frame. If "direct" or "ml" or "fiml" and the estimator is ##' maximum likelihood, an EM algorithm is used to estimate the unrestricted ##' covariance matrix (and mean vector). If "pairwise", pairwise deletion is ##' used. If "default", the value is set depending on the estimator and the ##' mimic option (see details in \link[lavaan]{lavCor}). ##' @param ordered Character vector. Only used if object is a \code{data.frame}. ##' Treat these variables as ordered (ordinal) variables. Importantly, all ##' other variables will be treated as numeric (unless \code{is.ordered} in ##' \code{data}). (see also \link[lavaan]{lavCor}) ##' @param absolute logical. Whether HTMT values should be estimated based on ##' absolute correlations (recommended and default is \code{TRUE}) ##' ##' @return A matrix showing HTMT values (i.e., discriminant validity) between ##' each pair of factors ##' ##' @author ##' Ylenio Longo (University of Nottingham; \email{yleniolongo@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @references ##' Henseler, J., Ringle, C. M., & Sarstedt, M. (2015). A new criterion for ##' assessing discriminant validity in variance-based structural equation ##' modeling. \emph{Journal of the Academy of Marketing Science, 43}(1), ##' 115--135. doi:10.1007/s11747-014-0403-8 ##' ##' @examples ##' ##' HS.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' dat <- HolzingerSwineford1939[, paste0("x", 1:9)] ##' htmt(HS.model, dat) ##' ##' @export htmt <- function (model, data = NULL, sample.cov = NULL, missing = "listwise", ordered = NULL, absolute = TRUE) { model <- lavaan::lavaanify(model) model <- model[model$op %in% "=~", ] factors <- unique(model$lhs) nf <- length(factors) var <- list() for (i in 1:nf) { var[[i]] <- model$rhs[which(model$lhs %in% factors[i])] } varnames <- c(unlist(var)) if(!is.null(data)) { # if data if(any(! varnames %in% colnames(data))) { absent.vars <- which(! varnames %in% colnames(data)) stop("Missing observed variables in the dataset: ", paste(varnames[absent.vars], collapse = " ")) } data <- data[ , c(varnames)] R <- lavaan::lavCor(data, missing = missing, ordered = ordered) rownames(R) <- names(data) colnames(R) <- names(data) } else { if (any(! varnames %in% colnames(sample.cov))) { absent.vars <- which(! 
varnames %in% colnames(sample.cov))
      stop("Missing observed variables in the covariance or correlation matrix: ",
           paste(varnames[absent.vars], collapse = " "))
    }
    diagR <- diag(sample.cov)
    if (!all(diagR == 1)) { # if covariance matrix
      R <- cov2cor(sample.cov[varnames, varnames])
    } else { # if correlation matrix
      R <- sample.cov[varnames, varnames]
    }
  }
  if (absolute) {
    R <- abs(R)
  }
  diag(R) <- NA
  m.cor.w <- list()
  for (i in 1:nf) {
    m.cor.w[[i]] <- mean(R[var[[i]], var[[i]]], na.rm = TRUE)
  }
  m.cor.w <- as.numeric(m.cor.w)
  comb <- expand.grid(1:nf, 1:nf)
  g <- list()
  for (i in 1:nrow(comb)) {
    g[[i]] <- sqrt(m.cor.w[comb[i, 2]] * m.cor.w[comb[i, 1]])
  }
  g <- as.numeric(g)
  m.cor.a <- list()
  for (i in 1:nrow(comb)) {
    m.cor.a[[i]] <- mean(R[var[[comb[i, 2]]], var[[comb[i, 1]]]], na.rm = TRUE)
  }
  m.cor.a <- as.numeric(m.cor.a)
  outhtmt <- m.cor.a / g
  res <- matrix(outhtmt, nrow = nf, ncol = nf, dimnames = list(factors))
  colnames(res) <- factors
  class(res) <- c("lavaan.matrix.symmetric", "matrix")
  res
}
semTools/R/runMI-LRT.R0000644000176200001440000006423613532027665014110 0ustar liggesusers### Terrence D. Jorgensen & Yves Rosseel
### Last updated: 29 August 2019
### Pooled likelihood ratio test for multiple imputations
### Borrowed source code from lavaan/R/lav_test_LRT.R

## -------------
## Main function
## -------------

##' Likelihood Ratio Test for Multiple Imputations
##'
##' Likelihood ratio test (LRT) for lavaan models fitted to multiple imputed
##' data sets. Statistics for comparing nested models can be calculated by
##' pooling the likelihood ratios across imputed data sets, as described by
##' Meng & Rubin (1992), or by pooling the LRT statistics from each imputation,
##' as described by Li, Meng, Raghunathan, & Rubin (1991).
##'
##' The Meng & Rubin (1992) method, also referred to as the \code{"D3"}
##' statistic, is only applicable when using a likelihood-based estimator.
##' Otherwise (e.g., DWLS for categorical outcomes), users are notified that
##' \code{test} was set to \code{"D2"}.
##'
##' \code{test = "Mplus"} implies \code{"D3"} and \code{asymptotic = TRUE}
##' (see Asparouhov & Muthen, 2010).
##'
##' Note that unlike \code{\link[lavaan]{lavTestLRT}}, \code{lavTestLRT.mi} can
##' only be used to compare a single pair of models, not a longer list of
##' models. To compare several nested models fitted to multiple imputations,
##' see examples on the \code{\link{compareFit}} help page.
##'
##' @aliases lavTestLRT.mi
##' @importFrom lavaan lavListInspect parTable lavTestLRT
##' @importFrom stats cov pchisq pf
##'
##' @param object,h1 An object of class \code{\linkS4class{lavaan.mi}}.
##' \code{object} should be nested within (more constrained than) \code{h1}.
##' @param test \code{character} indicating which pooling method to use.
##' \code{"D3"}, \code{"mr"}, or \code{"meng.rubin"} (default) requests the
##' method described by Meng & Rubin (1992). \code{"D2"}, \code{"LMRR"},
##' or \code{"Li.et.al"} requests that the complete-data LRT statistic be
##' calculated using each imputed data set, which will then be pooled across
##' imputations, as described in Li, Meng, Raghunathan, & Rubin (1991).
##' Find additional details in Enders (2010, chapter 8).
##' @param omit.imps \code{character} vector specifying criteria for omitting
##' imputations from pooled results.
Can include any of
##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the
##' default setting, which excludes any imputations that did not
##' converge or for which standard errors could not be computed. The
##' last option (\code{"no.npd"}) would exclude any imputations which
##' yielded a nonpositive definite covariance matrix for observed or
##' latent variables, which would include any "improper solutions" such
##' as Heywood cases.
##' @param asymptotic \code{logical}. If \code{FALSE} (default), the pooled test
##' will be returned as an \emph{F}-distributed statistic with numerator
##' (\code{df1}) and denominator (\code{df2}) degrees of freedom.
##' If \code{TRUE}, the pooled \emph{F} statistic will be multiplied by its
##' \code{df1} on the assumption that its \code{df2} is sufficiently large
##' that the statistic will be asymptotically \eqn{\chi^2} distributed
##' with \code{df1}.
##' @param pool.robust \code{logical}. Ignored unless \code{test = "D2"} and a
##' robust test was requested. If \code{pool.robust = TRUE}, the robust test
##' statistic is pooled, whereas \code{pool.robust = FALSE} will pool
##' the naive test statistic (or difference statistic) and apply the average
##' scale/shift parameter to it (unavailable for mean- and variance-adjusted
##' difference statistics, so \code{pool.robust} will be set \code{TRUE}).
##' @param ... Additional arguments passed to \code{\link[lavaan]{lavTestLRT}},
##' only if \code{test = "D2"} and \code{pool.robust = TRUE}
##'
##' @return
##' A vector containing the LRT statistic (either an \code{F} or \eqn{\chi^2}
##' statistic, depending on the \code{asymptotic} argument), its degrees of
##' freedom (numerator and denominator, if \code{asymptotic = FALSE}), its
##' \emph{p} value, and 2 missing-data diagnostics: the relative increase
##' in variance (RIV, or average for multiparameter tests: ARIV) and the
##' fraction of missing information (FMI = ARIV / (1 + ARIV)). Robust statistics
##' will also include the average (across imputations) scaling factor and
##' (if relevant) shift parameter(s), unless \code{pool.robust = TRUE}.
##'
##' @author
##' Terrence D. Jorgensen (University of Amsterdam;
##' \email{TJorgensen314@@gmail.com})
##'
##' @references
##' Enders, C. K. (2010). \emph{Applied missing data analysis}.
##' New York, NY: Guilford.
##'
##' Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991).
##' Significance levels from repeated \emph{p}-values with multiply-imputed
##' data. \emph{Statistica Sinica, 1}(1), 65--92. Retrieved from
##' https://www.jstor.org/stable/24303994
##'
##' Meng, X.-L., & Rubin, D. B. (1992). Performing likelihood ratio tests with
##' multiply-imputed data sets. \emph{Biometrika, 79}(1), 103--111.
##' doi:10.2307/2337151
##'
##' Rubin, D. B. (1987). \emph{Multiple imputation for nonresponse in surveys}.
##' New York, NY: Wiley.
##' ##' @seealso \code{\link[lavaan]{lavTestLRT}}, \code{\link{compareFit}} ##' ##' @examples ##' \dontrun{ ##' ## impose missing data for example ##' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), ##' "ageyr","agemo","school")] ##' set.seed(12345) ##' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) ##' age <- HSMiss$ageyr + HSMiss$agemo/12 ##' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ##' ##' ## impute missing data ##' library(Amelia) ##' set.seed(12345) ##' HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) ##' imps <- HS.amelia$imputations ##' ##' ## specify CFA model from lavaan's ?cfa help page ##' HS.model <- ' ##' visual =~ x1 + b1*x2 + x3 ##' textual =~ x4 + b2*x5 + x6 ##' speed =~ x7 + b3*x8 + x9 ##' ' ##' ##' fit1 <- cfa.mi(HS.model, data = imps, estimator = "mlm") ##' fit0 <- cfa.mi(HS.model, data = imps, estimator = "mlm", orthogonal = TRUE) ##' ##' ## By default, use D3. ##' ## Must request a chi-squared statistic to be robustified. ##' lavTestLRT.mi(fit1, h1 = fit0, asymptotic = TRUE) ##' ##' ## Using D2, you can either robustify the pooled naive statistic ... ##' lavTestLRT.mi(fit1, h1 = fit0, asymptotic = TRUE, test = "D2") ##' ## ... or pool the robust chi-squared statistic ##' lavTestLRT.mi(fit1, h1 = fit0, asymptotic = TRUE, test = "D2", ##' pool.robust = TRUE) ##' } ##' ##' @export lavTestLRT.mi <- function(object, h1 = NULL, test = c("D3","D2"), omit.imps = c("no.conv","no.se"), asymptotic = FALSE, pool.robust = FALSE, ...) { ## check class if (!inherits(object, "lavaan.mi")) stop("object is not class 'lavaan.mi'") useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) DF0 <- object@testList[[ useImps[1] ]]$standard[["df"]] ## model comparison? 
if (!is.null(h1)) { if (!inherits(h1, "lavaan.mi")) stop("h1 is not class 'lavaan.mi'") if (!all(lavListInspect(object, "options")$test == lavListInspect(h1, "options")$test)) { stop('Different (sets of) test statistics were requested for the 2 models.') } useImps1 <- rep(TRUE, length(h1@DataList)) if ("no.conv" %in% omit.imps) useImps1 <- sapply(h1@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps1 <- useImps1 & sapply(h1@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(h1@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(h1@convergence, "[[", i = "Heywood.ov") useImps1 <- useImps1 & !(Heywood.lv | Heywood.ov) } m <- sum(useImps1) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps1 <- which(useImps1) h0.not.h1 <- setdiff(useImps, useImps1) if (length(h0.not.h1)) warn0 <- paste('\n\nFor the following imputations, ', '"object" converged but not "h1":', paste(h0.not.h1, collapse = ", "), '\n\n') h1.not.h0 <- setdiff(useImps1, useImps) if (length(h1.not.h0)) warn1 <- paste('\n\nFor the following imputations, ', '"h1" converged but not "object":', paste(h1.not.h0, collapse = ", "), '\n\n') if (length(c(h0.not.h1, h1.not.h0))) warning('The models being compared did not converge on the same set of ', 'imputations. ', if (length(h0.not.h1)) warn0 else '', if (length(h1.not.h0)) warn1 else '', 'Likelihood ratio test conducted using only the ', 'imputations for which both models converged.') useImps <- intersect(useImps, useImps1) m <- length(useImps) # yes, redefine if necessary if (m == 0L) stop('For no imputations did both models converge.') ## check DF DF1 <- h1@testList[[ useImps[1] ]]$standard[["df"]] if (DF0 == DF1) stop("models have equal degrees of freedom") if (DF0 < DF1) { H0 <- h1 h1 <- object object <- H0 H0 <- DF1 DF1 <- DF0 DF0 <- H0 } DF <- DF0 - DF1 } else DF <- DF0 ## only keep arguments relevant to pass to lavTestLRT (if D2) dots <- list(...)[names(formals(lavTestLRT))] ## check test-pooling options, for backward compatibility? test <- tolower(test[1]) if (test == "mplus") { test <- "D3" asymptotic <- TRUE } if (tolower(test) %in% c("mr","meng.rubin","likelihood","lrt","d3")) test <- "D3" if (tolower(test) %in% c("lmrr","li.et.al","pooled.wald","d2")) test <- "D2" if (test == "D3" && !lavListInspect(object, "options")$estimator %in% c("ML","PML","FML")) { message('"D3" only available using maximum likelihood estimation. ', 'Changed test to "D2".') test <- "D2" } ## check for robust test.names <- lavListInspect(object, "options")$test # lavaan 0.6-5: for now, we only acknowledge the first non-standard @test if (length(test.names) > 1L) { ## remove standard and any bootstrapped tests rm.idx <- which(test.names %in% c("standard","bootstrap","bollen.stine")) if (length(rm.idx) > 0L) { test.names <- test.names[-rm.idx] } ## only acknowledge the first scaled test statistic if (length(test.names) > 1L) { test.names <- test.names[1] } } robust <- any(test.names %in% c("satorra.bentler","yuan.bentler", "yuan.bentler.mplus","scaled.shifted", "mean.var.adjusted","satterthwaite")) if (!robust) pool.robust <- FALSE scaleshift <- any(test.names == "scaled.shifted") if (scaleshift && !is.null(h1)) { if (test == "D3" | !pool.robust) message("If test = 'scaled.shifted' (estimator = 'WLSMV' or 'MLMV'), ", "model comparison is only available by (re)setting test = 'D2' ", "and pool.robust = TRUE.\n", "Control more options by passing arguments to lavTestLRT() via ", "the '...' 
argument.\n")
    pool.robust <- TRUE
    test <- 'D2'
  }
  if (robust && !pool.robust && !asymptotic) {
    message('Robust correction can only be applied to pooled chi-squared ',
            'statistic, not F statistic. "asymptotic" was switched to TRUE.')
    asymptotic <- TRUE
  }
  if (pool.robust && test == "D3") {
    message('pool.robust = TRUE is only applicable when test = "D2". ',
            'Changed test to "D2".')
    test <- "D2"
  }
  ## calculate pooled test:
  if (robust && pool.robust) {
    ## pool both the naive and robust test statistics, return both to
    ## make output consistent across options
    out.naive <- D2.LRT(object, h1 = h1, useImps = useImps,
                        asymptotic = asymptotic, pool.robust = FALSE)
    out.robust <- D2.LRT(object, h1 = h1, useImps = useImps, LRTargs = dots,
                         asymptotic = asymptotic, pool.robust = TRUE)
    out <- c(out.naive, out.robust)
  } else if (test == "D2") {
    out <- D2.LRT(object, h1 = h1, useImps = useImps, asymptotic = asymptotic,
                  pool.robust = pool.robust)
  } else if (test == "D3") {
    out <- D3.LRT(object, h1 = h1, useImps = useImps, asymptotic = asymptotic,
                  omit.imps = omit.imps)
  }
  ## If test statistic is negative, return without any indices or robustness
  if (asymptotic) {
    if (out[["chisq"]] == 0) {
      message('Negative pooled test statistic was set to zero, so fit will ',
              'appear to be arbitrarily perfect. ',
              if (robust) 'Robust corrections uninformative, not returned.',
              '\n')
      class(out) <- c("lavaan.vector","numeric")
      return(out)
    }
  } else {
    if (out[["F"]] == 0) {
      message('Negative pooled test statistic was set to zero, so fit will ',
              'appear to be arbitrarily perfect.\n')
      class(out) <- c("lavaan.vector","numeric")
      return(out)
    }
  }
  ## If robust statistics were not pooled above, robustify naive statistics
  if (robust && !pool.robust) {
    out <- robustify(ChiSq = out, object = object, h1 = h1, useImps = useImps)
    if (scaleshift) {
      extraWarn <- ' and shift parameter'
    } else if (any(test.names == "mean.var.adjusted")) {
      extraWarn <- ' and degrees of freedom'
    } else extraWarn <- ''
    message('Robust corrections are made by pooling the naive chi-squared ',
            'statistic across ', m, ' imputations for which the model ',
            'converged, then applying the average (across imputations) scaling',
            ' factor', extraWarn, ' to that pooled value. \n',
            'To instead pool the robust test statistics, set test = "D2" and ',
            'pool.robust = TRUE. \n')
  }
  class(out) <- c("lavaan.vector","numeric")
  out
}

## ----------------
## Hidden Functions
## ----------------

##' @importFrom lavaan lavListInspect parTable lavTestLRT
D2.LRT <- function(object, h1 = NULL, useImps, asymptotic = FALSE,
                   pool.robust = FALSE, LRTargs = list()) {
  warn <- lavListInspect(object, "options")$warn
  if (pool.robust && !is.null(h1)) {
    PT1 <- parTable(h1)
    op1 <- lavListInspect(h1, "options")
    oldCall <- object@lavListCall #re-run lavaanList() and save DIFFTEST
    if (!is.null(oldCall$parallel)) {
      if (oldCall$parallel == "snow") {
        oldCall$parallel <- "no"
        oldCall$ncpus <- 1L
        if (warn) warning("Unable to pass lavaan::lavTestLRT() arguments when ",
                          "parallel = 'snow'. Switching to parallel = 'no'. ",
                          "Unless using Windows, parallel = 'multicore' works.")
      }
    }
    ## call lavaanList() again to run lavTestLRT() on each imputation
    oldCall$FUN <- function(obj) {
      fit1 <- try(lavaan::lavaan(PT1, slotOptions = op1, slotData = obj@Data),
                  silent = TRUE)
      if (inherits(fit1, "try-error")) {
        return("fit failed")
      } else {
        argList <- c(list(object = obj, fit1), LRTargs)
      }
      out <- try(do.call(lavTestLRT, argList), silent = TRUE)
      if (inherits(out, "try-error")) return("lavTestLRT() failed")
      c(chisq = out[2, "Chisq diff"], df = out[2, "Df diff"])
    }
    FIT <- eval(as.call(oldCall))
    ## check if there are any results
    noFit <- sapply(FIT@funList, function(x) x[1] == "fit failed")
    noLRT <- sapply(FIT@funList, function(x) x[1] == "lavTestLRT() failed")
    if (all(noFit | noLRT)) stop("No success using lavTestLRT() on any imputations.")
    chiList <- sapply(FIT@funList[ intersect(which(!(noFit | noLRT)), useImps) ],
                      "[[", i = "chisq")
    dfList <- sapply(FIT@funList[ intersect(which(!(noFit | noLRT)), useImps) ],
                     "[[", i = "df")
    out <- calculate.D2(chiList, DF = mean(dfList), asymptotic)
    names(out) <- paste0(names(out), ".scaled")
    class(out) <- c("lavaan.vector","numeric")
    return(out)
  }
  ## else, return model fit OR naive difference test to be robustified
  test.names <- lavListInspect(object, "options")$test
  # lavaan 0.6-5: for now, we only acknowledge the first non-standard @test
  if (length(test.names) > 1L) {
    ## remove standard and any bootstrapped tests
    rm.idx <- which(test.names %in% c("standard","bootstrap","bollen.stine"))
    if (length(rm.idx) > 0L) {
      test.names <- test.names[-rm.idx]
    }
    ## only acknowledge the first scaled test statistic
    if (length(test.names) > 1L) {
      test.names <- test.names[1]
    }
  }
  ## pool Wald tests
  if (is.null(h1)) {
    test <- if (pool.robust) test.names[1] else "standard"
    DF <- mean(sapply(object@testList[useImps], function(x) x[[test]][["df"]] ))
    w <- sapply(object@testList[useImps], function(x) x[[test]][["stat"]])
  } else {
    ## this will not get run if !pool.robust because logic catches that first
    DF0 <- mean(sapply(object@testList[useImps], function(x) x$standard[["df"]]))
    DF1 <- mean(sapply( h1@testList[useImps], function(x) x$standard[["df"]]))
    DF <- DF0 - DF1
    w0 <- sapply(object@testList[useImps], function(x) x$standard[["stat"]])
    w1 <- sapply( h1@testList[useImps], function(x) x$standard[["stat"]])
    w <- w0 - w1
    ## check DF
    if (DF < 0) {
      w <- -1*w
      DF <- -1*DF
    }
  }
  out <- calculate.D2(w, DF, asymptotic)
  ## add .scaled suffix
  if (pool.robust) names(out) <- paste0(names(out), ".scaled")
  ## for 1 model, add extra info (redundant if pool.robust)
  if (is.null(h1) && !pool.robust) {
    PT <- parTable(object)
    out <- c(out, npar = max(PT$free) - sum(PT$op == "=="),
             ntotal = lavListInspect(object, "ntotal"))
  }
  class(out) <- c("lavaan.vector","numeric")
  out
}

##' @importFrom lavaan parTable lavaan lavListInspect
##' @importFrom methods getMethod
getLLs <- function(object, useImps, saturated = FALSE,
                   omit.imps = c("no.conv","no.se")) {
  ## FIXME: lavaanList does not return info when fixed because no convergence!
  dataList <- object@DataList[useImps]
  lavoptions <- lavListInspect(object, "options")
  group <- lavListInspect(object, "group")
  if (length(group) == 0L) group <- NULL
  cluster <- lavListInspect(object, "cluster")
  if (length(cluster) == 0L) cluster <- NULL
  if (saturated) {
    #FIXME: below is legacy code, no longer needed?
# fit <- lavaan(parTable(object), data = dataList[[ useImps[1] ]], # slotOptions = lavoptions, group = group, cluster = cluster) # ## use saturated parameter table as new model # PT <- lavaan::lav_partable_unrestricted(fit) # ## fit saturated parameter table to each imputation, return estimates # satParams <- lapply(object@DataList[useImps], function(d) { # parTable(lavaan(model = PT, data = d, slotOptions = lavoptions, # group = group, cluster = cluster))$est # }) # ## fix them to pooled estimates # PT$ustart <- colMeans(do.call(rbind, satParams)) PT <- object@h1List[[ useImps[1] ]]$PT coefList <- lapply(object@h1List[useImps], function(x) x$PT$ustart) PT$ustart <- colMeans(do.call(rbind, coefList)) ## set all parameters fixed PT$free <- 0L PT$user <- 1L PT$start <- NULL PT$est <- NULL PT$se <- NULL } else { ## save parameter table as new model PT <- parTable(object) ## set all parameters fixed PT$free <- 0L PT$user <- 1L ## fix them to pooled estimates fixedValues <- getMethod("coef","lavaan.mi")(object, type = "user", omit.imps = omit.imps) PT$ustart <- fixedValues PT$start <- NULL PT$est <- NULL PT$se <- NULL ## omit (in)equality constraints and user-defined parameters params <- !(PT$op %in% c("==","<",">",":=")) PT <- PT[params, ] } ## return log-likelihoods sapply(dataList, function(d) { lavaan::logLik(lavaan(PT, data = d, slotOptions = lavoptions, group = group, cluster = cluster)) }) #TODO: use "dry-run" trick from blavaan:::postpred() to save computing time } ##' @importFrom stats pf pchisq ##' @importFrom lavaan lavListInspect parTable D3.LRT <- function(object, h1 = NULL, useImps, asymptotic = FALSE, omit.imps = c("no.conv","no.se")) { ## NOTE: Need to pass omit.imps= to getLLs(), which calls coef() method N <- lavListInspect(object, "ntotal") m <- length(useImps) if (is.null(h1)) { DF <- object@testList[[ useImps[1] ]]$standard[["df"]] } else { DF1 <- h1@testList[[ useImps[1] ]]$standard[["df"]] DF0 <- object@testList[[ useImps[1] ]]$standard[["df"]] DF <- DF0 - DF1 if (DF < 0) stop('The "object" model must be nested within (i.e., have ', 'fewer degrees of freedom than) the "h1" model.') } ## calculate m log-likelihoods under pooled H0 estimates LL0 <- getLLs(object, useImps, omit.imps = omit.imps) ## calculate m log-likelihoods under pooled H1 estimates if (is.null(h1)) { LL1 <- getLLs(object, useImps, saturated = TRUE, omit.imps = omit.imps) } else { LL1 <- getLLs(h1, useImps, omit.imps = omit.imps) } #FIXME: check whether LL1 or LL0 returned errors? add try()? ## calculate average of m LRTs LRT_con <- mean(-2*(LL0 - LL1)) # getLLs() already applies [useImps] ## average chisq across imputations if (is.null(h1)) { LRT_bar <- mean(sapply(object@testList[useImps], function(x) x$standard$stat)) } else { LRT_bar <- mean(sapply(object@testList[useImps], function(x) x$standard$stat) - sapply(h1@testList[useImps], function(x) x$standard$stat)) } ## calculate average relative increase in variance a <- DF*(m - 1) ariv <- ((m + 1) / a) * (LRT_bar - LRT_con) test.stat <- LRT_con / (DF*(1 + ariv)) if (is.na(test.stat)) stop('D3 test statistic could not be calculated. 
', 'Try the D2 pooling method.')
  #FIXME: check whether model-implied Sigma is NPD
  if (test.stat < 0) {
    message('Negative test statistic set to zero \n')
    test.stat <- 0
  }
  if (asymptotic) {
    out <- c("chisq" = test.stat * DF, df = DF,
             pvalue = pchisq(test.stat * DF, df = DF, lower.tail = FALSE),
             ariv = ariv, fmi = ariv / (1 + ariv))
  } else {
    ## F statistic
    if (a > 4) {
      v4 <- 4 + (a - 4) * (1 + (1 - (2 / a))*(1 / ariv))^2 # Enders (eq. 8.34)
    } else {
      v4 <- a*(1 + 1/DF)*(1 + 1/ariv)^2 / 2 # Enders (eq. 8.35)
      # v4 <- (DF + 1)*(m - 1)*(1 + (1 / ariv))^2 / 2 # Grund et al. (eq. 9)
    }
    out <- c("F" = test.stat, df1 = DF, df2 = v4,
             pvalue = pf(test.stat, df1 = DF, df2 = v4, lower.tail = FALSE),
             ariv = ariv, fmi = ariv / (1 + ariv))
  }
  ## add log-likelihood and AIC/BIC for target model
  if (is.null(h1)) {
    PT <- parTable(object)
    npar <- max(PT$free) - sum(PT$op == "==")
    out <- c(out, npar = npar, ntotal = N,
             logl = mean(LL0), unrestricted.logl = mean(LL1),
             aic = -2*mean(LL0) + 2*npar,
             bic = -2*mean(LL0) + npar*log(N),
             bic2 = -2*mean(LL0) + npar*log((N + 2) / 24))
    ## NOTE: Mplus reports the average of m likelihoods evaluated at the
    ##       m point estimates, not evaluated at the pooled point estimates.
    ##       Mplus also uses those to calculate AIC and BIC.
  }
  class(out) <- c("lavaan.vector","numeric")
  out
}

##' @importFrom stats pchisq
##' @importFrom lavaan lavListInspect
robustify <- function(ChiSq, object, h1 = NULL, baseline = FALSE, useImps) {
  test.names <- lavListInspect(object, "options")$test
  # lavaan 0.6-5: for now, we only acknowledge the first non-standard @test
  if (length(test.names) > 1L) {
    ## remove standard and any bootstrapped tests
    rm.idx <- which(test.names %in% c("standard","bootstrap","bollen.stine"))
    if (length(rm.idx) > 0L) {
      test.names <- test.names[-rm.idx]
    }
    ## only acknowledge the first scaled test statistic
    if (length(test.names) > 1L) {
      test.names <- test.names[1]
    }
  }
  scaleshift <- any(test.names %in% "scaled.shifted")
  if (baseline) {
    TEST <- lapply(object@baselineList[useImps], "[[", i = "test")
  } else TEST <- object@testList[useImps]
  d0 <- mean(sapply(TEST, function(x) x[[ test.names[1] ]][["df"]]))
  c0 <- mean(sapply(TEST, function(x) x[[ test.names[1] ]][["scaling.factor"]]))
  if (!is.null(h1)) {
    d1 <- mean(sapply(h1@testList[useImps], function(x) x[[ test.names[1] ]][["df"]]))
    c1 <- mean(sapply(h1@testList[useImps], function(x) x[[ test.names[1] ]][["scaling.factor"]]))
    delta_c <- (d0*c0 - d1*c1) / (d0 - d1)
    ChiSq["chisq.scaled"] <- ChiSq[["chisq"]] / delta_c
    ChiSq["df.scaled"] <- d0 - d1
    ChiSq["pvalue.scaled"] <- pchisq(ChiSq[["chisq.scaled"]],
                                     df = ChiSq[["df.scaled"]],
                                     lower.tail = FALSE)
    ChiSq["chisq.scaling.factor"] <- delta_c
  } else {
    ChiSq["chisq.scaled"] <- ChiSq[["chisq"]] / c0
    ChiSq["df.scaled"] <- d0
    if (scaleshift) {
      ## add average shift parameter (or average of sums, if nG > 1)
      shift <- mean(sapply(TEST, function(x) sum(x[[ test.names[1] ]][["shift.parameter"]]) ))
      ChiSq["chisq.scaled"] <- ChiSq[["chisq.scaled"]] + shift
      ChiSq["pvalue.scaled"] <- pchisq(ChiSq[["chisq.scaled"]],
                                       df = ChiSq[["df.scaled"]],
                                       lower.tail = FALSE)
      ChiSq["chisq.scaling.factor"] <- c0
      ChiSq["chisq.shift.parameters"] <- shift
    } else {
      ChiSq["pvalue.scaled"] <- pchisq(ChiSq[["chisq.scaled"]],
                                       df = ChiSq[["df.scaled"]],
                                       lower.tail = FALSE)
      ChiSq["chisq.scaling.factor"] <- c0
    }
  }
  ChiSq
}
semTools/R/runMI-modification.R0000644000176200001440000004276313532030004016102 0ustar liggesusers### Terrence D.
Jorgensen & Yves Rosseel
### Last updated: 29 August 2019
### adaptation of lavaan::modindices() for lavaan.mi-class objects

##' Modification Indices for Multiple Imputations
##'
##' Modification indices (1-\emph{df} Lagrange multiplier tests) from a
##' latent variable model fitted to multiple imputed data sets. Statistics
##' for releasing one or more fixed or constrained parameters in a model can
##' be calculated by pooling the gradient and information matrices
##' across imputed data sets in a method analogous to the Wald test proposed by
##' Li, Meng, Raghunathan, & Rubin (1991), or by pooling the complete-data
##' score-test statistics across imputed data sets (Li et al., 1991).
##'
##' @name modindices.mi
##' @aliases modificationIndices.mi modificationindices.mi modindices.mi
##' @importFrom lavaan lavInspect lavListInspect lavNames
##' @importFrom methods getMethod
##' @importFrom stats cov pchisq qchisq
##'
##' @param object An object of class \code{\linkS4class{lavaan.mi}}
##' @param test \code{character} indicating which pooling method to use.
##' \code{test = "D2"} (default) indicates that modification indices that were
##' calculated within each imputed data set will be pooled across imputations,
##' as described in Li, Meng, Raghunathan, & Rubin (1991) and Enders (2010).
##' \code{"D1"} indicates Li et al.'s (1991) proposed Wald test will be
##' applied to the gradient and information, and those pooled values will be
##' used to calculate modification indices in the usual manner.
##' @param omit.imps \code{character} vector specifying criteria for omitting
##' imputations from pooled results. Can include any of
##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the
##' default setting, which excludes any imputations that did not
##' converge or for which standard errors could not be computed. The
##' last option (\code{"no.npd"}) would exclude any imputations which
##' yielded a nonpositive definite covariance matrix for observed or
##' latent variables, which would include any "improper solutions" such
##' as Heywood cases.
##' @param standardized \code{logical}. If \code{TRUE}, two extra columns
##' (\code{$sepc.lv} and \code{$sepc.all}) will contain standardized values
##' for the EPCs. In the first column (\code{$sepc.lv}), standardization is
##' based on the variances of the (continuous) latent variables. In the second
##' column (\code{$sepc.all}), standardization is based on the variances
##' of both (continuous) observed and latent variables. (Residual) covariances
##' are standardized using (residual) variances.
##' @param cov.std \code{logical}. \code{TRUE} if \code{test == "D2"}.
##' If \code{TRUE} (default), the (residual)
##' observed covariances are scaled by the square-root of the diagonal elements
##' of the \eqn{\Theta} matrix, and the (residual) latent covariances are
##' scaled by the square-root of the diagonal elements of the \eqn{\Psi}
##' matrix. If \code{FALSE}, the (residual) observed covariances are scaled by
##' the square-root of the diagonal elements of the model-implied covariance
##' matrix of observed variables (\eqn{\Sigma}), and the (residual) latent
##' covariances are scaled by the square-root of the diagonal elements of the
##' model-implied covariance matrix of the latent variables.
##' @param information \code{character} indicating the type of information
##' matrix to use (check \code{\link{lavInspect}} for available options).
##' \code{"expected"} information is the default, which provides better ##' control of Type I errors. ##' @param power \code{logical}. If \code{TRUE}, the (post-hoc) power is ##' computed for each modification index, using the values of \code{delta} ##' and \code{alpha}. ##' @param delta The value of the effect size, as used in the post-hoc power ##' computation, currently using the unstandardized metric of the \code{$epc} ##' column. ##' @param alpha The significance level used for deciding if the modification ##' index is statistically significant or not. ##' @param high.power If the computed power is higher than this cutoff value, ##' the power is considered 'high'. If not, the power is considered 'low'. ##' This affects the values in the \code{$decision} column in the output. ##' @param sort. \code{logical}. If \code{TRUE}, sort the output using the ##' values of the modification index values. Higher values appear first. ##' @param minimum.value \code{numeric}. Filter output and only show rows with a ##' modification index value equal or higher than this minimum value. ##' @param maximum.number \code{integer}. Filter output and only show the first ##' maximum number rows. Most useful when combined with the \code{sort.} option. ##' @param na.remove \code{logical}. If \code{TRUE} (default), filter output by ##' removing all rows with \code{NA} values for the modification indices. ##' @param op \code{character} string. Filter the output by selecting only those ##' rows with operator \code{op}. ##' ##' @note When \code{test = "D2"}, each (S)EPC will be pooled by taking its ##' average across imputations. When \code{test = "D1"}, EPCs will be ##' calculated in the standard way using the pooled gradient and information, ##' and SEPCs will be calculated by standardizing the EPCs using model-implied ##' (residual) variances. ##' ##' @return A \code{data.frame} containing modification indices and (S)EPCs. ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' Adapted from \pkg{lavaan} source code, written by ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' \code{test = "D1"} method proposed by ##' Maxwell Mansolf (University of California, Los Angeles; ##' \email{mamansolf@@gmail.com}) #FIXME: replace note with reference once accepted paper has a DOI ##' ##' @references ##' Enders, C. K. (2010). \emph{Applied missing data analysis}. ##' New York, NY: Guilford. ##' ##' Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). ##' Significance levels from repeated \emph{p}-values with multiply-imputed ##' data.\emph{Statistica Sinica, 1}(1), 65--92. 
Retrieved from ##' https://www.jstor.org/stable/24303994 ##' ##' @examples ##' \dontrun{ ##' ## impose missing data for example ##' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), ##' "ageyr","agemo","school")] ##' set.seed(12345) ##' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) ##' age <- HSMiss$ageyr + HSMiss$agemo/12 ##' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ##' ##' ## impute missing data ##' library(Amelia) ##' set.seed(12345) ##' HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) ##' imps <- HS.amelia$imputations ##' ##' ## specify CFA model from lavaan's ?cfa help page ##' HS.model <- ' ##' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ##' ' ##' ##' out <- cfa.mi(HS.model, data = imps) ##' ##' modindices.mi(out) # default: Li et al.'s (1991) "D2" method ##' modindices.mi(out, test = "D1") # Li et al.'s (1991) "D1" method ##' ##' } ##' ##' @export modindices.mi <- function(object, test = c("D2","D1"), omit.imps = c("no.conv","no.se"), standardized = TRUE, cov.std = TRUE, information = "expected", # power statistics? power = FALSE, delta = 0.1, alpha = 0.05, high.power = 0.75, # customize output sort. = FALSE, minimum.value = 0.0, maximum.number = nrow(LIST), na.remove = TRUE, op = NULL) { stopifnot(inherits(object, "lavaan.mi")) useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) test <- tolower(test[1]) N <- lavListInspect(object, "ntotal") #FIXME: if (lavoptions$mimic == "EQS") N <- N - 1 # not in lavaan, why? 
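## Pooling strategy: with test = "D2", the 1-df modification indices computed
## within each imputation are pooled via calculate.D2(); with test = "D1",
## the statistics come from a pooled score test (lavTestScore.mi), and EPCs
## are then standardized using model-implied (residual) variances.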
# not ready for estimator = "PML" if (object@Options$estimator == "PML") { stop("Modification indices not yet implemented for estimator PML.") } # sanity check if (power) standardized <- TRUE ## use first available modification indices as template to store pooled results ngroups <- lavListInspect(object, "ngroups") nlevels <- object@Data@nlevels #FIXME: lavListInspect(object, "nlevels") myCols <- c("lhs","op","rhs") if (ngroups > 1L) myCols <- c(myCols,"block","group") if (nlevels > 1L) myCols <- c(myCols,"block","level") myCols <- unique(myCols) for (i in useImps) { LIST <- object@miList[[i]][myCols] nR <- try(nrow(LIST), silent = TRUE) if (class(nR) == "try-error" || is.null(nR)) { if (i == max(useImps)) { stop("No modification indices could be computed for any imputations.") } else next } else break } ## D2 pooling method if (test == "d2") { chiList <- lapply(object@miList[useImps], "[[", i = "mi") ## imputations in columns, parameters in rows pooledList <- apply(do.call(cbind, chiList), 1, function(x) { calculate.D2(x, DF = 1, asymptotic = TRUE) }) LIST$mi <- pooledList[1, ] # could be "F" or "chisq" ## diagnostics LIST$riv <- pooledList["ariv", ] LIST$fmi <- pooledList["fmi", ] ## also take average of epc & sepc.all epcList <- lapply(object@miList[useImps], "[[", i = "epc") LIST$epc <- rowMeans(do.call(cbind, epcList)) if (standardized) { sepcList <- lapply(object@miList[useImps], "[[", i = "sepc.lv") LIST$sepc.lv <- rowMeans(do.call(cbind, sepcList)) sepcList <- lapply(object@miList[useImps], "[[", i = "sepc.all") LIST$sepc.all <- rowMeans(do.call(cbind, sepcList)) fixed.x <- lavListInspect(object, "options")$fixed.x && length(lavNames(object, "ov.x")) if (fixed.x && "sepc.nox" %in% colnames(object@miList[useImps][[1]])) { sepcList <- lapply(object@miList[useImps], "[[", i = "sepc.nox") LIST$sepc.nox <- rowMeans(do.call(cbind, sepcList)) } } } else { scoreOut <- lavTestScore.mi(object, add = cbind(LIST, user = 10L, free = 1, start = 0), test = "d1", omit.imps = omit.imps, epc = TRUE, scale.W = FALSE, asymptotic = TRUE, information = information)$uni LIST$mi <- scoreOut$X2 LIST$riv <- scoreOut$riv LIST$fmi <- scoreOut$fmi LIST$epc <- scoreOut$epc #FIXME: use average across imputations? # standardize? if (standardized) { ## Need full parameter table for lavaan::standardizedSolution() ## Merge parameterEstimates() with modindices() oldPE <- getMethod("summary","lavaan.mi")(object, se = FALSE, output = "data.frame", omit.imps = omit.imps) PE <- lavaan::lav_partable_merge(oldPE, cbind(LIST, est = 0), remove.duplicated = TRUE, warn = FALSE) ## merge EPCs, using parameter labels (unavailable for estimates) rownames(LIST) <- paste0(LIST$lhs, LIST$op, LIST$rhs, ".g", LIST$group) #FIXME: multilevel? rownames(PE) <- paste0(PE$lhs, PE$op, PE$rhs, ".g", PE$group) PE[rownames(LIST), "epc"] <- LIST$epc ## need "exo" column? 
PT <- parTable(object) if ("exo" %in% names(PT)) { rownames(PT) <- paste0(PT$lhs, PT$op, PT$rhs, ".g", PT$group) PE[rownames(PT), "exo"] <- PT$exo } else PE$exo <- 0L rownames(LIST) <- NULL rownames(PE) <- NULL EPC <- PE$epc if (cov.std) { # replace epc values for variances by est values var.idx <- which(PE$op == "~~" & PE$lhs == PE$rhs) EPC[ var.idx ] <- PE$est[ var.idx ] } # two problems: # - EPC of variances can be negative, and that is perfectly legal # - EPC (of variances) can be tiny (near-zero), and we should # not divide by tiny variables small.idx <- which(PE$op == "~~" & PE$lhs == PE$rhs & abs(EPC) < sqrt( .Machine$double.eps ) ) if (length(small.idx) > 0L) EPC[small.idx] <- as.numeric(NA) # get the sign EPC.sign <- sign(PE$epc) ## pooled estimates for standardizedSolution() pooledest <- getMethod("coef", "lavaan.mi")(object, omit.imps = omit.imps) ## update @Model@GLIST for standardizedSolution(..., GLIST=) object@Model <- lavaan::lav_model_set_parameters(object@Model, x = pooledest) PE$sepc.lv <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.lv", cov.std = cov.std, partable = PE, GLIST = object@Model@GLIST, est = abs(EPC))$est.std PE$sepc.all <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.all", cov.std = cov.std, partable = PE, GLIST = object@Model@GLIST, est = abs(EPC))$est.std fixed.x <- lavListInspect(object, "options")$fixed.x && length(lavNames(object, "ov.x")) if (fixed.x) { PE$sepc.nox <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.nox", cov.std = cov.std, partable = PE, GLIST = object@Model@GLIST, est = abs(EPC))$est.std } if (length(small.idx) > 0L) { PE$sepc.lv[small.idx] <- 0 PE$sepc.all[small.idx] <- 0 if (fixed.x) PE$sepc.nox[small.idx] <- 0 } ## remove unnecessary columns, then merge if (is.null(LIST$block)) PE$block <- NULL PE$est <- NULL PE$mi <- NULL PE$epc <- NULL PE$exo <- NULL LIST <- merge(LIST, PE, sort = FALSE) class(LIST) <- c("lavaan.data.frame","data.frame") } } # power? if (power) { LIST$sepc.lv <- NULL LIST$delta <- delta # FIXME: this is using epc in unstandardized metric # this would be much more useful in standardized metric # we need a standardize.est.all.reverse function... LIST$ncp <- (LIST$mi / (LIST$epc*LIST$epc)) * (delta*delta) LIST$power <- 1 - pchisq(qchisq((1.0 - alpha), df=1), df=1, ncp=LIST$ncp) LIST$decision <- character( length(LIST$power) ) # five possibilities (Table 6 in Saris, Satorra, van der Veld, 2009) mi.significant <- ifelse( 1 - pchisq(LIST$mi, df=1) < alpha, TRUE, FALSE ) high.power <- LIST$power > high.power # FIXME: sepc.all or epc?? #epc.high <- LIST$sepc.all > LIST$delta epc.high <- LIST$epc > LIST$delta LIST$decision[ which(!mi.significant & !high.power)] <- "(i)" LIST$decision[ which( mi.significant & !high.power)] <- "**(m)**" LIST$decision[ which(!mi.significant & high.power)] <- "(nm)" LIST$decision[ which( mi.significant & high.power & !epc.high)] <- "epc:nm" LIST$decision[ which( mi.significant & high.power & epc.high)] <- "*epc:m*" #LIST$decision[ which(mi.significant & high.power) ] <- "epc" #LIST$decision[ which(mi.significant & !high.power) ] <- "***" #LIST$decision[ which(!mi.significant & !high.power) ] <- "(i)" } # sort? if (sort.) 
  {
    LIST <- LIST[order(LIST$mi, decreasing = TRUE),]
  }
  if (minimum.value > 0.0) {
    LIST <- LIST[!is.na(LIST$mi) & LIST$mi > minimum.value,]
  }
  if (maximum.number < nrow(LIST)) {
    LIST <- LIST[seq_len(maximum.number),]
  }
  if (na.remove) {
    idx <- which(is.na(LIST$mi))
    if (length(idx) > 0) LIST <- LIST[-idx,]
  }
  if (!is.null(op)) {
    idx <- LIST$op %in% op # logical vector, so test any(), not length()
    if (any(idx)) LIST <- LIST[idx,]
  }

  # add header
  # TODO: small explanation of the columns in the header?
  # attr(LIST, "header") <-
  #  c("modification indices for newly added parameters only; to\n",
  #    "see the effects of releasing equality constraints, use the\n",
  #    "lavTestScore() function")

  LIST
}

## alias
##' @rdname modindices.mi
##' @aliases modindices.mi modificationIndices.mi
##' @export
modificationIndices.mi <- modindices.mi
semTools/R/singleParamTest.R0000644000176200001440000002354313432435606015520 0ustar liggesusers### Sunthud Pornprasertmanit
### Last updated: 25 June 2018


#' Single Parameter Test Divided from Nested Model Comparison
#'
#' In comparing two nested models, the \eqn{\Delta\chi^2} test may indicate that
#' the two models differ. However, like other omnibus tests, researchers do
#' not know which fixed parameters or constraints make these two models
#' different. This function helps researchers identify the significant
#' parameters.
#'
#' This function first identifies the differences between the two models. The
#' model with more free parameters is referred to as the parent model, and the
#' model with fewer free parameters is referred to as the nested model. Three
#' tests are implemented here:
#'
#' \enumerate{
#'  \item \code{free}: The nested model is used as a template. Then,
#' one parameter indicating the differences between two models is freed. The new
#' model is compared with the nested model. This process is repeated for all
#' differences between two models.
#'  \item \code{fix}: The parent model is used
#' as a template. Then, one parameter indicating the differences between two
#' models is fixed or constrained to be equal to other parameters. The new
#' model is then compared with the parent model. This process is repeated for
#' all differences between two models.
#'  \item \code{mi}: No longer available
#' because the test of modification indices is not consistent. For example, if
#' two parameters are equality constrained, the modification index from the
#' first parameter is not equal to that from the second parameter.
#' }
#'
#' Note that this function does not adjust for the inflated Type I error rate
#' from multiple tests.
#'
#' @param model1 Model 1.
#' @param model2 Model 2. Note that the two models must be nested. Further,
#' the order of parameters in their parameter tables must be the same; thus,
#' nested models with different scale identifications may not be testable
#' with this function.
#' @param return.fit Return the submodels fitted by this function.
#' @param method The method used to calculate the likelihood ratio test. See
#' \code{\link[lavaan]{lavTestLRT}} for available options.
#' @return If \code{return.fit = FALSE}, the result tables are provided.
#' \eqn{\chi^2} and \emph{p} value are provided for all methods. Note that each
#' \eqn{\chi^2} is based on 1 \emph{df}. Expected parameter changes
#' and their standardized forms are also provided.
#'
#' If \code{return.fit = TRUE}, a list with two elements is provided. The
#' first element is the tabular result. The second element is the submodels
#' used in the \code{free} and \code{fix} methods.
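#'
#' As a hypothetical illustration (not actual output), a result row labeled
#' \code{f1=~x2} with \code{free.chi = 6.8}, \code{free.p = .009},
#' \code{fix.chi = 7.1}, and \code{fix.p = .008} would flag the constraint on
#' the loading of \code{x2} as a significant source of misfit under both the
#' \code{free} and \code{fix} methods.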
#' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
#' @examples
#'
#' library(lavaan)
#'
#' # Nested model comparison by hand
#' HS.model1 <- ' visual =~ x1 + x2 + x3
#'                textual =~ x4 + x5 + x6'
#' HS.model2 <- ' visual =~ a*x1 + a*x2 + a*x3
#'                textual =~ b*x4 + b*x5 + b*x6'
#'
#' m1 <- cfa(HS.model1, data = HolzingerSwineford1939, std.lv = TRUE,
#'           estimator = "MLR")
#' m2 <- cfa(HS.model2, data = HolzingerSwineford1939, std.lv = TRUE,
#'           estimator = "MLR")
#' anova(m1, m2)
#' singleParamTest(m1, m2)
#'
#' ## Nested model comparison from the measurementInvariance function
#' HW.model <- ' visual =~ x1 + x2 + x3
#'               textual =~ x4 + x5 + x6
#'               speed =~ x7 + x8 + x9 '
#'
#' models <- measurementInvariance(model = HW.model, data = HolzingerSwineford1939,
#'                                 group = "school")
#' singleParamTest(models[[1]], models[[2]])
#'
#' ## Note that the comparison between weak (Model 2) and scalar invariance
#' ## (Model 3) cannot be done by this function, because the weak invariance
#' ## model fixes factor means as 0 in Group 2 but the strong invariance model
#' ## frees the factor means in Group 2. Users may try to compare
#' ## strong (Model 3) and means invariance models by this function.
#'
#' @export
singleParamTest <- function(model1, model2, return.fit = FALSE,
                            method = "satorra.bentler.2001") {
  # Check nested models without any swaps
  if(lavaan::fitMeasures(model1, "df")[[1]] > lavaan::fitMeasures(model2, "df")[[1]]) {
    fit0 <- model1
    fit1 <- model2
  } else {
    fit0 <- model2
    fit1 <- model1
  }
  # fit0 = Nested model, fit1 = Parent model
  pt1 <- parTable(fit1)
  pt0 <- parTable(fit0)
  namept1 <- paramNameFromPt(pt1)
  namept0 <- paramNameFromPt(pt0)

  # Two possible constraints: fixed parameters and equality constraints
  free1 <- (pt1$free != 0) & !(duplicated(pt1$free))
  free0 <- (pt0$free != 0) & !(duplicated(pt0$free))
  iscon1 <- pt1$op == "=="
  iscon0 <- pt0$op == "=="
  con1 <- list(id = integer(0), lhs = character(0), op = character(0), rhs = character(0))
  con0 <- list(id = integer(0), lhs = character(0), op = character(0), rhs = character(0))
  if(any(iscon1)) con1 <- list(id = pt1$id[iscon1], lhs = pt1$lhs[iscon1],
                               op = pt1$op[iscon1], rhs = pt1$rhs[iscon1])
  if(any(iscon0)) con0 <- list(id = pt0$id[iscon0], lhs = pt0$lhs[iscon0],
                               op = pt0$op[iscon0], rhs = pt0$rhs[iscon0])

  if(length(free1[!iscon1]) != length(free0[!iscon0]))
    stop("Parameter tables of the two models do not have equal lengths, so this function cannot be used.")
  if(!all(free1[free0]))
    stop("Models are not nested, or are not arranged in the way that this function requires.")
  if(sum(iscon1) > sum(iscon0))
    stop("There are equality constraints in the model with fewer degrees of freedom that do not exist in the model with more degrees of freedom. Thus, the two models are not nested.")
  # all.equal() returns a character string (not FALSE) when objects differ,
  # so wrap it in isTRUE() before negating
  if(!isTRUE(all.equal(lapply(pt1[2:4], "[", !iscon1),
                       lapply(pt0[2:4], "[", !iscon0))))
    stop("This function requires the parameter tables of both models to list the same parameters in the same order.")

  # Find fixed values or constraints
  difffree <- !free0[!iscon0] & free1[!iscon1]
  textcon1 <- paste0(con1$lhs, con1$op, con1$rhs)
  textcon0 <- paste0(con0$lhs, con0$op, con0$rhs)
  indexsamecon <- match(textcon1, textcon0)
  indexdiffcon <- setdiff(seq_along(textcon0), indexsamecon)
  diffcon <- lapply(con0, "[", indexdiffcon)
  fixval <- which(difffree)
  index <- c(fixval, diffcon$id)
  if(length(index) <= 0) stop("Two models are identical.
No single parameter test can be done.") # Find nested model and release 1-by-1 freeCon <- matrix(NA, length(index), 2) colnames(freeCon) <- c("free.chi", "free.p") listFreeCon <- list() runnum <- 1 for(i in seq_along(fixval)) { temp <- freeParTable(pt0, pt0$lhs[fixval[i]], pt0$op[fixval[i]], pt0$rhs[fixval[i]], pt0$group[fixval[i]]) tryresult <- try(tempfit <- refit(temp, fit0), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit0, method = method), silent = TRUE) if(!is(compresult, "try-error")) freeCon[runnum,] <- unlist(modelcomp[2, c(5, 7)]) } listFreeCon <- c(listFreeCon, tryresult) runnum <- runnum + 1 } rownames(freeCon)[seq_along(fixval)] <- names(listFreeCon)[seq_along(fixval)] <- namept0[fixval] for(i in seq_along(diffcon$id)) { temp <- removeEqCon(pt0, diffcon$id[i]) tryresult <- try(tempfit <- refit(temp, fit0), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit0, method = method), silent = TRUE) if(!is(compresult, "try-error")) freeCon[runnum,] <- unlist(modelcomp[2, c(5, 7)]) } listFreeCon <- c(listFreeCon, tryresult) runnum <- runnum + 1 } poscon <- seq_along(diffcon$id) + length(fixval) rownames(freeCon)[poscon] <- names(listFreeCon)[poscon] <- namept0[diffcon$id] # Find parent model and constrain 1-by-1 fixCon <- matrix(NA, length(index), 2) colnames(fixCon) <- c("fix.chi", "fix.p") listFixCon <- list() runnum <- 1 for(i in seq_along(fixval)) { temp <- fixParTable(pt1, pt1$lhs[fixval[i]], pt1$op[fixval[i]], pt1$rhs[fixval[i]], pt1$group[fixval[i]], pt0$ustart[fixval[i]]) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[runnum,] <- unlist(modelcomp[2,c(5, 7)]) } listFixCon <- c(listFixCon, tryresult) runnum <- runnum + 1 } rownames(fixCon)[seq_along(fixval)] <- names(listFixCon)[seq_along(fixval)] <- namept0[fixval] for(i in seq_along(diffcon$id)) { temp <- patMerge(pt1, list(lhs = diffcon$lhs[i], op = diffcon$op[i], rhs = diffcon$rhs[i])) tryresult <- try(tempfit <- refit(temp, fit1), silent = TRUE) if(!is(tryresult, "try-error")) { compresult <- try(modelcomp <- lavaan::lavTestLRT(tempfit, fit1, method = method), silent = TRUE) if(!is(compresult, "try-error")) fixCon[runnum,] <- unlist(modelcomp[2, c(5, 7)]) } listFixCon <- c(listFixCon, tryresult) runnum <- runnum + 1 } poscon <- seq_along(diffcon$id) + length(fixval) rownames(fixCon)[poscon] <- names(listFixCon)[poscon] <- namept0[diffcon$id] result <- cbind(freeCon, fixCon) if(return.fit) { return(invisible(list(result = result, models = list(free = listFreeCon, fix = listFixCon)))) } else { return(result) } } ## ---------------- ## Hidden Functions ## ---------------- paramNameFromPt <- function(pt) { ngroups <- max(pt$group) result <- NULL if (ngroups == 1) { result <- paste0(pt$lhs, pt$op, pt$rhs) } else { grouplab <- paste0(".g", pt$group) grouplab[grouplab == ".g0" | grouplab == ".g1"] <- "" result <- paste0(pt$lhs, pt$op, pt$rhs, grouplab) } con <- pt$op == "==" pt$lhs[con] <- result[match(pt$lhs[con], pt$plabel)] pt$rhs[con] <- result[match(pt$rhs[con], pt$plabel)] result[con] <- paste(pt$lhs[con], pt$op[con], pt$rhs[con]) result } #' @importFrom lavaan lavInspect refit <- function(pt, object, resetstart = TRUE) { if (resetstart && "start" %in% names(pt)) pt <- pt[-which("start" == names(pt))] previousCall <- 
lavInspect(object, "call")
  args <- previousCall[-1]
  args$model <- pt
  funcall <- as.character(previousCall[[1]])
  tempfit <- do.call(funcall[length(funcall)], args)
}
semTools/R/tukeySEM.R0000644000176200001440000000513213432435606014120 0ustar liggesusers### Alexander M. Schoemann
### Last updated: 9 March 2018


#' Tukey's WSD post-hoc test of means for unequal variance and sample size
#'
#' This function computes Tukey's WSD post hoc test of means when variances and
#' sample sizes are not equal across groups. It can be used as a post hoc test
#' when comparing latent means in multiple group SEM.
#'
#' After conducting an omnibus test of means across three or more groups,
#' researchers often wish to know which sets of means differ at a particular
#' Type I error rate. Tukey's WSD test holds the error rate stable across
#' multiple comparisons of means. This function implements an adaptation of
#' Tukey's WSD test from Maxwell & Delaney (2004), which allows variances and
#' sample sizes to differ across groups.
#'
#'
#' @importFrom stats ptukey
#'
#' @param m1 Mean of group 1.
#' @param m2 Mean of group 2.
#' @param var1 Variance of group 1.
#' @param var2 Variance of group 2.
#' @param n1 Sample size of group 1.
#' @param n2 Sample size of group 2.
#' @param ng Total number of groups to be compared (i.e., the number of groups
#' compared in the omnibus test).
#' @return A vector with three elements:
#' \enumerate{
#'  \item \code{q}: The \emph{q} statistic
#'  \item \code{df}: The degrees of freedom for the \emph{q} statistic
#'  \item \code{p}: A \emph{p} value based on the \emph{q} statistic, \emph{df},
#'   and the total number of groups to be compared
#' }
#' @author Alexander M. Schoemann (East Carolina University;
#' \email{schoemanna@@ecu.edu})
#' @references Maxwell, S. E., & Delaney, H. D. (2004). \emph{Designing
#' experiments and analyzing data: A model comparison perspective} (2nd ed.).
#' Mahwah, NJ: Lawrence Erlbaum Associates.
#' @examples
#'
#' ## For a case where three groups have been compared:
#' ## Group 1: mean = 3.91, var = 0.46, n = 246
#' ## Group 2: mean = 3.96, var = 0.62, n = 465
#' ## Group 3: mean = 2.94, var = 1.07, n = 64
#'
#' ## compare group 1 and group 2
#' tukeySEM(3.91, 3.96, 0.46, 0.62, 246, 465, 3)
#'
#' ## compare group 1 and group 3
#' tukeySEM(3.91, 2.94, 0.46, 1.07, 246, 64, 3)
#'
#' ## compare group 2 and group 3
#' tukeySEM(3.96, 2.94, 0.62, 1.07, 465, 64, 3)
#'
#' @export
tukeySEM <- function(m1, m2, var1, var2, n1, n2, ng) {
  qNum <- abs(m1 - m2)
  qDenom <- sqrt(((var1/n1) + (var2/n2))/2)
  Tukeyq <- qNum / qDenom
  ## Welch-Satterthwaite approximation to the df
  Tukeydf <- ((var1/n1) + (var2/n2))^2 /
    (((var1/n1)^2 / (n1 - 1)) + ((var2/n2)^2 / (n2 - 1)))
  c(q = Tukeyq, df = Tukeydf, p = 1 - ptukey(Tukeyq, ng, Tukeydf))
}

## Example from Schoemann (2013)
## Bio vs. political science on evo misconceptions
#tukeySEM(3.91, 3.96, .46, .62, 246, 425, 3)
semTools/R/EmpKaiser.R0000644000176200001440000001357213432435606014275 0ustar liggesusers### Ylenio Longo
### Last updated: 9 March 2018


#' Empirical Kaiser criterion
#'
#' Identify the number of factors to extract based on the Empirical Kaiser
#' Criterion (EKC). The analysis can be run on a \code{data.frame} or data
#' \code{matrix} (\code{data}), or on a correlation or covariance matrix
#' (\code{sample.cov}) and the sample size (\code{sample.nobs}). A
#' \code{data.frame} is returned with two columns: the eigenvalues from your
#' data or covariance matrix and the reference eigenvalues. The number of
#' factors suggested by the Empirical Kaiser Criterion (i.e. the sample
#' eigenvalues greater than the reference eigenvalues), and the number of
#' factors suggested by the original Kaiser Criterion (i.e. sample eigenvalues
#' > 1), are printed above the output.
#'
#'
#' @importFrom stats cov cov2cor
#'
#' @param data A \code{data.frame} or data \code{matrix} containing columns of
#' variables to be factor-analyzed.
#' @param sample.cov A covariance or correlation matrix can be used, instead of
#' \code{data}, to estimate the eigenvalues.
#' @param sample.nobs Number of observations (i.e. sample size) if
#' \code{is.null(data)} and \code{sample.cov} is used.
#' @param missing If "listwise", cases with missing values are removed listwise
#' from the data frame. If "direct" or "ml" or "fiml" and the estimator is
#' maximum likelihood, an EM algorithm is used to estimate the unrestricted
#' covariance matrix (and mean vector). If "pairwise", pairwise deletion is
#' used. If "default", the value is set depending on the estimator and the
#' mimic option (see details in \link[lavaan]{lavCor}).
#' @param ordered Character vector. Only used if object is a \code{data.frame}.
#' Treat these variables as ordered (ordinal) variables. Importantly, all other
#' variables will be treated as numeric (unless \code{is.ordered == TRUE} in
#' \code{data}). (see also \link[lavaan]{lavCor})
#' @param plot logical. Whether to print a scree plot comparing the sample
#' eigenvalues with the reference eigenvalues.
#' @return A \code{data.frame} showing the sample and reference eigenvalues.
#'
#' The number of factors suggested by the Empirical Kaiser Criterion (i.e. the
#' sample eigenvalues greater than the reference eigenvalues) is returned as an
#' attribute (see Examples).
#'
#' The number of factors suggested by the original Kaiser Criterion (i.e.
#' sample eigenvalues > 1) is also printed as a header to the \code{data.frame}.
#' @author Ylenio Longo (University of Nottingham;
#' \email{yleniolongo@@gmail.com})
#'
#' Terrence D. Jorgensen (University of Amsterdam;
#' \email{TJorgensen314@@gmail.com})
#' @references Braeken, J., & van Assen, M. A. L. M. (2017). An empirical Kaiser
#' criterion. \emph{Psychological Methods, 22}(3), 450--466.
doi:10.1037/met0000074 #' @examples #' #' ## Simulate data with 3 factors #' model <- ' #' f1 =~ .3*x1 + .5*x2 + .4*x3 #' f2 =~ .3*x4 + .5*x5 + .4*x6 #' f3 =~ .3*x7 + .5*x8 + .4*x9 #' ' #' dat <- simulateData(model, seed = 123) #' ## save summary statistics #' myCovMat <- cov(dat) #' myCorMat <- cor(dat) #' N <- nrow(dat) #' #' ## Run the EKC function #' (out <- efa.ekc(dat)) #' #' ## To extract the recommended number of factors using the EKC: #' attr(out, "nfactors") #' #' ## If you do not have raw data, you can use summary statistics #' (x1 <- efa.ekc(sample.cov = myCovMat, sample.nobs = N, plot = FALSE)) #' (x2 <- efa.ekc(sample.cov = myCorMat, sample.nobs = N, plot = FALSE)) #' #' @export efa.ekc <- function(data = NULL, sample.cov = NULL, sample.nobs = NULL, missing = "default", ordered = NULL, plot = TRUE) { ## if data if (!is.null(data)) { data <- as.data.frame(data) R <- lavaan::lavCor(data, missing = missing, ordered = ordered) #correlations j <- dim(data)[2] #number of variables n <- dim(data)[1] #sample size } else { ## if covariance matrix if (max(diag(sample.cov)) != 1 & min(diag(sample.cov)) != 1) { R <- cov2cor(sample.cov) j <- dim(R)[2] #number of variables n <- sample.nobs #sample size } else { ## if correlation matrix R <- sample.cov j <- dim(R)[2] #number of variables n <- sample.nobs #sample size } } g <- j/n #gamma: var / sample l <- (1 + sqrt(g))^2 #1st reference eigenvalue e <- eigen(R)$values #eigenvalues v <- cumsum(e) #Define cumulatively summed eigenvalue vector v1 <- v[1:j - 1] #omit last element v2 <- c(0, v1) #put a zero upfront w <- sort(1:j, decreasing = TRUE) #eigenvalue order vector ref <- (((j - v2)/w) * l) #EKC reference eigenvalues # results Eigenvalues <- data.frame(Sample = e, Ref = ref) #sample and reference eigenvalues rownames(Eigenvalues) <- 1:j class(Eigenvalues) <- c("lavaan.data.frame","data.frame") ## add no. factors to extract as attribute, using each criterion nfactors_EKC <- which(!(Eigenvalues[, 1] > Eigenvalues[, 2]))[1] - 1 # EKC nfactors_KC <- which(!(Eigenvalues[, 1] > 1))[1] - 1 # Kaiser Criterion attr(Eigenvalues, "header") <- paste(" Empirical Kaiser Criterion suggests", nfactors_EKC, "factors.\n", "Traditional Kaiser Criterion suggests", nfactors_KC, "factors.") attr(Eigenvalues, "nfactors") <- nfactors_EKC if (plot) { plot(Eigenvalues[, 1], type = "b", pch = 20, cex = 0.9, col = "black", main = "Empirical Kaiser Criterion\nScree Plot", ylab = "Eigenvalues", ylim = c(min(Eigenvalues), max(ceiling(Eigenvalues))), xlab = "Factor Number", xlim = c(1, j)) lines(Eigenvalues[, 2], lty = "dashed", col = "blue") legend("topright", c(" Data", " Empirical\n Reference", " Kaiser Criterion"), col = c("black","blue","gray"), bty = "n", pch = c(20, NA, NA), lty = c("solid","dashed","solid"), merge = TRUE) abline(h = 1, col = "gray") # Kaiser Criterion } return(Eigenvalues) } semTools/R/powerAnalysisSS.R0000644000176200001440000001563613432435606015532 0ustar liggesusers### Alexander M. Schoemann & Terrence D. Jorgensen ### Last updated: 15 September 2018 ### Function to apply Satorra & Saris method for chi-squared power analysis ## Steps: ## 1. Specify model (use lavaan syntax based on simulateData) ## 2. get model implied covariance matrix ## 3. Fit model with parameter constrained to 0 (or take a model specification for multiparameter tests?) ## 4. Use chi square from step 3 as non-centrality parameter to get power. 
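
## A minimal sketch of Steps 1-4 (hypothetical model syntax, not run here;
## note that every parameter in popModel must have a fixed value, and a
## single invalid constraint is assumed, so df = 1):
##   Sigma <- lavaan::fitted(lavaan::cfa(popModel))$cov             # Steps 1-2
##   fit <- lavaan::cfa(powerModel, sample.cov = Sigma,             # Step 3
##                      sample.nobs = 200)
##   ncp <- lavaan::fitMeasures(fit, "chisq")
##   pchisq(qchisq(1 - .05, df = 1), df = 1, ncp = ncp,             # Step 4
##          lower.tail = FALSE)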
## Alternatively, combine steps 1 and 2 by providing population moments directly


##' Power for model parameters
##'
##' Apply Satorra & Saris (1985) method for chi-squared power analysis.
##'
##' Specify all non-zero parameters in a population model, either by using
##' lavaan syntax (\code{popModel}) or by submitting a population covariance
##' matrix (\code{Sigma}) and optional mean vector (\code{mu}) implied by the
##' population model. Then specify an analysis model that constrains at least
##' one nonzero parameter to an incorrect value. Specify the number of such
##' incorrectly constrained parameters in the \code{nparam} argument.
##'
##'
##' @importFrom stats qchisq pchisq
##'
##' @param powerModel lavaan \code{\link[lavaan]{model.syntax}} for the model to
##' be analyzed. This syntax should constrain at least one nonzero parameter
##' to 0 (or another number).
##' @param n \code{integer}. Sample size used in power calculation, or a vector
##' of sample sizes if analyzing a multigroup model. If
##' \code{length(n) < length(Sigma)} when \code{Sigma} is a list, \code{n} will
##' be recycled.
##' @param nparam \code{integer}. Number of invalid constraints in \code{powerModel}.
##' @param popModel lavaan \code{\link[lavaan]{model.syntax}} specifying the
##' data-generating model. This syntax should specify values for all nonzero
##' parameters in the model. If \code{length(n) > 1}, the same population
##' values will be used for each group. Different population values per group
##' can only be specified by utilizing \code{Sigma} (and \code{mu}).
##' @param mu numeric or list. For a single-group model, a vector of population
##' means. For a multigroup model, a list of vectors (one per group). If
##' \code{mu} and \code{popModel} are missing, mean structure will be excluded
##' from the analysis.
##' @param Sigma matrix or list. For a single-group model, a population covariance
##' matrix. For a multigroup model, a list of matrices (one per group). If
##' missing, \code{popModel} will be used to generate a model-implied Sigma.
##' @param fun character. Name of lavaan function used to fit \code{powerModel}
##' (i.e., \code{"cfa"}, \code{"sem"}, \code{"growth"}, or \code{"lavaan"}).
##' @param alpha Type I error rate used to set a criterion for rejecting H0.
##' @param ... additional arguments to pass to \code{\link[lavaan]{lavaan}}.
##'
##' @author
##' Alexander M. Schoemann (East Carolina University; \email{schoemanna@@ecu.edu})
##'
##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com})
##'
##' @references
##' Satorra, A., & Saris, W. E. (1985). Power of the likelihood ratio
##' test in covariance structure analysis. \emph{Psychometrika, 50}, 83--90.
##' doi:10.1007/BF02294150
##'
##' @examples
##' ## Specify population values. Note every parameter has a fixed value.
##' modelP <- '
##'   f1 =~ .7*V1 + .7*V2 + .7*V3 + .7*V4
##'   f2 =~ .7*V5 + .7*V6 + .7*V7 + .7*V8
##'   f1 ~~ .3*f2
##'   f1 ~~ 1*f1
##'   f2 ~~ 1*f2
##'   V1 ~~ .51*V1
##'   V2 ~~ .51*V2
##'   V3 ~~ .51*V3
##'   V4 ~~ .51*V4
##'   V5 ~~ .51*V5
##'   V6 ~~ .51*V6
##'   V7 ~~ .51*V7
##'   V8 ~~ .51*V8
##' '
##' ## Specify analysis model. Note the parameter of interest, f1~~f2, is fixed to 0.
##' modelA <- '
##'   f1 =~ V1 + V2 + V3 + V4
##'   f2 =~ V5 + V6 + V7 + V8
##'   f1 ~~ 0*f2
##' '
##' ## Calculate power
##' SSpower(powerModel = modelA, popModel = modelP, n = 150, nparam = 1,
##'         std.lv = TRUE)
##'
##' ## Get power for a range of sample sizes
##'
##' Ns <- seq(100, 500, 40)
##' Power <- rep(NA, length(Ns))
##' for(i in 1:length(Ns)) {
##'   Power[i] <- SSpower(powerModel = modelA, popModel = modelP,
##'                       n = Ns[i], nparam = 1, std.lv = TRUE)
##' }
##' plot(x = Ns, y = Power, type = "l", xlab = "Sample Size")
##'
##' ## Specify second population to calculate power for multigroup model
##'
##' popMoments1 <- fitted(cfa(modelP))
##' modelP2 <- '
##'   f1 =~ .7*V1 + .7*V2 + .7*V3 + .7*V4
##'   f2 =~ .7*V5 + .7*V6 + .7*V7 + .7*V8
##'   f1 ~~ .5*f2 ## higher correlation in Group 2
##'   f1 ~~ 1*f1
##'   f2 ~~ 1*f2
##'   V1 ~~ .51*V1
##'   V2 ~~ .51*V2
##'   V3 ~~ .51*V3
##'   V4 ~~ .51*V4
##'   V5 ~~ .51*V5
##'   V6 ~~ .51*V6
##'   V7 ~~ .51*V7
##'   V8 ~~ .51*V8
##' '
##' popMoments2 <- fitted(cfa(modelP2))
##' modelA2 <- '
##'   f1 =~ V1 + V2 + V3 + V4
##'   f2 =~ V5 + V6 + V7 + V8
##'   f1 ~~ c(0, 0)*f2
##' '
##' mu <- list(popMoments1$mean, popMoments2$mean) # ignored if NULL
##' Sigma <- list(popMoments1$cov, popMoments2$cov)
##' SSpower(powerModel = modelA2, mu = mu, Sigma = Sigma,
##'         n = c(60, 65), nparam = 2)
##'
##' @export
SSpower <- function(powerModel, n, nparam, popModel, mu, Sigma,
                    fun = "cfa", alpha = .05, ...) {
  if (missing(Sigma)) {
    ## Two item list, first item is covariance matrix, second item is mean vector
    popMoments <- lavaan::fitted(do.call(fun, list(model = popModel)))
    ## without data, can't apply fitted() to multigroup model syntax, so
    ## save the same fitted moments for each group
    if (length(n) > 1L) {
      Sigma <- list()
      mu <- if (!is.null(popMoments$mean)) list() else NULL
      for (g in 1:length(n)) {
        Sigma[[g]] <- popMoments$cov
        if (!is.null(popMoments$mean)) mu[[g]] <- popMoments$mean
      }
    } else {
      ## single group
      Sigma <- popMoments$cov
      mu <- popMoments$mean
    }

  } else {
    ## otherwise, user-supplied moments
    if (is.list(Sigma)) {
      nG <- length(Sigma)
      if (length(n) < nG) n <- rep(n, length.out = nG)
      if (length(n) > nG) n <- n[1:nG]
      no.mu <- missing(mu)
      if (!no.mu) null.mu <- any(sapply(mu, is.null))
      if (no.mu || null.mu) {
        mu <- NULL
      }
    } else if (is.matrix(Sigma)) {
      n <- n[[1]]
      if (missing(mu)) {
        mu <- NULL
      } else if (!is.numeric(mu) && !is.null(mu)) {
        stop('mu must be a numeric vector, or a list (one vector per group)')
      }
    } else stop('Sigma must be a covariance matrix, or a list (one matrix per group)')
  }

  ## Fit (probably constrained) analysis model
  dots <- list(...)
  funArgs <- list(model = powerModel, sample.cov = Sigma,
                  sample.mean = mu, sample.nobs = n)
  useArgs <- c(funArgs, dots[setdiff(names(dots), names(funArgs))])
  fit <- do.call(fun, useArgs)

  ## get NCP from chi square
  ncp <- lavaan::fitmeasures(fit)["chisq"]
  ## critical value under H0
  critVal <- qchisq(alpha, df = nparam, lower.tail = FALSE)
  ## return power
  pchisq(critVal, df = nparam, ncp = ncp, lower.tail = FALSE)
}
semTools/R/NET.R0000644000176200001440000002617113453334114013041 0ustar liggesusers### Terrence D.
Jorgensen ### Last updated: 10 April 2019 ### semTools functions for Nesting and Equivalence Testing ## ----------------- ## Class and Methods ## ----------------- ##' Class For the Result of Nesting and Equivalence Testing ##' ##' This class contains the results of nesting and equivalence testing among ##' multiple models ##' ##' ##' @name Net-class ##' @aliases Net-class show,Net-method summary,Net-method ##' @docType class ##' ##' @slot test Logical \code{matrix} indicating nesting/equivalence among models ##' @slot df The degrees of freedom of tested models ##' ##' @section Objects from the Class: Objects can be created via the ##' \code{\link{net}} function. ##' ##' @param object An object of class \code{Net}. ##' ##' @return ##' \item{show}{\code{signature(object = "Net")}: prints the logical matrix of ##' test results.} ##' \item{summary}{\code{signature(object = "Net")}: prints a narrative ##' description of results. The original \code{object} is invisibly returned.} ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\link{net}} ##' ##' @examples ##' ##' # See the example in the net function. ##' setClass("Net", representation(test = "matrix", df = "vector")) ##' @rdname Net-class ##' @aliases show,Net-method ##' @export setMethod("show", "Net", function(object) { if (length(object@test)) { m <- as.matrix(unclass(object@test)) m[upper.tri(m, diag = TRUE)] <- "" cat(" If cell [R, C] is TRUE, the model in row R is nested within column C. If the models also have the same degrees of freedom, they are equivalent. NA indicates the model in column C did not converge when fit to the implied means and covariance matrix from the model in row R. The hidden diagonal is TRUE because any model is equivalent to itself. The upper triangle is hidden because for models with the same degrees of freedom, cell [C, R] == cell [R, C]. For all models with different degrees of freedom, the upper diagonal is all FALSE because models with fewer degrees of freedom (i.e., more parameters) cannot be nested within models with more degrees of freedom (i.e., fewer parameters). \n") print(m, quote = FALSE) } else { cat(data.class(object@test), "(0)\n", sep = "") } invisible(object) }) ##' @rdname Net-class ##' @aliases summary,Net-method ##' @export setMethod("summary", "Net", function(object) { DFs <- object@df x <- object@test mods <- colnames(x) for (R in 2:nrow(x)) { for (C in (R - 1):1) { ## if model didn't converge (logical value is missing), go to next iteration if (is.na(x[R, C])) next ## if the models are not nested, go to next iteration if (!x[R, C]) next ## choose message based on whether models are equivalent or nested if (identical(DFs[R], DFs[C])) { rel <- "equivalent to" } else { rel <- "nested within" } cat("Model \"", mods[R], "\" is ", rel, " model \"", mods[C], "\"\n", sep = "") } } invisible(object) }) ## -------------------- ## Constructor Function ## -------------------- ##' Nesting and Equivalence Testing ##' ##' This test examines whether models are nested or equivalent based on Bentler ##' and Satorra's (2010) procedure. ##' ##' The concept of nesting/equivalence should be the same regardless of ##' estimation method. However, the particular method of testing ##' nesting/equivalence (as described in Bentler & Satorra, 2010) employed by ##' the net function analyzes summary statistics (model-implied means and ##' covariance matrices, not raw data). 
In the case of robust methods like MLR, ##' the raw data is only utilized for the robust adjustment to SE and chi-sq, ##' and the net function only checks the unadjusted chi-sq for the purposes of ##' testing nesting/equivalence. This method does not apply to models that ##' estimate thresholds for categorical data, so an error message will be issued ##' if such a model is provided. ##' ##' ##' @importFrom lavaan lavInspect ##' ##' @param \dots The \code{lavaan} objects used for test of nesting and ##' equivalence ##' @param crit The upper-bound criterion for testing the equivalence of models. ##' Models are considered nested (or equivalent) if the difference between ##' their \eqn{\chi^2} fit statistics is less than this criterion. ##' ##' @return The \linkS4class{Net} object representing the outputs for nesting ##' and equivalent testing, including a logical matrix of test results and a ##' vector of degrees of freedom for each model. ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @references ##' Bentler, P. M., & Satorra, A. (2010). Testing model nesting and equivalence. ##' \emph{Psychological Methods, 15}(2), 111--123. doi:10.1037/a0019625 ##' ##' @examples ##' ##' \dontrun{ ##' m1 <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' ##' m2 <- ' f1 =~ x1 + x2 + x3 + x4 ##' f2 =~ x5 + x6 + x7 + x8 + x9 ' ##' ##' m3 <- ' visual =~ x1 + x2 + x3 ##' textual =~ eq*x4 + eq*x5 + eq*x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' fit1 <- cfa(m1, data = HolzingerSwineford1939) ##' fit1a <- cfa(m1, data = HolzingerSwineford1939, std.lv = TRUE) # Equivalent to fit1 ##' fit2 <- cfa(m2, data = HolzingerSwineford1939) # Not equivalent to or nested in fit1 ##' fit3 <- cfa(m3, data = HolzingerSwineford1939) # Nested in fit1 and fit1a ##' ##' tests <- net(fit1, fit1a, fit2, fit3) ##' tests ##' summary(tests) ##' } ##' ##' @export net <- function(..., crit = .0001) { ## put fitted objects in a list fitList <- list(...) nFits <- length(fitList) ## check that they are all lavaan objects notLavaan <- sapply(fitList, class) != "lavaan" if (any(notLavaan)) { fitNames <- sapply(as.list(substitute(list(...)))[-1], deparse) stop(paste("The following arguments are not fitted lavaan objects:\n", paste(fitNames[notLavaan], collapse = "\t"))) } ## check for meanstructure meanstructure <- sapply(fitList, function(x) lavInspect(x, "options")$meanstructure) if (!(all(meanstructure) || !any(meanstructure))) stop('Some (but not all) fitted lavaan objects include a mean structure. 
', 'Please re-fit all models with the argument meanstructure=TRUE.') ## check whether any models include categorical outcomes catMod <- sapply(fitList, function(x) lavInspect(x, "options")$categorical) if (any(catMod)) stop("This method only applies to continuous outcomes.") ## get degrees of freedom for each model DFs <- sapply(fitList, function(x) lavInspect(x, "fit")["df"]) ## name according to named objects, with DF in parentheses fitNames <- names(fitList) dotNames <- sapply(as.list(substitute(list(...)))[-1], deparse) if (is.null(names(fitList))) { fitNames <- dotNames } else { noName <- which(fitNames == "") fitNames[noName] <- dotNames[noName] } names(fitList) <- paste(fitNames, " (df = ", DFs, ")", sep = "") ## sort list according to DFs fitList <- fitList[order(DFs)] fitNames <- fitNames[order(DFs)] orderedDFs <- DFs[order(DFs)] ## create structure for sequence of tests (logical matrix), FALSE by default nestMat <- matrix(FALSE, nFits, nFits, dimnames = list(names(fitList), fitNames)) diag(nestMat) <- TRUE # every model is equivalent with itself ## Loop through sorted models in sequence of most to least restricted model for (R in 2:nrow(nestMat)) { for (C in (R - 1):1) { ## test for nesting/equivalence nestMat[R, C] <- x.within.y(x = fitList[[R]], y = fitList[[C]], crit = crit) ## if models are equivalent, set above-diagonal value to TRUE if (identical(orderedDFs[R], orderedDFs[C])) nestMat[C, R] <- nestMat[R, C] if (C == 1) next # to prevent the next 2 tests from returning an error ## if model didn't converge (logical value is missing), go to next iteration if (is.na(nestMat[R, C]) | is.na(nestMat[R - 1, C - 1])) next ## check whether nesting is implied, to skip unnecessary tests if (nestMat[R, C] & nestMat[R - 1, C - 1]) { nestMat[R, C - 1] <- TRUE next } } } out <- new("Net", test = nestMat, df = orderedDFs) out } ## -------------------------------------------------------------------- ## Hidden Function to test whether model "x" is nested within model "y" ## -------------------------------------------------------------------- #' @importFrom lavaan lavInspect x.within.y <- function(x, y, crit = .0001) { if (length(c(lavaan::lavNames(x, "ov.ord"), lavaan::lavNames(y, "ov.ord")))) stop("The net() function is not available for categorical-data estimators.") exoX <- lavInspect(x, "options")$fixed.x & length(lavaan::lavNames(x, "ov.x")) exoY <- lavInspect(y, "options")$fixed.x & length(lavaan::lavNames(y, "ov.x")) if (exoX | exoY) { stop(c("The net() function does not work with exogenous variables.\n", "Fit the model again with 'fixed.x = FALSE'")) } ## variable names Xnames <- lavaan::lavNames(x) Ynames <- lavaan::lavNames(y) if (!identical(sort(Xnames), sort(Ynames))) stop("Models do not contain the same variables") ## check that the analyzed data matches xData <- sort(unlist(lavInspect(x, "sampstat"))) yData <- sort(unlist(lavInspect(y, "sampstat"))) if (!isTRUE(all.equal(xData, yData, tolerance = crit))) stop("Sample statistics differ. 
Models must apply to the same data") #FIXME: this method requires raw data # xData <- lavInspect(x, "data") # if (is.list(xData)) xData <- do.call(rbind, xData) # xData <- xData[ , order(Xnames)] # yData <- lavInspect(y, "data") # if (is.list(yData)) yData <- do.call(rbind, yData) # yData <- yData[ , order(Ynames)] # if (!identical(xData, yData)) stop("Models must apply to the same data") ## check degrees of freedom support nesting structure if (lavInspect(x, "fit")["df"] < lavInspect(y, "fit")["df"]) stop("x cannot be nested within y because y is more restricted than x") ## model-implied moments Sigma <- lavInspect(x, "cov.ov") if (lavInspect(x, "options")$meanstructure) { Mu <- lavInspect(x, "mean.ov") } else { Mu <- NULL } N <- lavInspect(x, "nobs") if (N != lavInspect(y, "nobs")) stop("Sample sizes differ. Models must apply to the same data") ## fit model and check that chi-squared < crit suppressWarnings(try(newFit <- lavaan::update(y, data = NULL, sample.cov = Sigma, sample.mean = Mu, sample.nobs = N, estimator = "ML", #FIXME: Yves added sample.th= argument se = "none", # to save time test = "standard"))) if(!lavInspect(newFit, "converged")) return(NA) else { result <- lavInspect(newFit, "fit")["chisq"] < crit names(result) <- NULL if (lavInspect(x, "fit")["df"] == lavInspect(y, "fit")["df"]) return(c(Equivalent = result)) } c(Nested = result) } semTools/R/measEq.R0000644000176200001440000034323413532163747013642 0ustar liggesusers### Terrence D. Jorgensen ### Last updated: 30 August 2019 ### lavaan model syntax-writing engine for new measEq() to replace ### measurementInvariance(), measurementInvarianceCat(), and longInvariance() ## ---------------------- ## Model-Fitting Function ## ---------------------- measEq <- function(configural.model, ID.fac = c("std.lv","auto.fix.first","effects.coding"), ID.cat = c("Wu.Estabrook.2016","Mplus","Millsap.Tein.2004","LISREL"), ID.thr = c(1L, 2L), # only for ID.cat == "Millsap.Tein.2004" group = NULL, longFacNames = list(), longIndNames = list(), #group.equal = "", long.equal = "", group.partial = "", long.partial = "", auto = "all", extra = NULL, test.seq = c("thresholds","loadings","intercepts","means", "lv.variances","residuals"), # optional resid/lv.autocov, residual/lv.covariances #fixed.x = TRUE, strict = FALSE, quiet = FALSE, warn = TRUE, debug = FALSE, alpha = .05, fit.measures = "default", argsLRT = list(), ...) { #TODO: check GLIST structure for multilevel (with single and multiple groups) #TODO: compatibility with auxiliary(), runMI(), parcelAllocation(), permuteMeasEq() #TODO: develop automated anchorSelection() and measEq.partial() #TODO: if (inherits(configural.model, "lavaan.measEq")) {continue sequence}? #TODO: add argument to accept measEq.partial output, to continue sequence (or make and update() method?) if (is.character(group.partial)) { if (group.partial == "" && length(group.partial) == 1L) { group.partial <- data.frame(lhs = character(0), op = character(0), rhs = character(0)) } else { group.partial <- lavaan::lavParseModelString(group.partial, as.data.frame. = TRUE, warn = warn, debug = debug) } } #TODO: else {extract information from a measEq.partial object} if (is.character(long.partial)) { if (long.partial == "" && length(long.partial) == 1L) { long.partial <- data.frame(lhs = character(0), op = character(0), rhs = character(0)) } else { long.partial <- lavaan::lavParseModelString(long.partial, as.data.frame. 
= TRUE, warn = warn, debug = debug) } } #TODO: else {extract information from a measEq.partial object} ## pass arguments to measEq.syntax(), which performs checks } ## ----------------- ## Class and Methods ## ----------------- ##' Class for Representing a Measurement-Equivalence Model ##' ##' This class of object stores information used to automatically generate ##' lavaan model syntax to represent user-specified levels of measurement ##' equivalence/invariance across groups and/or repeated measures. See ##' \code{\link{measEq.syntax}} for details. ##' ##' ##' @name measEq.syntax-class ##' @aliases measEq.syntax-class show,measEq.syntax-method ##' summary,measEq.syntax-method as.character,measEq.syntax-method ##' update,measEq.syntax-method ##' @docType class ##' ##' @slot package \code{character} indicating the software package used to ##' represent the model. Currently, only \code{"lavaan"} is available, which ##' uses the LISREL representation (see \code{\link[lavaan]{lavOptions}}). ##' In the future, \code{"OpenMx"} may become available, using RAM ##' representation. ##' @slot model.type \code{character}. Currently, only "cfa" is available. ##' Future versions may allow for MIMIC / RFA models, where invariance can be ##' tested across levels of exogenous variables explicitly included as ##' predictors of indicators, controlling for their effects on (or correlation ##' with) the common factors. ##' @slot call The function call as returned by \code{match.call()}, with ##' some arguments updated if necessary for logical consistency. ##' @slot meanstructure \code{logical} indicating whether a mean structure is ##' included in the model. ##' @slot numeric \code{character} vector naming \code{numeric} manifest indicators. ##' @slot ordered \code{character} vector naming \code{ordered} indicators. ##' @slot parameterization \code{character}. See \code{\link[lavaan]{lavOptions}}. ##' @slot specify \code{list} of parameter matrices, similar in form to the ##' output of \code{\link[lavaan]{lavInspect}(fit, "free")}. These matrices ##' are \code{logical}, indicating whether each parameter should be specified ##' in the model syntax. ##' @slot values \code{list} of parameter matrices, similar in form to the ##' output of \code{\link[lavaan]{lavInspect}(fit, "free")}. These matrices ##' are \code{numeric}, indicating whether each parameter should be freely ##' estimated (indicated by \code{NA}) or fixed to a particular value. ##' @slot labels \code{list} of parameter matrices, similar in form to the ##' output of \code{\link[lavaan]{lavInspect}(fit, "free")}. These matrices ##' contain \code{character} labels used to constrain parameters to equality. ##' @slot constraints \code{character} vector containing additional equality ##' constraints used to identify the model when \code{ID.fac = "fx"}. ##' @slot ngroups \code{integer} indicating the number of groups. ##' ##' @param x,object an object of class \code{measEq.syntax} ##' @param package \code{character} indicating the package for which the ##' syntax should be generated. Currently, only \code{"lavaan"}. ##' @param single \code{logical} indicating whether to concatenate lavaan ##' \code{\link[lavaan]{model.syntax}} into a single \code{character} string. ##' Setting \code{FALSE} will return a vector of strings, which may be ##' convenient (or even necessary to prevent an error) in ##' models with long variable names, many variables, or many groups. 
##' @param verbose \code{logical} indicating whether to print a summary to the ##' screen (default). If \code{FALSE}, only a pattern matrix is returned. ##' @param ... Additional arguments to the \code{call}, or arguments with ##' changed values. ##' @param evaluate If \code{TRUE}, evaluate the new \code{call}; otherwise, ##' return the new \code{call}. ##' ##' @return ##' \item{summary}{\code{signature(object = "measEq.syntax", verbose = TRUE)}: ##' A \code{character} matrix indicating the pattern of \code{numeric}, ##' \code{ordered}, or latent indicators loading on common factors. ##' By default (\code{verbose = TRUE}), \code{summary} also prints descriptive ##' details about the model, including the numbers of indicators and factors, ##' and which parameters are constrained to equality.} ##' \item{show}{\code{signature(object = "measEq.syntax")}: Prints a message ##' about how to use the object for model fitting. Invisibly returns the ##' object.} ##' \item{update}{\code{signature(object = "measEq.syntax"), ..., ##' evaluate = TRUE}: Creates a new object with updated arguments.} ##' \item{as.character}{\code{signature(x = "measEq.syntax", package = "lavaan")}: ##' Converts the \code{measEq.syntax} object to model syntax that can be ##' copy/pasted into a syntax file to be edited before analysis, or simply ##' passed to \code{\link[lavaan]{lavaan}} to fit the model to data.} ##' ##' @author Terrence D. Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' @examples ##' ## See ?measEq.syntax help page ##' setClass("measEq.syntax", slots = c(package = "character", # lavaan, OpenMx in the future? model.type = "character", # cfa, extend to mimic/rfa? call = "call", meanstructure = "logical", numeric = "character", ordered = "character", parameterization = "character", specify = "list", values = "list", labels = "list", constraints = "character", ngroups = "integer")) ##' @rdname measEq.syntax-class ##' @aliases as.character,measEq.syntax-method ##' @export setMethod("as.character", "measEq.syntax", function(x, package = "lavaan", single = TRUE) { pmatList <- c("lambda","tau","nu","delta","theta","alpha","psi") #TODO: if (package = "OpenMx") concatenate matrices for RAM specification script <- character(0) ## loop over pmats, send all groups together for (pm in pmatList) { if (!pm %in% names(x@specify[[1]])) next if (pm == "lambda" && "beta" %in% names(x@specify[[1]])) { ## add higher-order loadings to lambda matrix specify.l <- lapply(x@specify, "[[", i = "lambda") value.l <- lapply(x@values , "[[", i = "lambda") label.l <- lapply(x@labels , "[[", i = "lambda") specify.b <- lapply(x@specify, "[[", i = "beta") value.b <- lapply(x@values , "[[", i = "beta") label.b <- lapply(x@labels , "[[", i = "beta") specify <- mapply(rbind, specify.l, specify.b, SIMPLIFY = FALSE) value <- mapply(rbind, value.l , value.b , SIMPLIFY = FALSE) label <- mapply(rbind, label.l , label.b , SIMPLIFY = FALSE) } else { specify <- lapply(x@specify, "[[", i = pm) value <- lapply(x@values, "[[", i = pm) label <- lapply(x@labels, "[[", i = pm) } script <- c(script, write.lavaan.syntax(pmat = pm, specify = specify, value = value, label = label)) } if (length(x@constraints)) script <- c(script, "## MODEL CONSTRAINTS:\n", x@constraints, "") ## convert GLIST objects to a character string if (single) return(paste(script, collapse = "\n")) script }) ##' @rdname measEq.syntax-class ##' @aliases show,measEq.syntax-method ##' @export setMethod("show", "measEq.syntax", function(object) { cat('This 
object contains information for specifying a CFA using lavaan', 'model syntax.\nTo print the syntax (to copy/paste it into an R script),', 'use the as.character() method:\n\n\tcat(as.character(object))\n\nTo fit', 'this model to data, save the syntax to an object and pass it to lavaan:', '\n\n\tmodel <- as.character(object)\n\tfit <- lavaan(model, ...)', '\n\nTo view some key features of the model use: \n\n\tsummary(object)') invisible(object) }) ##' @rdname measEq.syntax-class ##' @aliases summary,measEq.syntax-method ##' @export setMethod("summary", "measEq.syntax", function(object, verbose = TRUE) { nG <- object@ngroups nOrd <- length(object@ordered) higher <- !is.null(object@specify[[1]]$beta) ## create pattern matrix lambda <- object@specify[[1]]$lambda lambda[!lambda] <- "" for (RR in 1:nrow(lambda)) { if (rownames(lambda)[RR] %in% object@ordered) { lambda[RR, object@specify[[1]]$lambda[RR, ] ] <- "ord" } else lambda[RR, object@specify[[1]]$lambda[RR, ] ] <- "num" } if (higher) { beta <- object@specify[[1]]$beta beta[!beta] <- "" for (RR in 1:nrow(beta)) { beta[RR, object@specify[[1]]$beta[RR, ] ] <- "lat" } rownames(beta) <- paste("**", rownames(beta), "**") lambda <- rbind(lambda, beta) } if (!verbose) return(lambda) ## Basics: number of groups, factors, and indicators (higher order?); ID.fac nHigher <- if (higher) sum(apply(object@specify[[1]]$beta, 2, any)) else 0L if (object@call$ID.fac == "ul" && !object@meanstructure) { ID.fac.text <- 'first indicator`s factor loading was fixed to 1.' } else if (object@call$ID.fac == "ul" && object@meanstructure) { ID.fac.text <- paste('first indicator`s intercept and factor loading were', 'fixed to 0 and 1, respectively.') } else if (object@call$ID.fac == "uv" && !object@meanstructure) { ID.fac.text <- paste('factor variances were fixed to 1, unless equality', 'constraints on factor loadings allow them to be freed.') } else if (object@call$ID.fac == "uv" && object@meanstructure) { ID.fac.text <- paste('factor means and variances were fixed to 0 and 1,', 'respectively, unless equality constraints on', 'measurement parameters allow them to be freed.') } else if (object@call$ID.fac == "fx") { ID.fac.text <- paste('factor loadings were constrained to average 1', if (object@meanstructure) 'and intercepts were constrained to average 0', 'within each factor. In models with partial', 'invariance, only the factor loadings', if (object@meanstructure) 'and intercepts', 'that were constrained to equality across ALL groups', 'and repeated measures (when applicable) are used to', 'identify the common-factor distribution.') } cat('This lavaan model syntax specifies a CFA with ', nrow(object@specify[[1]]$lambda), ' manifest indicators ', if (nOrd == 1L) { paste0('(', nOrd, ' of which is ordinal) ') } else if (nOrd > 1L) { paste0('(', nOrd, ' of which are ordinal) ') }, 'of ', ncol(object@specify[[1]]$lambda), ' common factor(s)', if (nHigher == 1L) { paste(',', nHigher, 'of which is a higher-order factor. ') } else if (nHigher > 1L) { paste(',', nHigher, 'of which are higher-order factors. ') } else '.\n\n', 'To identify the ', if (object@meanstructure) 'location and ', 'scale of each common factor, the ', ID.fac.text, "\n\n", sep = '') ## if (ordered) ID.cat and parameterization if (nOrd) { if (object@call$ID.cat == "wu") { ID.cat.author <- 'recommended by Wu & Estabrook (2016). ' ID.cat.DOI <- 'https://doi.org/10.1007/s11336-016-9506-0 \n\n' } else if (object@call$ID.cat == "millsap") { ID.cat.author <- 'recommended by Millsap & Tein (2004). 
' } else if (object@call$ID.cat == "mplus") { ID.cat.author <- 'used by default in the Mplus (and lavaan) software. ' } else if (object@call$ID.cat == "lisrel") { ID.cat.author <- 'used by default in the LISREL software. ' } if (object@call$ID.cat != "wu") ID.cat.DOI <- 'http://dx.doi.org/10.1207/S15327906MBR3903_4 \n\n' cat('The location and scale of each latent item-response underlying ', nOrd, ' ordinal indicators were identified using the "', object@parameterization, '" parameterization, and the identification constraints ', ID.cat.author, 'For details, read:\n\n\t', ID.cat.DOI, sep = '') } ## number of occassions per longitudinal construct if (length(object@call$longFacNames)) { cat('The following factors were measured on multiple occasions:\n') for (f in names(object@call$longFacNames)) { cat('\t"', f, '" was measured on ', length(object@call$longFacNames[[f]]), ' occasions\n', sep = '') } cat('\n') } ## print pattern matrix cat('Pattern matrix indicating num(eric), ord(ered), and lat(ent)', 'indicators per factor:\n\n') print(lambda, quote = FALSE) cat('\n') ## without any constraints, call it the configural model no.group.equal <- length(object@call$group.equal) == 1L && object@call$group.equal == "" no.long.equal <- length(object@call$long.equal) == 1L && object@call$long.equal == "" if (no.group.equal && no.long.equal) { cat('\nThis model hypothesizes only configural invariance.\n\n') ## return pattern matrix return(invisible(lambda)) } ## otherwise, print the constraints & exceptions ## constrained parameters across groups (+ partial exceptions) if (nG > 1L) { if (no.group.equal) { cat('No parameters were constrained to equality across groups.\n') } else { cat('The following types of parameter were constrained to', 'equality across groups:\n\n') for (i in object@call$group.equal) { group.partial <- object@call$group.partial ## first, check for exceptions if (i == "loadings") { man.ind <- group.partial$rhs %in% rownames(object@specify[[1]]$lambda) group.partial <- group.partial[group.partial$op == "=~" & man.ind, ] } else if (i == "regressions") { lat.ind <- group.partial$rhs %in% colnames(object@specify[[1]]$lambda) group.partial <- group.partial[group.partial$op == "=~" & lat.ind, ] } else if (i == "thresholds") { man.ind <- group.partial$lhs %in% rownames(object@specify[[1]]$lambda) group.partial <- group.partial[group.partial$op == "|" & man.ind, ] } else if (i == "residuals") { man.ind <- group.partial$rhs %in% rownames(object@specify[[1]]$lambda) same.ind <- group.partial$rhs == group.partial$lhs group.partial <- group.partial[group.partial$op == "~~" & man.ind & same.ind, ] } else if (i == "residual.covariances") { man.ind <- group.partial$rhs %in% rownames(object@specify[[1]]$lambda) same.ind <- group.partial$rhs == group.partial$lhs group.partial <- group.partial[group.partial$op == "~~" & man.ind & !same.ind, ] } else if (i == "lv.variances") { lat <- group.partial$rhs %in% colnames(object@specify[[1]]$lambda) same <- group.partial$rhs == group.partial$lhs group.partial <- group.partial[group.partial$op == "~~" & lat & same, ] } else if (i == "lv.covariances") { lat <- group.partial$rhs %in% colnames(object@specify[[1]]$lambda) same <- group.partial$rhs == group.partial$lhs group.partial <- group.partial[group.partial$op == "~~" & lat & !same, ] } else if (i == "intercepts") { man.ind <- group.partial$lhs %in% rownames(object@specify[[1]]$lambda) group.partial <- group.partial[group.partial$op == "~1" & man.ind, ] } else if (i == "means") { lat <- group.partial$lhs %in% 
colnames(object@specify[[1]]$lambda) group.partial <- group.partial[group.partial$op == "~1" & lat, ] } ## then print a message cat('\t', i, if (nrow(group.partial)) ', with the exception of:\n', '\n', sep = '') if (nrow(group.partial)) { rownames(group.partial) <- paste(" row-", rownames(group.partial), ": ", sep = "") print(group.partial) cat('\n') } } } cat('\n') } ## constrained parameters across repeated measures (+ partial exceptions) if (length(object@call$longFacNames)) { if (no.long.equal) { cat('No parameters were constrained to equality across repeated measures:\n') } else { cat('The following types of parameter were constrained to equality', 'across repeated measures:\n\n') for (i in object@call$long.equal) { long.partial <- object@call$long.partial ## first, check for exceptions if (i == "loadings") { man.ind <- long.partial$rhs %in% names(object@call$longIndNames) long.partial <- long.partial[long.partial$op == "=~" & man.ind, ] } else if (i == "regressions") { lat.ind <- long.partial$rhs %in% names(object@call$longFacNames) long.partial <- long.partial[long.partial$op == "=~" & lat.ind, ] } else if (i == "thresholds") { man.ind <- long.partial$lhs %in% names(object@call$longIndNames) long.partial <- long.partial[long.partial$op == "|" & man.ind, ] } else if (i == "residuals") { man.ind <- long.partial$rhs %in% names(object@call$longIndNames) same.ind <- long.partial$rhs == long.partial$lhs long.partial <- long.partial[long.partial$op == "~~" & man.ind & same.ind, ] } else if (i == "lv.variances") { lat <- long.partial$rhs %in% names(object@call$longFacNames) same <- long.partial$rhs == long.partial$lhs long.partial <- long.partial[long.partial$op == "~~" & lat & same, ] } else if (i == "intercepts") { man.ind <- long.partial$lhs %in% names(object@call$longIndNames) long.partial <- long.partial[long.partial$op == "~1" & man.ind, ] } else if (i == "means") { lat <- long.partial$lhs %in% names(object@call$longFacNames) long.partial <- long.partial[long.partial$op == "~1" & lat, ] } ## then print a message cat('\t', i, if (nrow(long.partial)) ', with the exception of:\n', '\n', sep = '') if (nrow(long.partial)) { rownames(long.partial) <- paste(" row-", rownames(long.partial), ": ", sep = "") print(long.partial) cat('\n') } } } cat('\n') } ## return pattern matrix invisible(lambda) }) ##' @rdname measEq.syntax-class ##' @aliases update,measEq.syntax-method ##' @importFrom stats update ##' @export setMethod("update", "measEq.syntax", function(object, ..., evaluate = TRUE) { call <- object@call #extras <- match.call(expand.dots = FALSE)$... extras <- list(...) existing <- !is.na(match(names(extras), names(call))) for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if (any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } if (evaluate) { out <- eval(call, parent.frame()) } else out <- call out }) ## ----------------------- ## Syntax-Writing Function ## ----------------------- ##' Syntax for measurement equivalence ##' ##' Automatically generates \code{lavaan} model syntax to specify a confirmatory ##' factor analysis (CFA) model with equality constraints imposed on ##' user-specified measurement (or structural) parameters. Optionally returns ##' the fitted model (if data are provided) representing some chosen level of ##' measurement equivalence/invariance across groups and/or repeated measures. 
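##'
##' As a brief sketch of the intended workflow (a hypothetical two-group
##' example, where \code{mod} and \code{dat} are placeholders for the user's
##' configural model syntax and data):
##'
##' \preformatted{
##' syntax.metric <- measEq.syntax(configural.model = mod, data = dat,
##'                                group = "school",
##'                                group.equal = "loadings")
##' summary(syntax.metric)                  # inspect implied constraints
##' model.metric <- as.character(syntax.metric)
##' fit.metric <- lavaan(model.metric, data = dat, group = "school")
##' }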
##' ##' This function is a pedagogical and analytical tool to generate model syntax ##' representing some level of measurement equivalence/invariance across any ##' combination of multiple groups and/or repeated measures. Support is provided ##' for confirmatory factor analysis (CFA) models with simple or complex ##' structure (i.e., cross-loadings and correlated residuals are allowed). ##' For any complexities that exceed the limits of automation, this function is ##' intended to still be useful by providing a means to generate syntax that ##' users can easily edit to accommodate their unique situations. ##' ##' Limited support is provided for bifactor models and higher-order constructs. ##' Because bifactor models have cross-loadings by definition, the option ##' \code{ID.fac = "effects.code"} is unavailable. \code{ID.fac = "UV"} is ##' recommended for bifactor models, but \code{ID.fac = "UL"} is available on ##' the condition that each factor has a unique first indicator in the ##' \code{configural.model}. In order to maintain generality, higher-order ##' factors may include a mix of manifest and latent indicators, but they must ##' therefore require \code{ID.fac = "UL"} to avoid complications with ##' differentiating lower-order vs. higher-order (or mixed-level) factors. ##' The keyword \code{"loadings"} in \code{group.equal} or \code{long.equal} ##' constrains factor loadings of all manifest indicators (including loadings on ##' higher-order factors that also have latent indicators), whereas the keyword ##' \code{"regressions"} constrains factor loadings of latent indicators. Users ##' can edit the model syntax manually to adjust constraints as necessary, or ##' clever use of the \code{group.partial} or \code{long.partial} arguments ##' could make it possible for users to still automated their model syntax. ##' The keyword \code{"intercepts"} constrains the intercepts of all manifest ##' indicators, and the keyword \code{"means"} constrains intercepts and means ##' of all latent common factors, regardless of whether they are latent ##' indicators of higher-order factors. To test equivalence of lower-order and ##' higher-order intercepts/means in separate steps, the user can either ##' manually edit their generated syntax or conscientiously exploit the ##' \code{group.partial} or \code{long.partial} arguments as necessary. ##' ##' \strong{\code{ID.fac}:} If the \code{configural.model} fixes any (e.g., ##' the first) factor loadings, the generated syntax object will retain those ##' fixed values. This allows the user to retain additional constraints that ##' might be necessary (e.g., if there are only 1 or 2 indicators). Some methods ##' must be used in conjunction with other settings: ##' \itemize{ ##' \item \code{ID.cat = "Millsap"} requires \code{ID.fac = "UL"} and ##' \code{parameterization = "theta"}. ##' \item \code{ID.cat = "LISREL"} requires \code{parameterization = "theta"}. ##' \item \code{ID.fac = "effects.code"} is unavailable when there are any ##' cross-loadings. ##' } ##' ##' \strong{\code{ID.cat}:} Wu & Estabrook (2016) recommended constraining ##' thresholds to equality first, and doing so should allow releasing any ##' identification constraints no longer needed. For each \code{ordered} ##' indicator, constraining one threshold to equality will allow the item's ##' intercepts to be estimated in all but the first group or repeated measure. 
##' Constraining a second threshold (if applicable) will allow the item's ##' (residual) variance to be estimated in all but the first group or repeated ##' measure. For binary data, there is no independent test of threshold, ##' intercept, or residual-variance equality. Equivalence of thresholds must ##' also be assumed for three-category indicators. These guidelines provide the ##' least restrictive assumptions and tests, and are therefore the default. ##' ##' The default setting in M\emph{plus} is similar to Wu & Estabrook (2016), ##' except that intercepts are always constrained to zero (so they are assumed ##' to be invariant without testing them). Millsap & Tein (2004) recommended ##' \code{parameterization = "theta"} and identified an item's residual variance ##' in all but the first group (or occasion; Liu et al., 2017) by constraining ##' its intercept to zero and one of its thresholds to equality. A second ##' threshold for the reference indicator (so \code{ID.fac = "UL"}) is used to ##' identify the common-factor means in all but the first group/occasion. The ##' LISREL software fixes the first threshold to zero and (if applicable) the ##' second threshold to 1, and assumes any remaining thresholds to be equal ##' across groups / repeated measures; thus, the intercepts are always ##' identified, and residual variances (\code{parameterization = "theta"}) are ##' identified except for binary data, when they are all fixed to one. ##' ##' \strong{Repeated Measures:} If each repeatedly measured factor is measured ##' by the same indicators (specified in the same order in the ##' \code{configural.model}) on each occasion, without any cross-loadings, the ##' user can let \code{longIndNames} be automatically generated. Generic names ##' for the repeatedly measured indicators are created using the name of the ##' repeatedly measured factors (i.e., \code{names(longFacNames)}) and the ##' number of indicators. So the first indicator of a longitudinal construct ##' called "factor" would be assigned the generic name \code{"._factor_.ind.1"}. ##' ##' The same types of parameter can be specified for \code{long.equal} as for ##' \code{group.equal} (see \code{\link[lavaan]{lavOptions}} for a list), except ##' for \code{"residual.covariances"} or \code{"lv.covariances"}. Instead, users ##' can constrain \emph{auto}covariances using keywords \code{"resid.autocov"} ##' or \code{"lv.autocov"}. Note that \code{group.equal = "lv.covariances"} or ##' \code{group.equal = "residual.covariances"} will constrain any ##' autocovariances across groups, along with any other covariances the user ##' specified in the \code{configural.model}. Note also that autocovariances ##' cannot be specified as exceptions in \code{long.partial}, so anything more ##' complex than what the \code{auto} argument automatically provides should ##' instead be manually specified in the \code{configural.model}. ##' ##' When users set \code{orthogonal=TRUE} in the \code{configural.model} (e.g., ##' in bifactor models of repeatedly measured constructs), autocovariances of ##' each repeatedly measured factor will still be freely estimated in the ##' generated syntax. ##' ##' \strong{Missing Data:} If users wish to utilize the \code{\link{auxiliary}} ##' function to automatically include auxiliary variables in conjunction with ##' \code{missing = "FIML"}, they should first generate the hypothesized-model ##' syntax, then submit that syntax as the model to \code{auxiliary()}. 
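##' For example, a minimal sketch (assuming a previously generated syntax
##' object \code{syntax.scalar}, and a hypothetical auxiliary variable
##' \code{z1} in \code{myData} that is not part of the model):
##' \preformatted{
##' mod.scalar <- as.character(syntax.scalar)
##' fit.aux <- cfa.auxiliary(mod.scalar, data = myData, aux = "z1", group = "g")
##' }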
##' If users utilized \code{\link{runMI}} to fit their \code{configural.model} ##' to multiply imputed data, that model can also be passed to the ##' \code{configural.model} argument, and if \code{return.fit = TRUE}, the ##' generated model will be fitted to the multiple imputations. ##' ##' @importFrom lavaan lavInspect lavNames cfa ##' ##' @param configural.model A model with no measurement-invariance constraints ##' (i.e., representing only configural invariance), unless required for model ##' identification. \code{configural.model} can be either: ##' \itemize{ ##' \item lavaan \code{\link[lavaan]{model.syntax}} or a parameter table ##' (as returned by \code{\link[lavaan]{parTable}}) specifying the ##' configural model. Using this option, the user can also provide ##' either raw \code{data} or summary statistics via \code{sample.cov} ##' and (optionally) \code{sample.mean}. In order to include ##' thresholds in the syntax, raw \code{data} must be provided. #FIXME: lavaan has a new sample.th= argument ##' See \code{\link[lavaan]{lavaan}}. ##' \item a fitted \code{\linkS4class{lavaan}} model (e.g., as returned by ##' \code{\link[lavaan]{cfa}}) estimating the configural model ##' } ##' ##' @param ... Additional arguments (e.g., \code{data}, \code{ordered}, or ##' \code{parameterization}) passed to the \code{\link[lavaan]{lavaan}} ##' function. See also \code{\link[lavaan]{lavOptions}}. ##' ##' @param ID.fac \code{character}. The method for identifying common-factor ##' variances and (if \code{meanstructure = TRUE}) means. Three methods are ##' available, which go by different names in the literature: ##' \itemize{ ##' \item Standardize the common factor (mean = 0, \emph{SD} = 1) by ##' specifying any of: \code{"std.lv"}, \code{"unit.variance"}, ##' \code{"UV"}, \code{"fixed.factor"}, ##' \code{"fixed-factor"} ##' \item Choose a reference indicator by specifying any of: ##' \code{"auto.fix.first"}, \code{"unit.loading"}, \code{"UL"}, ##' \code{"marker"}, \code{"ref"}, \code{"ref.indicator"}, ##' \code{"reference.indicator"}, \code{"reference-indicator"}, ##' \code{"marker.variable"}, \code{"marker-variable"} ##' \item Apply effects-code constraints to loadings and intercepts by ##' specifying any of: \code{"FX"}, \code{"EC"}, \code{"effects"}, ##' \code{"effects.coding"}, \code{"effects-coding"}, ##' \code{"effects.code"}, \code{"effects-code"} ##' } ##' See Kloessner & Klopp (2019) for details about all three methods. ##' ##' @param ID.cat \code{character}. The method for identifying (residual) ##' variances and intercepts of latent item-responses underlying any ##' \code{ordered} indicators. Four methods are available: ##' \itemize{ ##' \item To follow Wu & Estabrook's (2016) guidelines (default), specify ##' any of: \code{"Wu.Estabrook.2016"}, \code{"Wu.2016"}, ##' \code{"Wu.Estabrook"}, \code{"Wu"}, \code{"Wu2016"}. ##' \item To use the default settings of M\emph{plus} and \code{lavaan}, ##' specify any of: \code{"default"}, \code{"Mplus"}, \code{"Muthen"}. ##' Details provided in Millsap & Tein (2004). ##' \item To use the constraints recommended by Millsap & Tein (2004; see ##' also Liu et al., 2017) ##' specify any of: \code{"millsap"}, \code{"millsap.2004"}, ##' \code{"millsap.tein.2004"} ##' \item To use the default settings of LISREL, specify \code{"LISREL"} ##' or \code{"Joreskog"}. Details provided in Millsap & Tein (2004). ##' } ##' See \strong{Details} and \strong{References} for more information. ##' ##' @param ID.thr \code{integer}. 
Only relevant when ##' \code{ID.cat = "Millsap.Tein.2004"}. Used to indicate which thresholds ##' should be constrained for identification. The first integer indicates the ##' threshold used for all indicators, and the second integer indicates the ##' additional threshold constrained for a reference indicator (ignored if ##' binary). ##' ##' @param group optional \code{character} indicating the name of a grouping ##' variable. See \code{\link[lavaan]{cfa}}. ##' ##' @param group.equal optional \code{character} vector indicating type(s) of ##' parameter to equate across groups. Ignored if \code{is.null(group)}. ##' See \code{\link[lavaan]{lavOptions}}. ##' ##' @param group.partial optional \code{character} vector or a parameter table ##' indicating exceptions to \code{group.equal} (see ##' \code{\link[lavaan]{lavOptions}}). Any variables not appearing in the ##' \code{configural.model} will be ignored, and any parameter constraints ##' needed for identification (e.g., two thresholds per indicator when ##' \code{ID.cat = "Millsap"}) will be removed. ##' ##' @param longFacNames optional named \code{list} of \code{character} vectors, ##' each indicating multiple factors in the model that are actually the same ##' construct measured repeatedly. See \strong{Details} and \strong{Examples}. ##' ##' @param longIndNames optional named \code{list} of \code{character} vectors, ##' each indicating multiple indicators in the model that are actually the ##' same indicator measured repeatedly. See \strong{Details} and ##' \strong{Examples}. ##' ##' @param long.equal optional \code{character} vector indicating type(s) of ##' parameter to equate across repeated measures. Ignored if no factors are ##' indicated as repeatedly measured in \code{longFacNames}. ##' ##' @param long.partial optional \code{character} vector or a parameter table ##' indicating exceptions to \code{long.equal}. Any longitudinal variable ##' names not appearing in \code{names(longFacNames)} or ##' \code{names(longIndNames)} will be ignored, and any parameter constraints ##' needed for identification will be removed. ##' ##' @param auto Used to automatically include autocorrelated measurement errors ##' among repeatedly measured indicators in \code{longIndNames}. Specify a ##' single \code{integer} to set the maximum order (e.g., \code{auto = 1L} ##' indicates that an indicator's unique factors should only be correlated ##' between adjacently measured occasions). \code{auto = TRUE} or \code{"all"} ##' will specify residual covariances among all possible lags per repeatedly ##' measured indicator in \code{longIndNames}. ##' ##' @param warn,debug \code{logical}. Passed to \code{\link[lavaan]{lavaan}} ##' and \code{\link[lavaan]{lavParseModelString}}. ##' See \code{\link[lavaan]{lavOptions}}. ##' ##' @param return.fit \code{logical} indicating whether the generated syntax ##' should be fitted to the provided \code{data} (or summary statistics, if ##' provided via \code{sample.cov}). If \code{configural.model} is a fitted ##' lavaan model, the generated syntax will be fitted using the \code{update} ##' method (see \code{\linkS4class{lavaan}}), and \dots will be passed to ##' \code{\link[lavaan]{lavaan}}. If neither data nor a fitted lavaan model ##' were provided, this must be \code{FALSE}. If \code{TRUE}, the generated ##' \code{measEq.syntax} object will be included in the \code{lavaan} object's ##' \code{@@external} slot, accessible by \code{fit@@external$measEq.syntax}. 
##' ##' @return By default, an object of class \code{\linkS4class{measEq.syntax}}. ##' If \code{return.fit = TRUE}, a fitted \code{\link[lavaan]{lavaan}} ##' model, with the \code{measEq.syntax} object stored in the ##' \code{@@external} slot, accessible by \code{fit@@external$measEq.syntax}. ##' ##' @author Terrence D. Jorgensen (University of Amsterdam; ##' \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\link{compareFit}} ##' ##' @references ##' Kloessner, S., & Klopp, E. (2019). Explaining constraint interaction: How ##' to interpret estimated model parameters under alternative scaling methods. ##' \emph{Structural Equation Modeling, 26}(1), 143--155. ##' doi:10.1080/10705511.2018.1517356 ##' ##' Liu, Y., Millsap, R. E., West, S. G., Tein, J.-Y., Tanaka, R., & Grimm, ##' K. J. (2017). Testing measurement invariance in longitudinal data with ##' ordered-categorical measures. \emph{Psychological Methods, 22}(3), ##' 486--506. doi:10.1037/met0000075 ##' ##' Millsap, R. E., & Tein, J.-Y. (2004). Assessing factorial invariance in ##' ordered-categorical measures. \emph{Multivariate Behavioral Research, 39}(3), ##' 479--515. doi:10.1207/S15327906MBR3903_4 ##' ##' Wu, H., & Estabrook, R. (2016). Identification of confirmatory factor ##' analysis models of different levels of invariance for ordered categorical ##' outcomes. \emph{Psychometrika, 81}(4), 1014--1045. ##' doi:10.1007/s11336-016-9506-0 ##' ##' @examples ##' mod.cat <- ' FU1 =~ u1 + u2 + u3 + u4 ##' FU2 =~ u5 + u6 + u7 + u8 ' ##' ## the 2 factors are actually the same factor (FU) measured twice ##' longFacNames <- list(FU = c("FU1","FU2")) ##' ##' ## configural model: no constraints across groups or repeated measures ##' syntax.config <- measEq.syntax(configural.model = mod.cat, data = datCat, ##' ordered = paste0("u", 1:8), ##' parameterization = "theta", ##' ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", ##' group = "g", longFacNames = longFacNames) ##' ## print lavaan syntax to the Console ##' cat(as.character(syntax.config)) ##' ## print a summary of model features ##' summary(syntax.config) ##' ##' ## threshold invariance ##' syntax.thresh <- measEq.syntax(configural.model = mod.cat, data = datCat, ##' ordered = paste0("u", 1:8), ##' parameterization = "theta", ##' ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", ##' group = "g", group.equal = "thresholds", ##' longFacNames = longFacNames, ##' long.equal = "thresholds") ##' ## notice that constraining 4 thresholds allows intercepts and residual ##' ## variances to be freely estimated in all but the first group & occasion ##' cat(as.character(syntax.thresh)) ##' ## print a summary of model features ##' summary(syntax.thresh) ##' ##' ##' ## Fit a model to the data either in a subsequent step: ##' mod.config <- as.character(syntax.config) ##' fit.config <- cfa(mod.config, data = datCat, group = "g", ##' ordered = paste0("u", 1:8), parameterization = "theta") ##' ## or in a single step: ##' fit.thresh <- measEq.syntax(configural.model = mod.cat, data = datCat, ##' ordered = paste0("u", 1:8), ##' parameterization = "theta", ##' ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", ##' group = "g", group.equal = "thresholds", ##' longFacNames = longFacNames, ##' long.equal = "thresholds", return.fit = TRUE) ##' ## compare their fit to test threshold invariance ##' anova(fit.config, fit.thresh) ##' ##' ##' ## -------------------------------------------------------- ##' ## RECOMMENDED PRACTICE: fit one invariance model at a time ##' ## 
-------------------------------------------------------- ##' ##' ## - A downside of setting return.fit=TRUE is that if the model has trouble ##' ## converging, you don't have the opportunity to investigate the syntax, ##' ## or even to know whether an error resulted from the syntax-generator or ##' ## from lavaan itself. ##' ## - A downside of automatically fitting an entire set of invariance models ##' ## (like the old measurementInvariance() function did) is that you might ##' ## end up testing models that shouldn't even be fitted because less ##' ## restrictive models already fail (e.g., don't test full scalar ##' ## invariance if metric invariance fails! Establish partial metric ##' ## invariance first, then test equivalence of intercepts ONLY among the ##' ## indicators that have invariant loadings.) ##' ##' ## The recommended sequence is to (1) generate and save each syntax object, ##' ## (2) print it to the screen to verify you are fitting the model you expect ##' ## to (and potentially learn which identification constraints should be ##' ## released when equality constraints are imposed), and (3) fit that model ##' ## to the data as you would if you wrote the syntax yourself. ##' ##' ## Continuing from the examples above, after establishing invariance of ##' ## thresholds, we proceed to test equivalence of loadings and intercepts ##' ## (metric and scalar invariance, respectively) ##' ## simultaneously across groups and repeated measures. ##' ##' \dontrun{ ##' ##' ## metric invariance ##' syntax.metric <- measEq.syntax(configural.model = mod.cat, data = datCat, ##' ordered = paste0("u", 1:8), ##' parameterization = "theta", ##' ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", ##' group = "g", longFacNames = longFacNames, ##' group.equal = c("thresholds","loadings"), ##' long.equal = c("thresholds","loadings")) ##' summary(syntax.metric) # summarize model features ##' mod.metric <- as.character(syntax.metric) # save as text ##' cat(mod.metric) # print/view lavaan syntax ##' ## fit model to data ##' fit.metric <- cfa(mod.metric, data = datCat, group = "g", ##' ordered = paste0("u", 1:8), parameterization = "theta") ##' ## test equivalence of loadings, given equivalence of thresholds ##' anova(fit.thresh, fit.metric) ##' ##' ## scalar invariance ##' syntax.scalar <- measEq.syntax(configural.model = mod.cat, data = datCat, ##' ordered = paste0("u", 1:8), ##' parameterization = "theta", ##' ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", ##' group = "g", longFacNames = longFacNames, ##' group.equal = c("thresholds","loadings", ##' "intercepts"), ##' long.equal = c("thresholds","loadings", ##' "intercepts")) ##' summary(syntax.scalar) # summarize model features ##' mod.scalar <- as.character(syntax.scalar) # save as text ##' cat(mod.scalar) # print/view lavaan syntax ##' ## fit model to data ##' fit.scalar <- cfa(mod.scalar, data = datCat, group = "g", ##' ordered = paste0("u", 1:8), parameterization = "theta") ##' ## test equivalence of intercepts, given equal thresholds & loadings ##' anova(fit.metric, fit.scalar) ##' ##' ##' ## For a single table with all results, you can pass the models you want to ##' ## summarize to the compareFit() function ##' compareFit(fit.config, fit.thresh, fit.metric, fit.scalar) ##' ##' ##' ##' ## ------------------------------------------------------ ##' ## NOT RECOMMENDED: fit several invariance models at once ##' ## ------------------------------------------------------ ##' test.seq <- c("thresholds","loadings","intercepts","means","residuals") ##' meq.list <- 
list() ##' for (i in 0:length(test.seq)) { ##' if (i == 0L) { ##' meq.label <- "configural" ##' group.equal <- "" ##' long.equal <- "" ##' } else { ##' meq.label <- test.seq[i] ##' group.equal <- test.seq[1:i] ##' long.equal <- test.seq[1:i] ##' } ##' meq.list[[meq.label]] <- measEq.syntax(configural.model = mod.cat, ##' data = datCat, ##' ordered = paste0("u", 1:8), ##' parameterization = "theta", ##' ID.fac = "std.lv", ##' ID.cat = "Wu.Estabrook.2016", ##' group = "g", ##' group.equal = group.equal, ##' longFacNames = longFacNames, ##' long.equal = long.equal, ##' return.fit = TRUE) ##' } ##' ##' compareFit(meq.list) ##' ##' ##' ## ----------------- ##' ## Binary indicators ##' ## ----------------- ##' ##' ## borrow example data from Mplus user guide ##' myData <- read.table("http://www.statmodel.com/usersguide/chap5/ex5.16.dat") ##' names(myData) <- c("u1","u2","u3","u4","u5","u6","x1","x2","x3","g") ##' bin.mod <- ' ##' FU1 =~ u1 + u2 + u3 ##' FU2 =~ u4 + u5 + u6 ##' ' ##' ## Must SIMULTANEOUSLY constrain thresholds, loadings, and intercepts ##' test.seq <- list(strong = c("thresholds","loadings","intercepts"), ##' means = "means", ##' strict = "residuals") ##' meq.list <- list() ##' for (i in 0:length(test.seq)) { ##' if (i == 0L) { ##' meq.label <- "configural" ##' group.equal <- "" ##' long.equal <- "" ##' } else { ##' meq.label <- names(test.seq)[i] ##' group.equal <- unlist(test.seq[1:i]) ##' # long.equal <- unlist(test.seq[1:i]) ##' } ##' meq.list[[meq.label]] <- measEq.syntax(configural.model = bin.mod, ##' data = myData, ##' ordered = paste0("u", 1:6), ##' parameterization = "theta", ##' ID.fac = "std.lv", ##' ID.cat = "Wu.Estabrook.2016", ##' group = "g", ##' group.equal = group.equal, ##' #longFacNames = longFacNames, ##' #long.equal = long.equal, ##' return.fit = TRUE) ##' } ##' ##' compareFit(meq.list) ##' #TODO: add ternary example? or note to start with EQ thresholds? 
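##'
##' ## Note: rather than repeating an entire argument list, an existing
##' ## measEq.syntax object can also be updated with new constraints via the
##' ## update() method for this class. For example, the threshold-invariance
##' ## syntax above could also have been generated from the configural syntax:
##' syntax.thresh2 <- update(syntax.config, group.equal = "thresholds",
##'                          long.equal = "thresholds")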
##' ##' } ##' @export measEq.syntax <- function(configural.model, ..., ID.fac = "std.lv", ID.cat = "Wu.Estabrook.2016", ID.thr = c(1L, 2L), group = NULL, group.equal = "", group.partial = "", longFacNames = list(), longIndNames = list(), long.equal = "", long.partial = "", auto = "all", warn = TRUE, debug = FALSE, return.fit = FALSE) { mc <- match.call(expand.dots = TRUE) ## evaluate promises that might change before being evaluated ## (e.g., for-loops or Monte Carlo studies) mc$ID.fac <- eval(ID.fac) mc$ID.cat <- eval(ID.cat) mc$ID.thr <- eval(ID.thr) mc$group <- eval(group) mc$group.equal <- eval(group.equal) mc$group.partial <- eval(group.partial) mc$longFacNames <- eval(longFacNames) mc$longIndNames <- eval(longIndNames) mc$long.equal <- eval(long.equal) mc$long.partial <- eval(long.partial) mc$auto <- eval(auto) ## ------------------------------- ## Preliminary checks on arguments ## ------------------------------- ## check identification arguments ID.fac <- tolower(as.character(ID.fac)[1]) if (ID.fac %in% c("std.lv","unit.variance","uv", "fixed.factor","fixed-factor")) { ID.fac <- "uv" mc$ID.fac <- "uv" } else if (ID.fac %in% c("auto.fix.first","unit.loading","ul","marker","ref", "marker.variable","marker-variable","ref.indicator", "reference.indicator","reference-indicator")) { ID.fac <- "ul" mc$ID.fac <- "ul" } else if (ID.fac %in% c("fx","ec","effects","effects.coding", "effects-coding","effects.code","effects-code")) { ID.fac <- "fx" mc$ID.fac <- "fx" } else stop('Invalid choice for argument: ID.fac = "', ID.fac, '"') ID.cat <- tolower(as.character(ID.cat)[1]) if (ID.cat %in% c("wu.estabrook.2016","wu.2016","wu.estabrook","wu","wu2016")) { ID.cat <- "wu" mc$ID.cat <- "wu" } else if (ID.cat %in% c("millsap","millsap.2004","millsap.tein.2004")) { ID.cat <- "millsap" mc$ID.cat <- "millsap" } else if (ID.cat %in% c("default","mplus","muthen")) { ID.cat <- "mplus" mc$ID.cat <- "mplus" } else if (ID.cat %in% c("joreskog","lisrel")) { ID.cat <- "lisrel" mc$ID.cat <- "lisrel" } else stop('Invalid choice for argument: ID.cat = "', ID.cat, '"') ## pass arguments to lavaan dots <- list(...) dots$debug <- debug dots$warn <- warn dots$group <- group ## check lavaan arguments if (!is.null(dots$model)) stop('A model should be specified only with the ', '"configural.model=" argument, not "model=".') if (is.null(dots$meanstructure)) { constrMeanStr <- c("intercepts","means") %in% c(group.equal, long.equal) if (is.null(dots$sample.mean) && is.null(dots$data) && !any(constrMeanStr)) { dots$meanstructure <- FALSE mc$meanstructure <- FALSE } else { dots$meanstructure <- TRUE mc$meanstructure <- TRUE } } ## lavaan template from configural model if (inherits(configural.model, c("lavaan","lavaanList"))) { lavTemplate <- configural.model ## check that first loading is not constrained unless ID.fac == "ul" if (ID.fac != "ul" && lavInspect(lavTemplate, "options")$auto.fix.first) { stop('The "configural.model" argument is a lavaan model fitted using ', 'auto.fix.first=TRUE (or std.lv=FALSE), which conflicts with the ', 'requested "ID.fac" method. To generate syntax using the fixed-', 'factor or effects-coding method of identification, set std.lv=TRUE', ' to prevent initial loadings from being fixed to 1 in the syntax.') } } else { lavArgs <- dots if (ID.fac != "ul") lavArgs$std.lv <- TRUE lavArgs$model <- configural.model # let lavaan() do its own checks lavArgs$do.fit <- FALSE lavTemplate <- do.call("cfa", lavArgs) #FIXME: violates NAMESPACE rules? Import cfa()? 
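    ## Note: when model syntax/data were supplied (rather than a fitted lavaan
    ## object), the template is built with do.fit = FALSE, so it is never fit
    ## to data. Its parameter matrices merely determine which parameters the
    ## syntax must specify, which are fixed vs. free, and their (starting) values.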
mc$configural.model <- lavTemplate } ## convert *.partial strings to parTables if (is.character(group.partial)) { if (length(group.partial) == 1L && group.partial == "") { group.partial <- data.frame(lhs = character(0), op = character(0), rhs = character(0)) } else { group.partial <- lavaan::lavParseModelString(group.partial, as.data.frame. = TRUE, warn = warn, debug = debug) } } #TODO: else {extract information from a measEq.partial object} if (is.character(long.partial)) { if (length(long.partial) == 1L && long.partial == "") { long.partial <- data.frame(lhs = character(0), op = character(0), rhs = character(0)) } else { long.partial <- lavaan::lavParseModelString(long.partial, as.data.frame. = TRUE, warn = warn, debug = debug) } } #TODO: else {extract information from a measEq.partial object} ## only relevant when there are longitudinal factors if (length(longFacNames) > 0L) { if (!is.atomic(auto)) stop("'auto' must be a non-negative integer or the character 'all'.") if (is.logical(auto)) { if (auto) auto <- "all" else auto <- 0L} if (is.factor(auto)) auto <- as.character(auto) if (is.character(auto) && auto != "all") stop("'auto' must be a non-negative integer or the character 'all'.") if (is.numeric(auto)) { auto <- as.integer(auto[1]) # only the first integer if (auto < 1L) auto <- NULL } mc$auto <- auto } ## extract options and other information parameterization <- lavInspect(lavTemplate, "options")$parameterization meanstructure <- lavInspect(lavTemplate, "options")$meanstructure nG <- lavInspect(lavTemplate, "ngroups") ## names of ordinal indicators, number of thresholds for each allOrdNames <- lavNames(lavTemplate, type = "ov.ord") if (length(allOrdNames)) { nThr <- table(sapply(strsplit(lavNames(lavTemplate, "th"), split = "|", fixed = TRUE), "[", i = 1)) } else nThr <- numeric(0) if (length(allOrdNames) && ID.cat == "millsap") { ## Check for ID.thr if (is.numeric(ID.thr)) { if (length(ID.thr) == 1L) ID.thr <- rep(ID.thr, 2) ID.thr <- sapply(allOrdNames, function(x) ID.thr[1:2], simplify = FALSE) } else if (is.list(ID.thr)) { if (length(setdiff(allOrdNames, names(ID.thr)))) stop('If the same thresholds will not be used for all ordered indicators,', ' then "ID.thr" must specify 2 integers per ordered indicator in ', 'a named list (using names of ordered indicators).') } ## check identification methods ID.fac <- "ul" if (parameterization != "theta") stop('If ID.cat == "millsap", you must ', 'use parameterization = "theta"') } if (length(allOrdNames) && ID.cat == "lisrel") { if (parameterization != "theta") stop('If ID.cat == "lisrel", you must ', 'use parameterization = "theta"') ## thresholds must be constrained to equality if (!"thresholds" %in% group.equal) group.equal <- c("thresholds", group.equal) if (!"thresholds" %in% long.equal) long.equal <- c("thresholds", long.equal) ## so remove any thresholds from *.partial partial.thr <- group.partial$op == "|" if (any(partial.thr)) group.partial <- group.partial[!partial.thr, ] partial.thr <- long.partial$op == "|" if (any(partial.thr)) long.partial <- long.partial[!partial.thr, ] } if (!meanstructure) { ## make sure *.equal includes no mean-structure parameters eq.means <- which(group.equal %in% c("means","intercepts")) if (length(eq.means)) group.equal <- group.equal[-eq.means] eq.means <- which(long.equal %in% c("means","intercepts")) if (length(eq.means)) long.equal <- long.equal[-eq.means] ## make sure *.partial includes no mean-structure parameters partial.means <- group.partial$op == "~1" if (any(partial.means)) 
group.partial <- group.partial[!partial.means, ] partial.means <- long.partial$op == "~1" if (any(partial.means)) long.partial <- long.partial[!partial.means, ] } mc$group.partial <- group.partial[c("lhs","op","rhs")] #FIXME: any more? "block" for multilevel? mc$long.partial <- long.partial[c("lhs","op","rhs")] ## check logic of constraints if (length(allOrdNames) && parameterization == "delta") { if ("residuals" %in% long.equal) { stop('Residual variances cannot be tested for invariance ', 'across repeated measures when parameterization = "delta". \n', 'Please set parameterization = "theta". \n') } if ("residuals" %in% group.equal) { stop('Residual variances cannot be tested for invariance ', 'across groups when parameterization = "delta". \n', 'Please set parameterization = "theta". \n') } } if (warn) { if (any(c("lv.variances","lv.autocov") %in% long.equal) && !"loadings" %in% long.equal) warning('Latent (co)variances are not comparable over repeated measures ', 'if their respective factor loadings are not equal ', 'over repeated measures.') if (any(c("lv.variances","lv.covariances") %in% group.equal) && !"loadings" %in% group.equal) warning('Latent (co)variances are not comparable across groups ', 'if their respective factor loadings are not equal across groups.') if ("intercepts" %in% long.equal && !"loadings" %in% long.equal) warning('Indicator intercepts are not comparable over repeated measures ', 'if their respective factor loadings are not equal ', 'over repeated measures.') if ("intercepts" %in% group.equal && !"loadings" %in% group.equal) warning('Indicator intercepts are not comparable across groups ', 'if their respective factor loadings are not equal across groups.') if ("means" %in% long.equal && !all(c("loadings","intercepts") %in% long.equal)) warning('Latent means are not comparable over repeated measures if their ', 'respective factor loadings and intercepts are not equal ', 'over repeated measures.') if ("means" %in% group.equal && !all(c("loadings","intercepts") %in% group.equal)) warning('Latent means are not comparable across groups if their ', 'respective factor loadings and intercepts are not equal ', 'across groups.') if ("resid.autocov" %in% long.equal && !"residuals" %in% long.equal) warning('Residual auto-covariances might not be comparable over repeated ', 'measures if their respective residual variances are not equal ', 'over repeated measures.') if ("residual.covariances" %in% group.equal && !"residuals" %in% group.equal) warning('Residual covariances might not be comparable across groups if ', 'their respective residual variances are not equal across groups.') if ("lv.autocov" %in% long.equal && !"lv.variances" %in% long.equal) warning('Latent auto-covariances might not be comparable over repeated ', 'measures if their respective latent variances are not equal ', 'over repeated measures.') if ("lv.covariances" %in% group.equal && !"lv.variances" %in% group.equal) warning('Latent covariances might not be comparable across groups if ', 'their respective latent variances are not equal across groups.') } ## ------------------ ## Parameter Matrices ## ------------------ ## Parameter matrices used for labels, fixed/free values, and whether to specify GLIST.free <- lavInspect(lavTemplate, "free") if (nG == 1L) GLIST.free <- list(`1` = GLIST.free) ## only save relevant matrices to specify pmats <- intersect(c("tau","lambda","beta","nu","theta","alpha","psi", if (parameterization == "delta") "delta" else NULL), names(GLIST.free[[1]])) if ("beta" %in% pmats && 
ID.fac != "ul") { ID.fac <- "ul" #FIXME: could use effects-coding with relative ease? mc$ID.fac <- ID.fac message('Higher-order factors detected. ID.fac set to "ul".') } ## matrices with estimates depends on class of model if (inherits(lavTemplate, "lavaan")) { GLIST.est <- lavInspect(lavTemplate, "est") if (nG == 1L) GLIST.est <- list(`1` = GLIST.est) } else if (inherits(lavTemplate, "lavaanList")) { nn <- names(lavTemplate@Model@GLIST) #FIXME: will @Model continue to exist? GLIST.est <- list() for (g in 1:nG) { GLIST.est[[g]] <- list() for (p in pmats) { GLIST.est[[g]][[p]] <- lavTemplate@Model@GLIST[[ which(nn == p)[g] ]] ## add dimnames to matrices dimnames(GLIST.est[[g]][[p]]) <- dimnames(GLIST.free[[g]][[p]]) } } } for (g in 1:nG) { GLIST.est[[g]] <- GLIST.est[[g]][pmats] GLIST.free[[g]] <- GLIST.free[[g]][pmats] if (g > 1L) { ## make sure all groups have the same observed & latent variables same.obs <- all(rownames(GLIST.free[[g]]$lambda) == rownames(GLIST.free[[1]]$lambda)) same.lat <- all(colnames(GLIST.free[[g]]$lambda) == colnames(GLIST.free[[1]]$lambda)) if (!same.obs) stop('Models contain different observed variables across ', 'groups/blocks. Configural invariance impossible.') if (!same.lat) stop('Models contain different latent variables across ', 'groups/blocks. Configural invariance impossible.') } } ## FIXME: check for others? (e.g., test invariance across multiple levels?) ## In general, specify if GLIST.free > 0 | (GLIST.free == 0 & GLIST.est != 0) ## - tau : specify all ## - lambda: specify any nonzero in free + fixed-nonzero (e.g., auto.fix.first) ## - beta : treat as second-order lambda ## - nu : specify all ## - theta : specify diagonal (unless delta?) + any nonzero off-diagonal ## - delta : specify ONLY if parameterization == "delta" ## - alpha : specify all ## - psi : specify all GLIST.specify <- sapply(names(GLIST.free), function(g) list()) for (g in 1:nG) { for (p in pmats) { ## THRESHOLDS if (p == "tau") { GLIST.specify[[g]]$tau <- GLIST.free[[g]]$tau == 0 GLIST.specify[[g]]$tau[ , 1] <- TRUE } ## LOADINGS if (p == "lambda") { free.loading <- GLIST.free[[g]]$lambda > 0L fixed.nonzero.loading <- GLIST.free[[g]]$lambda == 0L & GLIST.est[[g]]$lambda != 0 GLIST.specify[[g]]$lambda <- free.loading | fixed.nonzero.loading } ## SECOND-ORDER LOADINGS if (p == "beta") { free.loading <- GLIST.free[[g]]$beta > 0L fixed.nonzero.loading <- GLIST.free[[g]]$beta == 0L & GLIST.est[[g]]$beta != 0 GLIST.specify[[g]]$beta <- free.loading | fixed.nonzero.loading } ## INTERCEPTS if (p == "nu") { GLIST.specify[[g]]$nu <- GLIST.free[[g]]$nu == 0 GLIST.specify[[g]]$nu[ , 1] <- TRUE } ## LATENT MEANS if (p == "alpha") { GLIST.specify[[g]]$alpha <- GLIST.free[[g]]$alpha == 0 GLIST.specify[[g]]$alpha[ , 1] <- TRUE } ## LATENT (CO)VARIANCES if (p == "psi") { GLIST.specify[[g]]$psi <- matrix(TRUE, nrow = nrow(GLIST.free[[g]]$psi), ncol = ncol(GLIST.free[[g]]$psi), dimnames = dimnames(GLIST.free[[g]]$psi)) ## only specify lower triangle GLIST.specify[[g]]$psi[upper.tri(GLIST.specify[[g]]$psi)] <- FALSE } ## RESIDUAL (CO)VARIANCES if (p == "theta") { free.var <- GLIST.free[[g]]$theta > 0L fixed.nonzero.var <- GLIST.free[[g]]$theta == 0L & GLIST.est[[g]]$theta != 0 GLIST.specify[[g]]$theta <- free.var | fixed.nonzero.var ## can't specify for ordinal indicators using delta parameterization if (parameterization == "delta") diag(GLIST.specify[[g]]$theta)[allOrdNames] <- FALSE ## only specify lower triangle GLIST.specify[[g]]$theta[upper.tri(GLIST.specify[[g]]$theta)] <- FALSE } ## SCALING 
FACTORS (delta parameters for latent item-responses) if (p == "delta") { GLIST.specify[[g]]$delta <- GLIST.free[[g]]$delta == 1 GLIST.specify[[g]]$delta[ , 1] <- parameterization == "delta" } ## end loops } } ## check for any cross-loadings #TODO: special check for bifactor models possible? Find factors whose indicators all cross-load... anyXload <- FALSE for (g in 1:nG) { if (any(apply(GLIST.specify[[g]]$lambda, 1, sum) > 1)) anyXload <- TRUE } ## can the effects-coding identification method be used? if (ID.fac == "fx" && anyXload) { stop('Effects-coding method of factor identification ', '("ID.fac") unavailable in models with cross-loadings.') } ## Warn about constraining intercepts but not means freeMeans <- ("intercepts" %in% group.equal && !("means" %in% group.equal)) || ("intercepts" %in% long.equal && !("means" %in% long.equal) ) if (ID.fac == "uv" && anyXload && freeMeans) { warning('A factor\'s mean cannot be freed unless it has at least one ', 'indicator without a cross-loading whose intercept is constrained ', 'to equality. Use cat(as.character()) to check whether the syntax ', 'returned by measEq.syntax() must be manually adapted to free the ', 'necessary latent means.') } ## If it is estimated in the user's configural model, free it (NA). ## If it is specified as fixed but != 0, retain fixed value. GLIST.values <- sapply(names(GLIST.free), function(g) list()) for (g in 1:nG) { GLIST.values[[g]] <- mapply(function(est, free) { est[free > 0L] <- NA est }, SIMPLIFY = FALSE, est = GLIST.est[[g]], free = GLIST.free[[g]]) ## constrain first loadings to 1 and first indicators to 0? if (ID.fac == "ul") { ## matrix to store whether each indicator is a reference indicator lambda.first <- matrix(FALSE, nrow = nrow(GLIST.values[[g]]$lambda), ncol = ncol(GLIST.values[[g]]$lambda), dimnames = dimnames(GLIST.values[[g]]$lambda)) if ("beta" %in% pmats) { beta.first <- matrix(FALSE, nrow = nrow(GLIST.values[[g]]$beta), ncol = ncol(GLIST.values[[g]]$beta), dimnames = dimnames(GLIST.values[[g]]$beta)) } ## loop over factors to constrain loadings to 1 for (f in colnames(GLIST.values[[g]]$lambda)) { ## if any loading(s) is(are) fixed to 1 already, no changes needed ones.lambda <- which(GLIST.values[[g]]$lambda[ , f] == 1L) if ("beta" %in% pmats) ones.beta <- which(GLIST.values[[g]]$beta[ , f] == 1L) any1.lambda <- length(ones.lambda) > 0L any1.beta <- if ("beta" %in% pmats) length(ones.beta) > 0L else FALSE if (!any1.lambda && !any1.beta) { ## If not already indicated, find the first indicator and fix it to 1. ## Prioritize latent indicators to be first (in case observed has cross-loading) if ("beta" %in% pmats) { indicators <- names(which(GLIST.specify[[g]]$beta[ , f])) if (length(indicators)) { first.indicator <- indicators[1] } else first.indicator <- NULL } else first.indicator <- NULL if (length(first.indicator)) { ## only true if ("beta" %in% pmats) GLIST.values[[g]]$beta[first.indicator, f] <- 1L beta.first[first.indicator, f] <- TRUE } else { ## no latent indicators, so look in lambda indicators <- names(which(GLIST.specify[[g]]$lambda[ , f])) first.indicator <- indicators[1] #FIXME: no chance of NA by now, right? 
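          ## unit-loading identification: fix this reference indicator's
          ## loading to 1 and flag it in lambda.first, so its intercept can be
          ## fixed to 0 in the meanstructure loop below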
GLIST.values[[g]]$lambda[first.indicator, f] <- 1L lambda.first[first.indicator, f] <- TRUE } ## otherwise, use first fixed == 1 indicator as the marker variable } else if (any1.beta) { beta.first[ones.beta[1], f] <- TRUE } else if (any1.lambda) { lambda.first[ones.lambda[1], f] <- TRUE } } ## loop over indicators to constrain intercepts to zero if (meanstructure) { ## manifest indicators for (i in rownames(GLIST.specify[[g]]$lambda)) { ## for the first indicator of a construct, constrain to zero first.indicator <- lambda.first[i, ] if (sum(first.indicator) > 1L) stop('The intercept of indicator "', i, '" can only be fixed to zero ', 'in order to identify one latent mean, but it is specified as ', 'the first indicator of the following factors:\n\t', paste(names(which(first.indicator)), collapse = ", "), '\n', 'Please respecify the model so that each factor has a unique ', 'first indicator to use as a reference indicator.') if (any(first.indicator)) GLIST.values[[g]]$nu[i, 1] <- 0 } ## latent indicators of higher-order constructs if ("beta" %in% pmats) for (i in rownames(GLIST.specify[[g]]$beta)) { ## for the first indicator of a construct, constrain to zero first.indicator <- beta.first[i, ] if (sum(first.indicator) > 1L) stop('The intercept of indicator "', i, '" can only be fixed to zero ', 'in order to identify one factor mean, but it is specified as ', 'the first indicator of the following factors:\n\t', paste(names(which(first.indicator)), collapse = ", "), '\n', 'Please respecify the model so that each factor has a unique ', 'first indicator to use as a reference indicator.') if (any(first.indicator)) { GLIST.values[[g]]$alpha[i, 1] <- 0 } else GLIST.values[[g]]$alpha[i, 1] <- NA } } } } ## Make labels GLIST.labels <- sapply(names(GLIST.free), function(g) list()) for (g in 1:nG) { for (p in pmats) { if (p == "tau") { ## THRESHOLDS GLIST.labels[[g]]$tau <- cbind(gsub(x = rownames(GLIST.free[[g]]$tau), pattern = "|t", replacement = ".thr", fixed = TRUE)) dimnames(GLIST.labels[[g]]$tau) <- dimnames(GLIST.free[[g]]$tau) } else { ## ANY OTHER PARAMETERS GLIST.labels[[g]][[p]] <- matrix("", nrow = nrow(GLIST.free[[g]][[p]]), ncol = ncol(GLIST.free[[g]][[p]]), dimnames = dimnames(GLIST.free[[g]][[p]])) for (RR in rownames(GLIST.free[[g]][[p]])) { for (CC in colnames(GLIST.free[[g]][[p]])) { GLIST.labels[[g]][[p]][RR, CC] <- getLabel(lavTemplate, parMat = p, RR = RR, CC = CC) } } } ## end loops } ## no labels for scaling factors (cannot equate, not a measurement parameter) GLIST.labels[[g]]$delta <- NULL } ## ------------------------------------ ## Preliminary checks on model and data ## ------------------------------------ ## check longitudinal factor names if (!is.list(longFacNames)) stop('"longFacNames" must be a list of character vectors.') ## check that no longitudinal factors are only at 1 occasion if (length(longFacNames)) longFacNames <- longFacNames[sapply(longFacNames, length) > 1L] ## also check longIndNames, and each non-NULL element if (!is.list(longIndNames)) stop('"longIndNames" must be a list of character vectors.') if (length(longIndNames)) { longIndList <- sapply(longIndNames, is.character) if (!all(longIndList)) stop('"longIndNames" must be a list of character vectors.') ## No problem if any(length == 1L). It just won't be constrained. 
} ## names of factors in syntax allFacNames <- lapply(GLIST.free, function(x) colnames(x$lambda)) ## collapse names of longitudinal factors (plus non-longitudinal factors) # reducedFacNames <- c(names(longFacNames), setdiff(unlist(allFacNames), # unlist(longFacNames))) ## check for longitudinal indicator names, automatically generate if empty make.longIndNames <- length(longIndNames) == 0L for (f in names(longFacNames)) { ## time-specific factor names fs <- longFacNames[[f]] nT <- length(fs) # number of occasions ## get indicators of each indNames <- sapply(fs, function(ff) { names(which(GLIST.specify[[1]]$lambda[ , ff])) }, simplify = FALSE) if (make.longIndNames) { # check for same number of indicators, match across factors nInd <- length(indNames[[1]]) if (!all(sapply(indNames, length) == nInd)) stop('The number of indicators for longitudinal factor "', f, '" differs across measurement occasions. Please use the ', '"longIndNames" argument to specify which longitudinal indicators', ' are the same indicator on different occasions of measurement.') if (nInd > 0L) for (i in 1:nInd) { longIndNames[[paste0("._", f, "_.ind.", i)]] <- sapply(indNames, "[", i = i, USE.NAMES = FALSE) } } else { ## add unique indicators per factor (omitted from user-specified matches) ## NO LONGER NECESSARY # for (i in fs) { # extraIndicators <- setdiff(indNames[[i]], unlist(longIndNames[[f]])) # longIndNames[[f]][extraIndicators] <- extraIndicators # } } } ## check none have cross-loadings longIndTable <- table(unlist(longIndNames)) if (any(longIndTable > 1L)) stop('Some longitudinal indicators define more than one factor:\n ', paste(names(longIndTable[longIndTable > 1L]), collapse = ", "), "\n ", 'The "longIndNames=" argument must be explicitly declared.') ## check equivalence of data type (ordinal vs. continuous) across time longOrdNames <- sapply(longIndNames, "%in%", table = allOrdNames, simplify = FALSE) someNotAll <- sapply(longOrdNames, function(i) any(i) & !all(i)) if (any(someNotAll)) { stop('At least one longitudinal indicator is declared as "ordered" on', ' at least one, but not every, occasion: \n ', paste(names(which(someNotAll)), collapse = ", ")) } ## check number of thresholds/categories is equivalent across time allOrd <- sapply(longOrdNames, all) if (length(allOrd)) for (i in which(allOrd)) { checkThr <- nThr[ longIndNames[[ names(allOrd)[i] ]] ] if (!all(checkThr == checkThr[1])) stop('These "ordered" longitudinal indicators do not have the same ', 'number of thresholds (endorsed categories) on every occasion: \n', paste(names(checkThr), collapse = ", "), "\nConsider collapsing rarely endorsed categories.") } ## create a backward-key for finding long(Fac/Ind)Names from variable names longFacKey <- rep(names(longFacNames), times = sapply(longFacNames, length)) names(longFacKey) <- unlist(longFacNames) longIndKey <- rep(names(longIndNames), times = sapply(longIndNames, length)) names(longIndKey) <- unlist(longIndNames) mc$longFacNames <- longFacNames mc$longIndNames <- longIndNames ## ----------------- ## Apply constraints ## ----------------- ## THRESHOLDS (+ intercept & variance ID constraints for allOrdNames) ## longitudinal constraints (one group at a time, but same across groups) for (g in 1:nG) { ## loop over ordinal indicators for (i in allOrdNames) { ## which other variables are this same indicator? 
longInds <- names(longIndKey)[ which(longIndKey == longIndKey[i]) ] if (length(longInds) == 0L) next ## keep track of how many thresholds for the i_th indicator have been ## constrained, in case identification constraints can be released nEqThr <- 0L ## loop over thresholds of the i_th ordinal indicator for (th in 1:(nThr[i])) { ## (ADD) constraints across repeated measures? equate.long <- "thresholds" %in% long.equal ## check whether not to equate because it is in long.partial partial.th <- long.partial$op == "|" & long.partial$rhs == paste0("t", th) if (equate.long && any(partial.th)) { partial.inds <- longIndNames[[ long.partial$lhs[which(partial.th)] ]] equate.long <- !i %in% partial.inds } ## check whether to equate for identification (overrides *.partial) if (ID.cat == "millsap") { ## always equate the first (or only, if binary) if (th == ID.thr[[i]][1]) { equate.long <- TRUE ## remove this from long.partial, if necessary rm.th <- which(long.partial$lhs == longIndKey[i] & partial.th) if (length(rm.th)) long.partial <- long.partial[-rm.th, ] } ## for the first indicator of a construct, equate the second fs <- which(GLIST.specify[[g]]$lambda[i, ]) first.indicator <- sapply(fs, function(f) { lams <- GLIST.specify[[g]]$lambda[ , f] lam.eq.1 <- which(GLIST.values[[g]]$lambda[ , f] == 1) if (length(lam.eq.1)) return(names(lams[ lam.eq.1[1] ]) == i) names(which(lams))[1] == i }) if (th == ID.thr[[i]][2] && any(first.indicator)) { equate.long <- TRUE if (length(fs) > 1L && warn) warning('Millsap & Tein\'s (2004) identification constraints might ', 'not be optimal when the reference indicator ("', i, '") has a cross-loading (on factors "', paste0(names(fs), collapse = '", "'), '")') ## remove this from long.partial, if necessary rm.th <- which(long.partial$lhs == longIndKey[i] & partial.th) if (length(rm.th)) long.partial <- long.partial[-rm.th, ] } } ## apply longitudinal constraint? 
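        ## Note: longitudinal equality is imposed via shared labels, so below,
        ## the i_th indicator's threshold inherits the label of the
        ## corresponding threshold at the first occasion (longInds[1])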
if (equate.long) { ## iterate count of constrained thresholds nEqThr <- nEqThr + 1L ## apply longitudinal constraint this.th <- paste0(i, "|t", th) first.th <- paste0(longInds[1], "|t", th) GLIST.labels[[g]]$tau[this.th, 1] <- GLIST.labels[[g]]$tau[first.th, 1] } } ## end loop over thresholds ## check whether enough thresholds were equated to free ## IDENTIFICATION CONSTRAINTS on intercepts & residuals equate.int <- "intercepts" %in% long.equal && !any(long.partial$lhs == longIndKey[i] & long.partial$op == "~1") equate.resid <- "residuals" %in% long.equal && !any(long.partial$lhs == longIndKey[i] & long.partial$rhs == longIndKey[i] & long.partial$op == "~~") #FIXME: leave resid==0 for reference indicators if (i == longInds[1]) { if (ID.cat == "lisrel") { ## always estimate intercepts, and variances unless binary GLIST.values[[g]]$nu[i, 1] <- NA diag(GLIST.values[[g]]$theta)[i] <- if (nThr[i] == 1L) 1 else NA } else { ## always set reference occasion's intercepts to 0 and variances to 1 GLIST.values[[g]]$nu[i, 1] <- 0 if (parameterization == "theta") { diag(GLIST.values[[g]]$theta)[i] <- 1 } else { GLIST.values[[g]]$delta[i, 1] <- 1 } } } else if (ID.cat == "wu") { ## priority to freeing intercepts if (nEqThr == 0L || equate.int) { GLIST.values[[g]]$nu[i, 1] <- 0 } else GLIST.values[[g]]$nu[i, 1] <- NA if (nEqThr == 0L || (nEqThr < 2L && !equate.int) || equate.resid) { ## keep (residual) variances fixed if (parameterization == "theta") { diag(GLIST.values[[g]]$theta)[i] <- 1 } else { GLIST.values[[g]]$delta[i, 1] <- 1 } } else { ## free (residual) variances if (parameterization == "theta") { diag(GLIST.values[[g]]$theta)[i] <- NA } else { GLIST.values[[g]]$delta[i, 1] <- NA } } } else if (ID.cat %in% c("mplus","millsap")) { ## never free intercepts, only variances if (nEqThr == 0L || equate.resid) { ## keep (residual) variances fixed if (parameterization == "theta") { diag(GLIST.values[[g]]$theta)[i] <- 1 } else { GLIST.values[[g]]$delta[i, 1] <- 1 } } else { ## free (residual) variances if (parameterization == "theta") { diag(GLIST.values[[g]]$theta)[i] <- NA } else { GLIST.values[[g]]$delta[i, 1] <- NA } } } else if (ID.cat == "lisrel") { ## always estimate intercepts, and variances unless binary GLIST.values[[g]]$nu[i, 1] <- NA diag(GLIST.values[[g]]$theta)[i] <- if (nThr[i] == 1L) 1 else NA } } } ## group constraints if (nG > 1L) for (g in 1:nG) { ## loop over ordinal indicators for (i in allOrdNames) { ## keep track of how many thresholds for the i_th indicator have been ## constrained, in case identification constraints can be released nEqThr <- 0L ## loop over thresholds of the i_th ordinal indicator for (th in 1:(nThr[i])) { ## (REMOVE) constraints across groups? 
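        ## Note: labels start out identical across groups, so equality across
        ## groups is the default state; a parameter is "freed" across groups
        ## by appending a group-specific ".g<#>" suffix to its label below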
equate.group <- "thresholds" %in% group.equal ## check whether not to equate because it is in group.partial partial.th <- group.partial$lhs == i & group.partial$op == "|" & group.partial$rhs == paste0("t", th) if (equate.group) equate.group <- !any(partial.th) ## check whether to equate for identification (overrides *.partial) if (ID.cat == "millsap") { ## always equate the first (or only, if binary) if (th == ID.thr[[i]][1]) { equate.group <- TRUE ## remove this from long.partial, if necessary rm.th <- which(partial.th) if (length(rm.th)) group.partial <- group.partial[-rm.th, ] } ## for the first indicator of a construct, equate the second fs <- which(GLIST.specify[[g]]$lambda[i, ]) first.indicator <- sapply(fs, function(f) { lams <- GLIST.specify[[g]]$lambda[ , f] lam.eq.1 <- which(GLIST.values[[g]]$lambda[ , f] == 1) if (length(lam.eq.1)) return(names(lams[ lam.eq.1[1] ]) == i) names(which(lams))[1] == i }) if (th == ID.thr[[i]][2] && any(first.indicator)) { equate.group <- TRUE if (length(fs) > 1L && warn) warning('Millsap & Tein`s (2004) identification constraints might ', 'not be optimal when the reference indicator ("', i, '") has a cross-loading (on factors "', paste0(names(fs), collapse = '", "'), '")') ## remove this from long.partial, if necessary rm.th <- which(partial.th) if (length(rm.th)) group.partial <- group.partial[-rm.th, ] } } else if (ID.cat == "lisrel") { ## always fix the first (or only, if binary) to zero if (th == 1L) GLIST.values[[g]]$tau[paste0(i, "|t", th), 1] <- 0 ## always fix the second to one if (th == 2L) GLIST.values[[g]]$tau[paste0(i, "|t", th), 1] <- 1 ## estimate any others if (th > 2L) GLIST.values[[g]]$tau[paste0(i, "|t", th), 1] <- NA } ## apply group-specific label, unless constrained if (!equate.group) { ## row in GLIST RR <- paste0(i, "|t", th) GLIST.labels[[g]]$tau[RR, 1] <- paste0(GLIST.labels[[g]]$tau[RR, 1], ".g", g) } else nEqThr <- nEqThr + 1L # iterate count of constrained thresholds } ## end loop over thresholds ## check whether enough thresholds were equated to free ## IDENTIFICATION CONSTRAINTS on intercepts & residuals. ## Note: Group 1 constraints already set in longitudinal loop. ## DON'T OVERWRITE FREED CONSTRAINTS AFTER TIME 1. equate.int <- "intercepts" %in% group.equal && !any(group.partial$lhs == i & group.partial$op == "~1") equate.resid <- "residuals" %in% group.equal && !any(group.partial$lhs == i & group.partial$rhs == i & group.partial$op == "~~") if (g > 1L && ID.cat == "wu") { ## priority to freeing intercepts #FIXME: binary indicators, latent mean arbitrarily freed, nesting problems if (nEqThr >= 1L && !equate.int) GLIST.values[[g]]$nu[i, 1] <- NA if ((nEqThr >= 2L || (nEqThr >= 1L && equate.int)) && !equate.resid) { ## free (residual) variances if (parameterization == "theta") { diag(GLIST.values[[g]]$theta)[i] <- NA } else { GLIST.values[[g]]$delta[i, 1] <- NA } } } else if (g > 1L && ID.cat %in% c("mplus","millsap")) { ## never free intercepts, only variances if (nEqThr >= 1L && !equate.resid) { ## free (residual) variances if (parameterization == "theta") { diag(GLIST.values[[g]]$theta)[i] <- NA } else { GLIST.values[[g]]$delta[i, 1] <- NA } } } else if (g > 1L && ID.cat == "lisrel") { ## always estimate intercepts, and variances unless binary GLIST.values[[g]]$nu[i, 1] <- NA diag(GLIST.values[[g]]$theta)[i] <- if (nThr[i] == 1L) 1 else NA } } } ## LATENT MEANS ## longitudinal constraints (one group at a time, but same across groups) if (meanstructure) for (g in 1:nG) { ## fix or free factor means? 
if (ID.fac == "uv") { GLIST.values[[g]]$alpha[ , 1] <- 0 # free below, if any loading is constrained ## freed when any loading is constrained to equality } else if ("beta" %in% pmats) { ## latent indicators of any higher-order factors already set to 0 or NA ## in GLIST.values loop above } else GLIST.values[[g]]$alpha[ , 1] <- NA ## loop over factors for (f in rownames(GLIST.labels[[g]]$alpha)) { ## which other variables are this same factor? longFacs <- names(longFacKey)[ which(longFacKey == longFacKey[f]) ] if (length(longFacs) == 0L) { ## not a longitudinal factor, set first group's mean to 0 for Millsap if (ID.cat == "millsap" && g == 1L) GLIST.values[[g]]$alpha[f, 1] <- 0 next } ## first time a factor is measured, set first group's mean to 0 for Millsap if (ID.cat == "millsap" && g == 1L && longFacs[1] == f) { GLIST.values[[g]]$alpha[f, 1] <- 0 } ## assign labels equate.means <- "means" %in% long.equal && !any(long.partial$lhs == longFacKey[f] & long.partial$op == "~1") if (equate.means) { GLIST.labels[[g]]$alpha[f, 1] <- GLIST.labels[[g]]$alpha[longFacs[1], 1] } } } ## group constraints if (meanstructure && nG > 1L) for (g in 1:nG) { ## loop over factors for (f in rownames(GLIST.labels[[g]]$alpha)) { ## assign labels equate.means <- "means" %in% group.equal && !any(group.partial$lhs == f & group.partial$op == "~1") if (!equate.means) { GLIST.labels[[g]]$alpha[f, 1] <- paste0(GLIST.labels[[g]]$alpha[f, 1], ".g", g) } } } ## LATENT VARIANCES ## longitudinal constraints (one group at a time, but same across groups) for (g in 1:nG) { ## fix or free factor variances? if (ID.fac == "uv") { diag(GLIST.values[[g]]$psi) <- 1 # free below, if any loading is constrained ## freed when any loading is constrained to equality } else diag(GLIST.values[[g]]$psi) <- NA ## loop over factors for (f in colnames(GLIST.labels[[g]]$lambda)) { ## which other variables are this same factor? longFacs <- names(longFacKey)[ which(longFacKey == longFacKey[f]) ] if (length(longFacs) == 0L) next ## assign labels equate.var <- "lv.variances" %in% long.equal && !any(long.partial$lhs == longFacKey[f] & long.partial$op == "~~" & long.partial$rhs == longFacKey[f]) if (equate.var) { GLIST.labels[[g]]$psi[f, f] <- GLIST.labels[[g]]$psi[longFacs[1], longFacs[1]] } } } ## group constraints if (nG > 1L) for (g in 1:nG) { ## loop over factors for (f in colnames(GLIST.labels[[g]]$lambda)) { ## assign labels equate.var <- "lv.variances" %in% group.equal && !any(group.partial$lhs == f & group.partial$op == "~~" & group.partial$rhs == f) if (!equate.var) { GLIST.labels[[g]]$psi[f, f] <- paste0(GLIST.labels[[g]]$psi[f, f], ".g", g) } } } ## LOADINGS ## longitudinal constraints (one group at a time, but same across groups) for (g in 1:nG) { ## loop over factors for (f in colnames(GLIST.labels[[g]]$lambda)) { ## which other factors are this same factor? longFacs <- names(longFacKey)[ which(longFacKey == longFacKey[f]) ] if (length(longFacs) == 0L) next ## loop over any manifest indicators within each factor for (i in names(which(GLIST.specify[[g]]$lambda[ , f])) ) { ## which other variables are this same indicator? 
longInds <- names(longIndKey)[ which(longIndKey == longIndKey[i]) ] if (length(longInds) == 0L) next ## assign labels equate.load <- "loadings" %in% long.equal && !any(long.partial$lhs == longFacKey[f] & long.partial$op == "=~" & long.partial$rhs == longIndKey[i]) if (equate.load) { GLIST.labels[[g]]$lambda[i, f] <- GLIST.labels[[g]]$lambda[longInds[1], longFacs[1]] ## free factor variance(s) after Time 1 if (ID.fac == "uv" && f %in% longFacs[-1]) diag(GLIST.values[[g]]$psi)[f] <- NA } } ## loop over any latent indicators within each factor if ("beta" %in% pmats) for (i in names(which(GLIST.specify[[g]]$beta[ , f])) ) { ## which other factors are this same factor? longInds <- names(longFacKey)[ which(longFacKey == longFacKey[i]) ] if (length(longInds) == 0L) next ## assign labels equate.load <- "regressions" %in% long.equal && !any(long.partial$lhs == longFacKey[f] & long.partial$op == "=~" & long.partial$rhs == longFacKey[i]) if (equate.load) { GLIST.labels[[g]]$beta[i, f] <- GLIST.labels[[g]]$beta[longInds[1], longFacs[1]] } } } } ## group constraints if (nG > 1L) for (g in 1:nG) { ## loop over factors for (f in colnames(GLIST.labels[[g]]$lambda)) { ## loop over any manifest indicators within each factor for (i in names(which(GLIST.specify[[g]]$lambda[ , f])) ) { ## assign labels equate.load <- "loadings" %in% group.equal && !any(group.partial$lhs == f & group.partial$op == "=~" & group.partial$rhs == i) if (!equate.load) { GLIST.labels[[g]]$lambda[i, f] <- paste0(GLIST.labels[[g]]$lambda[i, f], ".g", g) } else if (ID.fac == "uv" && g > 1L) { ## free factor variance(s) in group(s) other than the first diag(GLIST.values[[g]]$psi)[f] <- NA } } ## loop over any latent indicators within each factor if ("beta" %in% pmats) for (i in names(which(GLIST.specify[[g]]$beta[ , f])) ) { ## assign labels equate.load <- "regressions" %in% group.equal && !any(group.partial$lhs == f & group.partial$op == "=~" & group.partial$rhs == i) if (!equate.load) { GLIST.labels[[g]]$beta[i, f] <- paste0(GLIST.labels[[g]]$beta[i, f], ".g", g) } } } } ## INTERCEPTS ## longitudinal constraints (one group at a time, but same across groups) if (meanstructure) for (g in 1:nG) { ## loop over indicators for (i in lavNames(lavTemplate, "ov.ind", group = g)) { ## which other variables are this same indicator? 
longInds <- names(longIndKey)[ which(longIndKey == longIndKey[i]) ] if (length(longInds) == 0L) next ## assign labels equate.int <- "intercepts" %in% long.equal && !any(long.partial$lhs == longIndKey[i] & long.partial$op == "~1") if (equate.int) { GLIST.labels[[g]]$nu[i, 1] <- GLIST.labels[[g]]$nu[longInds[1], 1] ## free factor mean(s) after Time 1 only if an indicator without a ## cross-loading has an equality-constrained intercept if (ID.fac == "uv") { ## factors this indicator measures fs <- colnames(GLIST.specify[[g]]$lambda)[ GLIST.specify[[g]]$lambda[i,] ] only.measures.1 <- length(fs) == 1L ## name(s) of longitudinal factor(s) LFN <- longFacKey[fs] not.time.1 <- fs[1] %in% names(which(longFacKey == LFN))[-1] if (only.measures.1 && not.time.1) GLIST.values[[g]]$alpha[fs, 1] <- NA } } } } ## group constraints if (meanstructure && nG > 1L) for (g in 1:nG) { ## loop over indicators for (i in lavNames(lavTemplate, "ov.ind", group = g)) { ## assign labels equate.int <- "intercepts" %in% group.equal && !any(group.partial$lhs == i & group.partial$op == "~1") if (!equate.int) { GLIST.labels[[g]]$nu[i, 1] <- paste0(GLIST.labels[[g]]$nu[i, 1], ".g", g) } else if (ID.fac == "uv") { ## factors this indicator measures fs <- colnames(GLIST.specify[[g]]$lambda)[ GLIST.specify[[g]]$lambda[i,] ] only.measures.1 <- length(fs) == 1L ## free factor mean(s) other than group 1 only if an indicator without a ## cross-loading has an equality-constrained intercept if (only.measures.1 && g > 1L) GLIST.values[[g]]$alpha[fs, 1] <- NA } } } ## RESIDUAL VARIANCES ## longitudinal constraints (one group at a time, but same across groups) for (g in 1:nG) { ## loop over indicators for (i in lavNames(lavTemplate, "ov.ind", group = g)) { ## when other variables are this same indicator? longInds <- names(longIndKey)[ which(longIndKey == longIndKey[i]) ] if (length(longInds) == 0L) next ## assign labels equate.resid <- "residuals" %in% long.equal && !any(long.partial$lhs == longIndKey[i] & long.partial$rhs == longIndKey[i] & long.partial$op == "~~") if (equate.resid) { diag(GLIST.labels[[g]]$theta)[i] <- diag(GLIST.labels[[g]]$theta)[ longInds[1] ] } } } ## group constraints if (nG > 1L) for (g in 1:nG) { ## loop over indicators for (i in lavNames(lavTemplate, "ov.ind", group = g)) { ## assign labels equate.resid <- "residuals" %in% group.equal && !any(group.partial$lhs == i & group.partial$rhs == i & group.partial$op == "~~") if (!equate.resid) { diag(GLIST.labels[[g]]$theta)[i] <- paste0(diag(GLIST.labels[[g]]$theta)[i], ".g", g) } } } ## RESIDUAL AUTO-COVARIANCES: longitudinal constraints only if (length(longIndNames) && !is.null(auto)) for (g in 1:nG) { ## loop over longitudinal indicators for (i in names(longIndNames)) { nn <- longIndNames[[i]] nT <- length(nn) # number repeated measures of indicator i auto.i <- auto # because nT can differ across variables if (auto == "all") auto.i <- nT - 1L # max lag if (auto >= nT ) auto.i <- nT - 1L # max lag ## for each lag... for (lag in 1:auto.i) { for (tt in 1:(nT - lag)) { ## specify and set free GLIST.specify[[g]]$theta[ nn[tt + lag], nn[tt] ] <- TRUE GLIST.values[[g]]$theta[ nn[tt + lag], nn[tt] ] <- NA ## constrain to equality across repeated measures? 
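## (Sketch of the labeling below, assuming hypothetical repeated measures
##  nn = c("y1.t1", "y1.t2", "y1.t3") and lag = 1: theta["y1.t3", "y1.t2"]
##  receives the label already assigned to theta["y1.t2", "y1.t1"], so all
##  auto-covariances at the same lag share a single label and are thereby
##  constrained to equality across occasions.)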
if ("resid.autocov" %in% long.equal && tt > 1L) { first.label <- GLIST.labels[[g]]$theta[ nn[1 + lag], nn[1] ] GLIST.labels[[g]]$theta[ nn[tt + lag], nn[tt] ] <- first.label } } } } } ## group constraints on any RESIDUAL COVARIANCES if (nG > 1) for (g in 1:nG) { ## add group-specific labels to any off-diagonal GLIST.specify? freeTheta <- which(GLIST.specify[[g]]$theta, arr.ind = TRUE) offDiag <- freeTheta[ , "row"] > freeTheta[ , "col"] if (sum(offDiag) == 0) break # nothing to do ## loop over elements that require action free.offDiag <- freeTheta[offDiag, , drop = FALSE] for (RR in 1:nrow(free.offDiag)) { i <- free.offDiag[RR, "row"] j <- free.offDiag[RR, "col"] ## check group.partial in both directions partial.ij <- any(group.partial$lhs == i & group.partial$rhs == j & group.partial$op == "~~") partial.ji <- any(group.partial$lhs == j & group.partial$rhs == i & group.partial$op == "~~") equate.rescov <- "residual.covariances" %in% group.equal && !any(partial.ij | partial.ji) ## assign group-specific labels? if (!equate.rescov) { GLIST.labels[[g]]$theta[i, j] <- paste0(GLIST.labels[[g]]$theta[i, j], ".g", g) } } } ## LATENT AUTO-COVARIANCES: longitudinal constraints only if (length(longFacNames)) for (g in 1:nG) { ## loop over longitudinal indicators for (i in names(longFacNames)) { nn <- longFacNames[[i]] nT <- length(nn) # number repeated measures of indicator i ## for each lag... for (lag in 1:(nT - 1) ) { for (tt in 1:(nT - lag) ) { ## specify and set free (overwrite possible "orthogonal=TRUE") GLIST.specify[[g]]$psi[ nn[tt + lag], nn[tt] ] <- TRUE GLIST.values[[g]]$psi[ nn[tt + lag], nn[tt] ] <- NA ## constrain to equality across repeated measures? if ("lv.autocov" %in% long.equal && tt > 1L) { first.label <- GLIST.labels[[g]]$psi[ nn[1 + lag], nn[1] ] GLIST.labels[[g]]$psi[ nn[tt + lag], nn[tt] ] <- first.label } } } } } ## group constraints on any LATENT COVARIANCES if (nG > 1) for (g in 1:nG) { ## add group-specific labels to any off-diagonal GLIST.specify? freePsi <- which(GLIST.specify[[g]]$psi, arr.ind = TRUE) offDiag <- freePsi[ , "row"] > freePsi[ , "col"] if (sum(offDiag) == 0) break # nothing to do ## loop over elements that require action free.offDiag <- freePsi[offDiag, , drop = FALSE] for (RR in 1:nrow(free.offDiag)) { i <- free.offDiag[RR, "row"] j <- free.offDiag[RR, "col"] ## check group.partial in both directions partial.ij <- any(group.partial$lhs == i & group.partial$rhs == j & group.partial$op == "~~") partial.ji <- any(group.partial$lhs == j & group.partial$rhs == i & group.partial$op == "~~") equate.latcov <- "lv.covariances" %in% group.equal && !any(partial.ij | partial.ji) ## assign group-specific labels? 
if (!equate.latcov) { GLIST.labels[[g]]$psi[i, j] <- paste0(GLIST.labels[[g]]$psi[i, j], ".g", g) } } } ## assemble parameter labels for effects-code identification constraints fxList <- character(0) if (ID.fac == "fx") { listLabels.L <- list() if (meanstructure) listLabels.I <- list() for (g in 1:nG) { ## loadings labels listLabels.L[[g]] <- sapply(colnames(GLIST.labels[[g]]$lambda), function(f) { GLIST.labels[[g]]$lambda[GLIST.specify[[g]]$lambda[ , f], f] }, simplify = FALSE) ## intercept labels if (meanstructure) { listLabels.I[[g]] <- sapply(colnames(GLIST.labels[[g]]$lambda), function(f) { GLIST.labels[[g]]$nu[GLIST.specify[[g]]$lambda[ , f], 1] }, simplify = FALSE) } } ## names of factors measured in each group gFacNames <- lapply(listLabels.L, names) ## loop over factor names for (f in unique(unlist(allFacNames))) { ## in which groups is this factor measured? groups.with.f <- which(sapply(gFacNames, function(gn) f %in% gn)) ## get the labels used for indicators in each group allLabels.L <- lapply(listLabels.L[groups.with.f], "[[", i = f) if (meanstructure) allLabels.I <- lapply(listLabels.I[groups.with.f], "[[", i = f) ## one group, one time --> no checks necessary if (length(groups.with.f) == 1L && !f %in% names(longFacKey)) { fxList <- c(fxList, make.FX.constraint(allLabels.L[[1]], "loadings")) if (meanstructure) { fxList <- c(fxList, make.FX.constraint(allLabels.I[[1]], "intercepts")) } } ## one group, multiple times if (length(groups.with.f) == 1L && f %in% names(longFacKey)) { ## this factor's name on all occasions LFN <- names(which(longFacKey == longFacKey[f])) ## count constraints on loadings across time allConstrained <- which(table(unlist(listLabels.L[[1]][LFN])) == length(LFN)) if (length(allConstrained)) { if (f == LFN[1]) { fxList <- c(fxList, make.FX.constraint(names(allConstrained), "loadings")) } } else { ## no constraints, each factor gets its own fxList <- c(fxList, make.FX.constraint(allLabels.L[[1]], "loadings")) } ## count constraints on intercepts across time if (meanstructure) { allConstrained <- which(table(unlist(listLabels.I[[1]][LFN])) == length(LFN)) if (length(allConstrained)) { if (f == LFN[1]) { fxList <- c(fxList, make.FX.constraint(names(allConstrained), "intercepts")) } } else { ## no constraints, each factor gets its own fxList <- c(fxList, make.FX.constraint(allLabels.I[[1]], "intercepts")) } } } ## multiple groups, one time if (length(groups.with.f) > 1L && !f %in% names(longFacKey)) { ## count constraints on loadings across groups allConstrained <- which(table(unlist(allLabels.L)) == length(groups.with.f)) if (length(allConstrained)) { fxList <- c(fxList, make.FX.constraint(names(allConstrained), "loadings")) } else { ## no constraints, each group gets its own for (g in groups.with.f) { fxList <- c(fxList, make.FX.constraint(allLabels.L[[g]], "loadings")) } } ## count constraints on intercepts across groups if (meanstructure) { allConstrained <- which(table(unlist(allLabels.I)) == length(groups.with.f)) if (length(allConstrained)) { fxList <- c(fxList, make.FX.constraint(names(allConstrained), "intercepts")) } else { ## no constraints, each group gets its own for (g in groups.with.f) { fxList <- c(fxList, make.FX.constraint(allLabels.I[[g]], "loadings")) } } } } ## multiple groups, multiple times: Constrain across any/all dimensions? 
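## (Clarifying note: in the block below, a loading or intercept only counts
##  as constrained across BOTH dimensions when its label occurs
##  nGT = (number of occasions) x (number of groups) times, e.g., 6 times
##  for a hypothetical design with 3 occasions and 2 groups; only then is a
##  single effects-code identification constraint applied to that label.)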
if (length(groups.with.f) > 1L && f %in% names(longFacKey)) { ## This factor's name on all occasions LFN <- names(which(longFacKey == longFacKey[f])) ## Number of dimensions (number of groups times number of occasions). ## Assumes each occasion was measured in each group. nGT <- length(LFN)*length(groups.with.f) ## count constraints on loadings across both dimensions all.GL.Labels.L <- lapply(LFN, function(ff) { lapply(listLabels.L[groups.with.f], "[[", i = ff) }) all.GL.Constrained.L <- which(table(unlist(all.GL.Labels.L)) == nGT) if (length(all.GL.Constrained.L)) { if (f == LFN[1]) { fxList <- c(fxList, make.FX.constraint(names(all.GL.Constrained.L), "loadings")) } } else { if (f == LFN[1]) warning('No indicators of longitudinal factor "', longFacKey[f], '" have loadings constrained across all groups and all ', 'occasions, so the automatically generated syntax applies ', 'effects-code identification constraints separately for each', ' occasion and group. If at least 1 loading is constrained ', 'across either groups or occasions, the user should save the', ' syntax to manually reduce the number of identification ', 'constraints by applying them only to loadings constrained ', 'to equality across groups or occasions.') #TODO: update() method for (g in groups.with.f) { fxList <- c(fxList, make.FX.constraint(allLabels.L[[g]], "loadings")) } } ## count constraints on intercepts across both dimensions if (meanstructure) { all.GL.Labels.I <- lapply(LFN, function(ff) { lapply(listLabels.I[groups.with.f], "[[", i = ff) }) all.GL.Constrained.I <- which(table(unlist(all.GL.Labels.I)) == nGT) if (length(all.GL.Constrained.I)) { if (f == LFN[1]) { fxList <- c(fxList, make.FX.constraint(names(all.GL.Constrained.I), "intercepts")) } } else { if (f == LFN[1]) warning('No indicators of longitudinal factor "', longFacKey[f], '" have intercepts constrained across all groups and all ', 'occasions, so the automatically generated syntax applies ', 'effects-code identification constraints separately for each', ' occasion and group. 
If at least 1 loading is constrained ', 'across either groups or occasions, the user should save the', ' syntax to manually reduce the number of identification ', 'constraints by applying them only to intercepts constrained ', 'to equality across groups or occasions.') #TODO: update() method for (g in groups.with.f) { fxList <- c(fxList, make.FX.constraint(allLabels.I[[g]], "intercepts")) } } } } } # end loop over factors } ## ------------- ## Return object ## ------------- out <- new("measEq.syntax", package = "lavaan", model.type = "cfa", call = mc, meanstructure = meanstructure, numeric = lavNames(lavTemplate, "ov.num"), ordered = lavNames(lavTemplate, "ov.ord"), parameterization = parameterization, specify = GLIST.specify, values = GLIST.values, labels = GLIST.labels, constraints = fxList, ngroups = nG) if (return.fit) { if (inherits(configural.model, "lavaan")) { fit <- try(lavaan::update(configural.model, model = as.character(out), ...), silent = TRUE) } else if (inherits(configural.model, "lavaanList")) { configural.model@call$model <- as.character(out) configural.model@call$do.fit <- TRUE fit <- try(eval(configural.model@call, parent.frame()), silent = TRUE) } else { lavArgs$model <- as.character(out) lavArgs$do.fit <- TRUE fit <- try(do.call("cfa", lavArgs), silent = TRUE) } ## check whether the model could be fit if (inherits(fit, "try-error")) { warning('The generated model syntax was not successfully fit to the ', 'data, and generated the following error message(s): \n\n', fit[1:length(fit)], "\n", "The measEq.syntax object was returned instead.") } else { fit@external$measEq.syntax <- out # save the syntax to the lavaan(.mi) object out <- fit # return the fitted lavaan(.mi) object } } out } ## ---------------- ## Hidden Functions ## ---------------- ## function(s) to locate position in lambda, theta, psi, nu, alpha ##' @importFrom lavaan lavInspect getLabel <- function(object, parMat, RR, CC = 1L) { est <- lavInspect(object, "free") if (lavInspect(object, "ngroups") > 1L) est <- est[[1]] dn <- dimnames(est[[parMat]]) out <- paste(parMat, which(dn[[1]] == RR), sep = ".") if (!parMat %in% c("alpha","nu")) out <- paste(out, which(dn[[2]] == CC), sep = "_") out } ## function to assemble a model constraint for effects-code identification make.FX.constraint <- function(parLabels, param) { nCon <- length(parLabels) conVal <- if (param == "loadings") nCon else 0 out <- paste0(parLabels[1], " == ", conVal) if (nCon > 1) out <- paste(c(out, parLabels[-1]), collapse = " - ") out } ## function to generate a character vector of lines of syntax (for as.character) write.lavaan.syntax <- function(pmat, specify, value, label) { nG <- length(specify) ## LOADINGS if (pmat == "lambda") { params <- "## LOADINGS:\n" for (fac in colnames(specify[[1]])) { for (ind in rownames(specify[[1]])) { if (!specify[[1]][ind, fac]) next if (nG > 1L) { params <- c(params, paste0(fac, " =~ c(", paste(sapply(value, "[", i = ind, j = fac), collapse = ", "), ")*", ind, " + c(", paste(sapply(label, "[", i = ind, j = fac), collapse = ", "), ")*", ind)) } else { params <- c(params, paste0(fac, " =~ ", value[[1]][ind, fac], "*", ind, " + ", label[[1]][ind, fac], "*", ind)) } } } return(c(params, "")) } ## THRESHOLDS if (pmat == "tau") { params <- sapply(rownames(specify[[1]]), function(th) { th.names <- strsplit(th, split = "|", fixed = TRUE)[[1]] if (nG > 1L) { param <- paste0(th.names[1], " | c(", paste(sapply(value, "[", i = th, j = 1), collapse = ", "), ")*", th.names[2], " + c(", paste(sapply(label, "[", i = th, j 
= 1), collapse = ", "), ")*", th.names[2]) } else { param <- paste0(th.names[1], " | ", value[[1]][th, 1], "*", th.names[2], " + ", label[[1]][th, 1], "*", th.names[2]) } param }) return(c("## THRESHOLDS:\n", params, "")) } ## INTERCEPTS or LATENT MEANS if (pmat %in% c("nu","alpha")) { ## specify all, so no need to check params <- sapply(rownames(specify[[1]]), function(x) { if (nG > 1L) { # param <- paste0(x, " ~ c(", # paste(sapply(value, "[", i = x, j = 1), # collapse = ", "), # ")*1 + c(", # paste(sapply(label, "[", i = x, j = 1), # collapse = ", "), # ")*1") #FIXME: Did Yves fix this lavaanify() bug? param <- paste0(x, " ~ c(", paste(sapply(label, "[", i = x, j = 1), collapse = ", "), ")*1 + c(", paste(sapply(value, "[", i = x, j = 1), collapse = ", "), ")*1") } else { # param <- paste0(x, " ~ ", value[[1]][x, 1], "*1 + ", # label[[1]][x, 1], "*1") param <- paste0(x, " ~ ", label[[1]][x, 1], "*1 + ", value[[1]][x, 1], "*1") } param }) if (pmat == "nu") params <- c("## INTERCEPTS:\n", params) if (pmat == "alpha") params <- c("## LATENT MEANS/INTERCEPTS:\n", params) return(c(params, "")) } ## SCALING FACTORS (delta) if (pmat == "delta") { ## specify any? spec.delta <- which(specify[[1]][ , 1]) if (length(spec.delta) == 0L) return(NULL) ## if so... params <- sapply(names(spec.delta), function(x) { if (nG > 1L) { param <- paste0(x, " ~*~ c(", paste(sapply(value, "[", i = x, j = 1), collapse = ", "), ")*", x) } else { param <- paste0(x, " ~*~ ", value[[1]][x, 1], "*", x) } param }) return(c("## SCALING FACTORS:\n", params, "")) } ## LATENT or RESIDUAL (CO)VARIANCES if (pmat %in% c("theta","psi")) { ## do diagonal first, then check for off-diagonal spec.vars <- which(diag(specify[[1]])) if (pmat == "psi") { params <- "## COMMON-FACTOR VARIANCES:\n" } else if (pmat == "theta" && length(spec.vars)) { params <- "## UNIQUE-FACTOR VARIANCES:\n" } else params <- character(0) ## variances if (length(spec.vars)) { params <- c(params, sapply(names(spec.vars), function(x) { if (nG > 1L) { param <- paste0(x, " ~~ c(", paste(sapply(value, function(j) diag(j)[x]), collapse = ", "), ")*", x, " + c(", paste(sapply(label, function(j) diag(j)[x]), collapse = ", "), ")*", x) } else { param <- paste0(x, " ~~ ", diag(value[[1]])[x], "*", x, " + ", diag(label[[1]])[x], "*", x) } param })) } ## covariances if (any(specify[[1]][lower.tri(specify[[1]], diag = FALSE)])) { if (pmat == "psi") params <- c(params, "\n## COMMON-FACTOR COVARIANCES:\n") if (pmat == "theta") params <- c(params, "\n## UNIQUE-FACTOR COVARIANCES:\n") } nn <- rownames(specify[[1]]) if (length(nn) > 1L) for (CC in 1:(length(nn) - 1)) { for (RR in (CC + 1):length(nn)) { if (!specify[[1]][RR, CC]) next if (nG > 1L) { params <- c(params, paste0(nn[CC], " ~~ c(", paste(sapply(value, "[", i = RR, j = CC), collapse = ", "), ")*", nn[RR], " + c(", paste(sapply(label, "[", i = RR, j = CC), collapse = ", "), ")*", nn[RR])) } else { params <- c(params, paste0(nn[CC], " ~~ ", value[[1]][RR, CC], "*", nn[RR], " + ", label[[1]][RR, CC], "*", nn[RR])) } } } return(c(params, "")) } ## out of options, should never get this far invisible(NULL) } #TODO: adapt routine to write Mplus MODEL statements and OpenMx RAM commands semTools/R/kd.R0000644000176200001440000001006213432435606013006 0ustar liggesusers### Edgar Merkle ### Last updated: 9 March 2018 ### Kaiser-Dickman (1962) algorithm for generating sample data ### based on the input covmat, which is a covariance matrix. #' Generate data via the Kaiser-Dickman (1962) algorithm. 
#'
#' Given a covariance matrix and sample size, generate raw data that correspond
#' to the covariance matrix. Data can be generated to match the covariance
#' matrix exactly, or to be a sample from the population covariance matrix.
#'
#' By default, R's \code{cov()} function divides by \code{n}-1. The data
#' generated by this algorithm result in a covariance matrix that matches
#' \code{covmat}, but you must divide by \code{n} instead of \code{n}-1.
#'
#'
#' @importFrom stats cov2cor rnorm
#'
#' @param covmat a symmetric, positive definite covariance matrix
#' @param n the sample size for the data that will be generated
#' @param type type of data generation. \code{exact} generates data that
#' exactly correspond to \code{covmat}. \code{sample} treats \code{covmat} as
#' a population covariance matrix, generating a sample of size \code{n}.
#' @return \code{kd} returns a data matrix of dimension \code{n} by
#' \code{nrow(covmat)}.
#' @author Ed Merkle (University of Missouri; \email{merklee@@missouri.edu})
#' @references Kaiser, H. F. and Dickman, K. (1962). Sample and population
#' score matrices and sample correlation matrices from an arbitrary population
#' correlation matrix. \emph{Psychometrika, 27}(2), 179--182.
#' doi:10.1007/BF02289635
#' @examples
#'
#' #### First Example
#'
#' ## Get data
#' dat <- HolzingerSwineford1939[ , 7:15]
#' hs.n <- nrow(dat)
#'
#' ## Covariance matrix divided by n
#' hscov <- ((hs.n-1)/hs.n) * cov(dat)
#'
#' ## Generate new, raw data corresponding to hscov
#' newdat <- kd(hscov, hs.n)
#'
#' ## Difference between new covariance matrix and hscov is minimal
#' newcov <- (hs.n-1)/hs.n * cov(newdat)
#' summary(as.numeric(hscov - newcov))
#'
#' ## Generate sample data, treating hscov as population matrix
#' newdat2 <- kd(hscov, hs.n, type = "sample")
#'
#' #### Another example
#'
#' ## Define a covariance matrix
#' covmat <- matrix(0, 3, 3)
#' diag(covmat) <- 1.5
#' covmat[2:3,1] <- c(1.3, 1.7)
#' covmat[3,2] <- 2.1
#' covmat <- covmat + t(covmat)
#'
#' ## Generate data of size 300 that have this covariance matrix
#' rawdat <- kd(covmat, 300)
#'
#' ## Covariances are exact if we compute sample covariance matrix by
#' ## dividing by n (vs by n - 1)
#' summary(as.numeric((299/300)*cov(rawdat) - covmat))
#'
#' ## Generate data of size 300, treating covmat as the population
#' ## covariance matrix
#' rawdat2 <- kd(covmat, 300, type = "sample")
#'
#' @export
kd <- function(covmat, n, type=c("exact","sample")) {
  type <- match.arg(type)

  ## Check to ensure that covmat is a valid covariance matrix.
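  ## (Descriptive note: the three checks below enforce squareness, symmetry,
  ##  and positive definiteness -- the conditions required by the Cholesky
  ##  factorization, t(chol(cormat)), used later in the algorithm.)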
if (nrow(covmat) != ncol(covmat)) stop("non-square matrix supplied") symmetric <- isSymmetric.matrix(covmat) if (!symmetric) stop("non-symmetric matrix supplied") pd <- all(eigen(covmat, only.values = TRUE)$values > 0) if (!pd) stop("covariance matrix is not positive definite") p <- nrow(covmat) ## Algorithm works on a correlation matrix mv.vars <- matrix(0, nrow(covmat), nrow(covmat)) diag(mv.vars) <- sqrt(diag(covmat)) cormat <- cov2cor(covmat) ## Generate standard normal data and mean center each variable Xscore <- matrix(rnorm(p*n), p, n) Xsub0 <- t(apply(Xscore, 1, scale, scale = FALSE)) ## Correlation matrix factored via Cholesky decomposition: Fcomp <- t(chol(cormat)) ## Equation 2 from K&D: Zhat <- Fcomp %*% Xscore ## Equation 3 from K&D: Xsub0.prod <- Xsub0 %*% t(Xsub0) ## Get singular value decomp of Xsub0.prod Xsub0.svd <- svd(Xsub0.prod) M.sqrt <- matrix(0, p, p) diag(M.sqrt) <- 1 / sqrt(Xsub0.svd$d) ## Equation 5 from K&D: Z <- Fcomp %*% M.sqrt %*% t(Xsub0.svd$u) %*% Xsub0 Z <- Z * sqrt(n) dat <- Z if (type == "sample") { dat <- Zhat } ## Scale data to correspond to covmat dat <- t(dat) %*% mv.vars ## convert to data.frame, use any existing names from covmat dat <- data.frame(dat) if(!is.null(colnames(covmat))) names(dat) <- colnames(covmat) dat } semTools/R/fitIndices.R0000644000176200001440000004104713531464724014503 0ustar liggesusers### Title: Compute more fit indices ### Authors: Terrence D. Jorgensen ### Sunthud Pornprasertmanit , ### Aaron Boulton , ### Ruben Arslan ### Last updated: 28 August 2019 ### Description: Calculations for promising alternative fit indices ##' Calculate more fit indices ##' ##' Calculate more fit indices that are not already provided in lavaan. ##' ##' Gamma Hat (gammaHat; West, Taylor, & Wu, 2012) is a global fit index which ##' can be computed (assuming equal number of indicators across groups) by ##' ##' \deqn{ gammaHat =\frac{p}{p + 2 \times \frac{\chi^{2}_{k} - df_{k}}{N}} ,} ##' ##' where \eqn{p} is the number of variables in the model, \eqn{\chi^{2}_{k}} is ##' the \eqn{\chi^2} test statistic value of the target model, \eqn{df_{k}} is ##' the degree of freedom when fitting the target model, and \eqn{N} is the ##' sample size (or sample size minus the number of groups if \code{mimic} is ##' set to \code{"EQS"}). ##' ##' Adjusted Gamma Hat (adjGammaHat; West, Taylor, & Wu, 2012) is a global fit ##' index which can be computed by ##' ##' \deqn{ adjGammaHat = \left(1 - \frac{K \times p \times (p + 1)}{2 \times ##' df_{k}} \right) \times \left( 1 - gammaHat \right) ,} ##' ##' where \eqn{K} is the number of groups (please refer to Dudgeon, 2004 for the ##' multiple-group adjustment for agfi*). ##' ##' Corrected Akaike Information Criterion (aic.smallN; Burnham & Anderson, ##' 2003) is a corrected version of AIC for small sample size, often abbreviated ##' AICc: ##' ##' \deqn{ aic.smallN = AIC + \frac{2k(k + 1)}{N - k - 1},} ##' ##' where \eqn{AIC} is the original AIC: \eqn{-2 \times LL + 2k} (where \eqn{k} ##' = the number of estimated parameters in the target model). Note that AICc is ##' a small-sample correction derived for univariate regression models, so it is ##' probably \emph{not} appropriate for comparing SEMs. ##' ##' Corrected Bayesian Information Criterion (bic.priorN; Kuha, 2004) is similar ##' to BIC but explicitly specifying the sample size on which the prior is based ##' (\eqn{N_{prior}}). 
##' ##' \deqn{ bic.priorN = f + k\log{(1 + N/N_{prior})},} ##' ##' Stochastic information criterion (SIC; Preacher, 2006) is similar to AIC or ##' BIC. This index will account for model complexity in the model's function ##' form, in addition to the number of free parameters. This index will be ##' provided only when the \eqn{\chi^2} value is not scaled. The SIC can be ##' computed by ##' ##' \deqn{ sic = \frac{1}{2}\left(f - \log{\det{I(\hat{\theta})}}\right),} ##' ##' where \eqn{I(\hat{\theta})} is the information matrix of the parameters. ##' ##' Hannan-Quinn Information Criterion (hqc; Hannan & Quinn, 1979) is used for ##' model selection similar to AIC or BIC. ##' ##' \deqn{ hqc = f + 2k\log{(\log{N})},} ##' ##' Note that if Satorra--Bentler or Yuan--Bentler's method is used, the fit ##' indices using the scaled \eqn{\chi^2} values are also provided. ##' ##' See \code{\link{nullRMSEA}} for the further details of the computation of ##' RMSEA of the null model. ##' ##' ##' @importFrom lavaan lavInspect ##' ##' @param object The lavaan model object provided after running the \code{cfa}, ##' \code{sem}, \code{growth}, or \code{lavaan} functions. ##' @param fit.measures Additional fit measures to be calculated. All additional ##' fit measures are calculated by default ##' @param nPrior The sample size on which prior is based. This argument is used ##' to compute BIC*. ##' @return \enumerate{ ##' \item \code{gammaHat}: Gamma Hat ##' \item \code{adjGammaHat}: Adjusted Gamma Hat ##' \item \code{baseline.rmsea}: RMSEA of the Baseline (Null) Model ##' \item \code{aic.smallN}: Corrected (for small sample size) Akaike Information Criterion ##' \item \code{bic.priorN}: Bayesian Information Criterion with specified prior sample size ##' \item \code{sic}: Stochastic Information Criterion ##' \item \code{hqc}: Hannan-Quinn Information Criterion ##' \item \code{gammaHat.scaled}: Gamma Hat using scaled \eqn{\chi^2} ##' \item \code{adjGammaHat.scaled}: Adjusted Gamma Hat using scaled \eqn{\chi^2} ##' \item \code{baseline.rmsea.scaled}: RMSEA of the Baseline (Null) Model using scaled \eqn{\chi^2} ##' } ##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' Aaron Boulton (University of North Carolina, Chapel Hill; \email{aboulton@@email.unc.edu}) ##' ##' Ruben Arslan (Humboldt-University of Berlin, \email{rubenarslan@@gmail.com}) ##' ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' @seealso \itemize{ \item \code{\link{miPowerFit}} For the modification ##' indices and their power approach for model fit evaluation \item ##' \code{\link{nullRMSEA}} For RMSEA of the null model } ##' ##' @references Burnham, K., & Anderson, D. (2003). \emph{Model selection and ##' multimodel inference: A practical--theoretic approach}. New York, NY: ##' Springer--Verlag. ##' ##' Dudgeon, P. (2004). A note on extending Steiger's (1998) multiple sample ##' RMSEA adjustment to other noncentrality parameter-based statistic. ##' \emph{Structural Equation Modeling, 11}(3), 305--319. ##' doi:10.1207/s15328007sem1103_1 ##' ##' Kuha, J. (2004). AIC and BIC: Comparisons of assumptions and performance. ##' \emph{Sociological Methods Research, 33}(2), 188--229. ##' doi:10.1177/0049124103262065 ##' ##' Preacher, K. J. (2006). Quantifying parsimony in structural equation ##' modeling. \emph{Multivariate Behavioral Research, 43}(3), 227-259. ##' doi:10.1207/s15327906mbr4103_1 ##' ##' West, S. G., Taylor, A. 
B., & Wu, W. (2012). Model fit and model selection ##' in structural equation modeling. In R. H. Hoyle (Ed.), \emph{Handbook of ##' Structural Equation Modeling} (pp. 209--231). New York, NY: Guilford. ##' @examples ##' ##' HS.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' fit <- cfa(HS.model, data = HolzingerSwineford1939) ##' moreFitIndices(fit) ##' ##' fit2 <- cfa(HS.model, data = HolzingerSwineford1939, estimator = "mlr") ##' moreFitIndices(fit2) ##' ##' @export moreFitIndices <- function(object, fit.measures = "all", nPrior = 1) { ## check for validity of user-specified "fit.measures" argument fit.choices <- c("gammaHat","adjGammaHat","baseline.rmsea", "gammaHat.scaled","adjGammaHat.scaled","baseline.rmsea.scaled", "aic.smallN","bic.priorN","hqc","sic") flags <- setdiff(fit.measures, c("all", fit.choices)) if (length(flags)) stop(paste("Argument 'fit.measures' includes invalid options:", paste(flags, collapse = ", "), "Please choose 'all' or among the following:", paste(fit.choices, collapse = ", "), sep = "\n")) if ("all" %in% fit.measures) fit.measures <- fit.choices # Extract fit indices information from lavaan object fit <- lavInspect(object, "fit") # Get the number of variable p <- length(lavaan::lavNames(object, type = "ov", group = 1)) # Get the number of parameters nParam <- fit["npar"] # Find the number of groups ngroup <- lavInspect(object, "ngroups") # Get number of observations N <- n <- lavInspect(object, "ntotal") if (lavInspect(object, "options")$mimic == "EQS") n <- n - ngroup # Calculate -2*log(likelihood) f <- -2 * fit["logl"] # Compute fit indices result <- list() if (length(grep("gamma", fit.measures, ignore.case = TRUE))) { gammaHat <- p / (p + 2 * ((fit["chisq"] - fit["df"]) / n)) adjGammaHat <- 1 - (((ngroup * p * (p + 1)) / 2) / fit["df"]) * (1 - gammaHat) result["gammaHat"] <- gammaHat result["adjGammaHat"] <- adjGammaHat if (any(grepl(pattern = "scaled", x = names(fit)))) { gammaHatScaled <- p / (p + 2 * ((fit["chisq.scaled"] - fit["df.scaled"]) / n)) adjGammaHatScaled <- 1 - (((ngroup * p * (p + 1)) / 2) / fit["df.scaled"]) * (1 - gammaHatScaled) result["gammaHat.scaled"] <- gammaHatScaled result["adjGammaHat.scaled"] <- adjGammaHatScaled } } if (length(grep("rmsea", fit.measures))) { result["baseline.rmsea"] <- nullRMSEA(object, silent = TRUE) if (any(grepl(pattern = "scaled", x = names(fit)))) { result["baseline.rmsea.scaled"] <- nullRMSEA(object, scaled = TRUE, silent = TRUE) } } if (!is.na(f)) { if ("aic.smallN" %in% fit.measures) { warning('AICc (aic.smallN) was developed for univariate linear models.', ' It is probably not appropriate to use AICc to compare SEMs.') result["aic.smallN"] <- fit[["aic"]] + (2 * nParam * (nParam + 1)) / (N - nParam - 1) } if ("bic.priorN" %in% fit.measures) { result["bic.priorN"] <- f + log(1 + N/nPrior) * nParam } if ("hqc" %in% fit.measures) result["hqc"] <- f + 2 * log(log(N)) * nParam if ("sic" %in% fit.measures) result["sic"] <- sic(f, object) } class(result) <- c("lavaan.vector","numeric") unlist(result[fit.measures]) } ##' Calculate the RMSEA of the null model ##' ##' Calculate the RMSEA of the null (baseline) model ##' ##' RMSEA of the null model is calculated similar to the formula provided in the ##' \code{lavaan} package. 
The standard formula of RMSEA is ##' ##' \deqn{ RMSEA =\sqrt{\frac{\chi^2}{N \times df} - \frac{1}{N}} \times ##' \sqrt{G} } ##' ##' where \eqn{\chi^2} is the chi-square test statistic value of the target ##' model, \eqn{N} is the total sample size, \eqn{df} is the degree of freedom ##' of the hypothesized model, \eqn{G} is the number of groups. Kenny proposed ##' in his website that ##' ##' "A reasonable rule of thumb is to examine the RMSEA for the null model and ##' make sure that is no smaller than 0.158. An RMSEA for the model of 0.05 and ##' a TLI of .90, implies that the RMSEA of the null model is 0.158. If the ##' RMSEA for the null model is less than 0.158, an incremental measure of fit ##' may not be that informative." ##' ##' See also \url{http://davidakenny.net/cm/fit.htm} ##' ##' ##' @importFrom lavaan lavInspect ##' ##' @param object The lavaan model object provided after running the \code{cfa}, ##' \code{sem}, \code{growth}, or \code{lavaan} functions. ##' @param scaled If \code{TRUE}, the scaled (or robust, if available) RMSEA ##' is returned. Ignored if a robust test statistic was not requested. ##' @param silent If \code{TRUE}, do not print anything on the screen. ##' ##' @return A value of RMSEA of the null model (a \code{numeric} vector) ##' returned invisibly. ##' ##' @author ##' Ruben Arslan (Humboldt-University of Berlin, \email{rubenarslan@@gmail.com}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso ##' \itemize{ ##' \item \code{\link{miPowerFit}} For the modification indices and their ##' power approach for model fit evaluation ##' \item \code{\link{moreFitIndices}} For other fit indices ##' } ##' ##' @references Kenny, D. A., Kaniskan, B., & McCoach, D. B. (2015). The ##' performance of RMSEA in models with small degrees of freedom. ##' \emph{Sociological Methods Research, 44}(3), 486--507. ##' doi:10.1177/0049124114543236 ##' ##' @examples ##' ##' HS.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' fit <- cfa(HS.model, data = HolzingerSwineford1939) ##' nullRMSEA(fit) ##' ##' @export nullRMSEA <- function(object, scaled = FALSE, silent = FALSE) { fit <- lavaan::update(object, model = lavaan::lav_partable_independence(object)) fits <- lavaan::fitMeasures(fit, fit.measures = c("rmsea","rmsea.scaled", "rmsea.robust")) if (scaled) { RMSEA <- fits["rmsea.robust"] if (is.na(RMSEA)) RMSEA <- fits["rmsea.scaled"] if (is.na(RMSEA)) RMSEA <- fits["rmsea"] } else RMSEA <- fits["rmsea"] if (!silent) { cat("The baseline model's RMSEA =", RMSEA, "\n\n") if (RMSEA < 0.158 ) { cat("CFI, TLI, and other incremental fit indices may not be very", "informative because the baseline model's RMSEA < 0.158", "(Kenny, Kaniskan, & McCoach, 2015). \n") } } invisible(RMSEA) } ## Stochastic Information Criterion ## f = minimized discrepancy function ## lresults = lavaan sem output object sic <- function(f, lresults = NULL) { ## p. 
596 of doi:10.1007/s10519-004-5587-0 says to use observed Fisher information E.inv <- lavaan::lavTech(lresults, "inverted.information.observed") if (inherits(E.inv, "try-error")) { return(as.numeric(NA)) } E <- MASS::ginv(E.inv) * lavaan::nobs(lresults) eigvals <- eigen(E, symmetric = TRUE, only.values = TRUE)$values # only positive ones eigvals <- eigvals[ eigvals > sqrt(.Machine$double.eps)] DET <- prod(eigvals) ## check singular if (DET <= 0) return(NA) ## return SIC f + log(DET) } ##' \emph{k}-factor correction for \eqn{chi^2} test statistic ##' ##' Calculate \emph{k}-factor correction for \eqn{chi^2} model-fit test ##' statistic to adjust for small sample size. ##' ##' The \emph{k}-factor correction (Nevitt & Hancock, 2004) is a global fit ##' index which can be computed by: ##' ##' \deqn{ kc = 1 - \frac{2 \times P + 4 \times K + 5}{6 \times N}} ##' ##' where \eqn{N} is the sample size when using normal likelihood, or \eqn{N - ##' 1} when using \code{likelihood = 'wishart'}. ##' ##' ##' @importFrom lavaan lavInspect ##' @importFrom stats pchisq ##' ##' @param fit0 The lavaan model object provided after running the \code{cfa}, ##' \code{sem}, \code{growth}, or \code{lavaan} functions. ##' @param fit1 Optional additional \linkS4class{lavaan} model, in which ##' \code{fit0} is nested. If \code{fit0} has fewer \emph{df} than \code{fit1}, ##' the models will be swapped, still on the assumption that they are nested. ##' @param \dots Additional arguments to the \code{\link[lavaan]{lavTestLRT}} ##' function. ##' ##' @return A numeric vector including the original (unadjusted) chi-squared ##' statistic, the \emph{k}-factor correction, the corrected test statistic, ##' the \emph{df} for the test, and the \emph{p} value for the test under the ##' null hypothesis that the model fits perfectly (or that the 2 models have ##' equivalent fit). ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @references Nevitt, J., & Hancock, G. R. (2004). Evaluating small sample ##' approaches for model test statistics in structural equation modeling. ##' \emph{Multivariate Behavioral Research, 39}(3), 439--478. ##' doi:10.1207/S15327906MBR3903_3 ##' ##' @examples ##' ##' HS.model <- ' ##' visual =~ x1 + b1*x2 + x3 ##' textual =~ x4 + b2*x5 + x6 ##' speed =~ x7 + b3*x8 + x9 ##' ' ##' fit1 <- cfa(HS.model, data = HolzingerSwineford1939) ##' ## test a single model (implicitly compared to a saturated model) ##' chisqSmallN(fit1) ##' ##' ## fit a more constrained model ##' fit0 <- cfa(HS.model, data = HolzingerSwineford1939, orthogonal = TRUE) ##' ## compare 2 models ##' chisqSmallN(fit1, fit0) ##' ##' @export chisqSmallN <- function(fit0, fit1 = NULL, ...) 
{ ## if there are 2 models, order them by DF if (!is.null(fit1)) { DF0 <- lavaan::fitMeasures(fit0, "df") DF1 <- lavaan::fitMeasures(fit1, "df") if (DF0 == DF1) stop("Models have the same degrees of freedom.") parent <- which.min(c(DF0, DF1)) if (parent == 1L) { parent <- fit0 fit0 <- fit1 fit1 <- parent } #if (min(c(DF0, DF1)) == 0L) fit1 <- NULL } ## calculate k-factor correction N <- lavInspect(fit0, "ntotal") Ng <- lavInspect(fit0, "ngroups") if (!lavInspect(fit0, "options")$sample.cov.rescale) N <- N - Ng P <- length(lavaan::lavNames(fit0)) K <- length(lavaan::lavNames(fit0, type = "lv")) # count latent factors if (!is.null(fit1)) { N1 <- lavInspect(fit1, "ntotal") Ng1 <- lavInspect(fit1, "ngroups") if (!lavInspect(fit1, "options")$sample.cov.rescale) N1 <- N1 - Ng1 if (N != N1) stop("Unequal sample sizes") if (P != length(lavaan::lavNames(fit1))) stop("Unequal number of variables") K <- max(K, length(lavaan::lavNames(fit1, type = "lv"))) } kc <- 1 - ((2*P + 4*K + 5) / (6*N)) if (is.null(fit1)) { FIT <- lavaan::fitMeasures(fit0) scaled <- any(grepl(pattern = "scaled", x = names(FIT))) chi <- FIT[[if (scaled) "chisq.scaled" else "chisq"]] DF <- FIT[["df"]] } else { AOV <- lavaan::lavTestLRT(fit0, fit1, ...) chi <- AOV[["Chisq diff"]][2] DF <- AOV[["Df diff"]][2] } out <- c(original.chisq = chi, `k-factor` = kc, adj.chisq = chi*kc, df = DF, pvalue = pchisq(chi*kc, DF, lower.tail = FALSE)) class(out) <- c("lavaan.vector","numeric") out } semTools/R/reliability.R0000644000176200001440000011642113532026747014732 0ustar liggesusers### Sunthud Pornprasertmanit , Yves Rosseel ### Last updated: 29 August 2019 ## ------------- ## reliability() ## ------------- ##' Calculate reliability values of factors ##' ##' Calculate reliability values of factors by coefficient omega ##' ##' The coefficient alpha (Cronbach, 1951) can be calculated by ##' ##' \deqn{ \alpha = \frac{k}{k - 1}\left[ 1 - \frac{\sum^{k}_{i = 1} ##' \sigma_{ii}}{\sum^{k}_{i = 1} \sigma_{ii} + 2\sum_{i < j} \sigma_{ij}} ##' \right],} ##' ##' where \eqn{k} is the number of items in a factor, \eqn{\sigma_{ii}} is the ##' item \emph{i} observed variances, \eqn{\sigma_{ij}} is the observed ##' covariance of items \emph{i} and \emph{j}. ##' ##' The coefficient omega (Bollen, 1980; see also Raykov, 2001) can be ##' calculated by ##' ##' \deqn{ \omega_1 =\frac{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2} ##' Var\left( \psi \right)}{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2} ##' Var\left( \psi \right) + \sum^{k}_{i = 1} \theta_{ii} + 2\sum_{i < j} ##' \theta_{ij} }, } ##' ##' where \eqn{\lambda_i} is the factor loading of item \emph{i}, \eqn{\psi} is ##' the factor variance, \eqn{\theta_{ii}} is the variance of measurement errors ##' of item \emph{i}, and \eqn{\theta_{ij}} is the covariance of measurement ##' errors from item \emph{i} and \emph{j}. ##' ##' The second coefficient omega (Bentler, 1972, 2009) can be calculated by ##' ##' \deqn{ \omega_2 = \frac{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2} ##' Var\left( \psi \right)}{\bold{1}^\prime \hat{\Sigma} \bold{1}}, } ##' ##' where \eqn{\hat{\Sigma}} is the model-implied covariance matrix, and ##' \eqn{\bold{1}} is the \eqn{k}-dimensional vector of 1. The first and the ##' second coefficients omega will have the same value when the model has simple ##' structure, but different values when there are (for example) cross-loadings ##' or method factors. 
The first coefficient omega can be viewed as the
##' reliability controlling for the other factors (like \eqn{\eta^2_{partial}}
##' in ANOVA). The second coefficient omega can be viewed as the unconditional
##' reliability (like \eqn{\eta^2} in ANOVA).
##'
##' The third coefficient omega (McDonald, 1999), which is sometimes referred
##' to as hierarchical omega, can be calculated by
##'
##' \deqn{ \omega_3 =\frac{\left( \sum^{k}_{i = 1} \lambda_i \right)^{2}
##' Var\left( \psi \right)}{\bold{1}^\prime \Sigma \bold{1}}, }
##'
##' where \eqn{\Sigma} is the observed covariance matrix. If the model fits the
##' data well, the third coefficient omega will be similar to
##' \eqn{\omega_2}. Note that if there is a directional effect in the model, all
##' coefficients omega will use the total factor variances, which are calculated
##' by \code{\link[lavaan]{lavInspect}(object, "cov.lv")}.
##'
##' In conclusion, \eqn{\omega_1}, \eqn{\omega_2}, and \eqn{\omega_3} differ
##' only in their denominators. The denominator of the first formula assumes a
##' congeneric factor model in which measurement errors are not correlated. The
##' second formula accounts for correlated measurement errors. However, these
##' two formulas assume that the model-implied covariance matrix explains item
##' relationships perfectly, whereas the residuals are subject to sampling
##' error. The third formula uses the observed covariance matrix instead of the
##' model-implied covariance matrix to calculate the observed total variance.
##' This formula is the most conservative method of calculating coefficient
##' omega.
##'
##' The average variance extracted (AVE) can be calculated by
##'
##' \deqn{ AVE = \frac{\bold{1}^\prime
##' \textrm{diag}\left(\Lambda\Psi\Lambda^\prime\right)\bold{1}}{\bold{1}^\prime
##' \textrm{diag}\left(\hat{\Sigma}\right) \bold{1}}, }
##'
##' Note that this formula is modified from Fornell & Larcker (1981) for the
##' case that factor variances are not 1. The formula proposed by Fornell &
##' Larcker (1981) assumes that the factor variances are 1. Note that AVE will
##' not be provided for factors consisting of items with dual loadings. AVE is
##' a property of items, not a property of factors.
##'
##' Regarding categorical indicators, coefficient alpha and AVE are calculated
##' based on polychoric correlations. The coefficient alpha from this function
##' may not be the same as the standard alpha calculation for categorical
##' items. Researchers may check the \code{alpha} function in the \code{psych}
##' package for the standard coefficient alpha calculation.
##'
##' Item thresholds are not accounted for. Coefficient omega for categorical
##' items, however, is calculated by accounting for both item covariances and
##' item thresholds using Green and Yang's (2009, formula 21) approach. The
##' three types of coefficient omega correspond to different methods of
##' calculating the total variance of the items. The original formula from
##' Green and Yang is equivalent to
##' \eqn{\omega_3} in this function. Green and Yang did not propose a method for
##' calculating reliability with a mixture of categorical and continuous
##' indicators, and we are currently unaware of an appropriate method.
##' Therefore, when \code{reliability} detects both categorical and continuous
##' indicators in the model, an error is returned.
If the categorical indicators ##' load on a different factor(s) than continuous indicators, then reliability ##' can be calculated separately for those scales by fitting separate models and ##' submitting each to the \code{reliability} function. ##' ##' ##' @importFrom lavaan lavInspect lavNames ##' @param object A \code{\linkS4class{lavaan}} or ##' \code{\linkS4class{lavaan.mi}} object, expected to contain only ##' exogenous common factors (i.e., a CFA model). ##' @param omit.imps \code{character} vector specifying criteria for omitting ##' imputations from pooled results. Can include any of ##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the ##' default setting, which excludes any imputations that did not ##' converge or for which standard errors could not be computed. The ##' last option (\code{"no.npd"}) would exclude any imputations which ##' yielded a nonpositive definite covariance matrix for observed or ##' latent variables, which would include any "improper solutions" such ##' as Heywood cases. NPD solutions are not excluded by default because ##' they are likely to occur due to sampling error, especially in small ##' samples. However, gross model misspecification could also cause ##' NPD solutions, users can compare pooled results with and without ##' this setting as a sensitivity analysis to see whether some ##' imputations warrant further investigation. ##' ##' @return Reliability values (coefficient alpha, coefficients omega, average ##' variance extracted) of each factor in each group ##' ##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' @seealso \code{\link{reliabilityL2}} for reliability value of a desired ##' second-order factor, \code{\link{maximalRelia}} for the maximal reliability ##' of weighted composite ##' ##' @references ##' Bollen, K. A. (1980). Issues in the comparative measurement of ##' political democracy. \emph{American Sociological Review, 45}(3), 370--390. ##' doi:10.2307/2095172 ##' ##' Bentler, P. M. (1972). A lower-bound method for the dimension-free ##' measurement of internal consistency. \emph{Social Science Research, 1}(4), ##' 343--357. doi:10.1016/0049-089X(72)90082-8 ##' ##' Bentler, P. M. (2009). Alpha, dimension-free, and model-based internal ##' consistency reliability. \emph{Psychometrika, 74}(1), 137--143. ##' doi:10.1007/s11336-008-9100-1 ##' ##' Cronbach, L. J. (1951). Coefficient alpha and the internal structure of ##' tests. \emph{Psychometrika, 16}(3), 297--334. doi:10.1007/BF02310555 ##' ##' Fornell, C., & Larcker, D. F. (1981). Evaluating structural equation models ##' with unobservable variables and measurement errors. \emph{Journal of ##' Marketing Research, 18}(1), 39--50. doi:10.2307/3151312 ##' ##' Green, S. B., & Yang, Y. (2009). Reliability of summed item scores using ##' structural equation modeling: An alternative to coefficient alpha. ##' \emph{Psychometrika, 74}(1), 155--167. doi:10.1007/s11336-008-9099-3 ##' ##' McDonald, R. P. (1999). \emph{Test theory: A unified treatment}. Mahwah, NJ: ##' Erlbaum. ##' ##' Raykov, T. (2001). Estimation of congeneric scale reliability using ##' covariance structure analysis with nonlinear constraints \emph{British ##' Journal of Mathematical and Statistical Psychology, 54}(2), 315--323. 
##' doi:10.1348/000711001159582 ##' ##' @examples ##' ##' library(lavaan) ##' ##' HS.model <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ' ##' ##' fit <- cfa(HS.model, data = HolzingerSwineford1939) ##' reliability(fit) ##' ##' @export reliability <- function(object, omit.imps = c("no.conv","no.se")) { ngroups <- lavInspect(object, "ngroups") #TODO: adapt to multiple levels nlevels <- lavInspect(object, "nlevels") nblocks <- ngroups*nlevels #FIXME: always true? group.label <- if (ngroups > 1L) lavInspect(object, "group.label") else NULL #FIXME? lavInspect(object, "level.labels") clus.label <- if (nlevels > 1L) c("within", lavInspect(object, "cluster")) else NULL if (nblocks > 1L) { block.label <- paste(rep(group.label, each = nlevels), clus.label, sep = if (ngroups > 1L && nlevels > 1L) "_" else "") } ## parameters in GLIST format (not flat, need block-level list) if (inherits(object, "lavaan")) { param <- lavInspect(object, "est") ve <- lavInspect(object, "cov.lv") # model-implied latent covariance matrix S <- object@h1$implied$cov # observed sample covariance matrix (already a list) if (nblocks == 1L) { param <- list(param) ve <- list(ve) } } else if (inherits(object, "lavaan.mi")) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) param <- object@coefList[[ useImps[1] ]] # first admissible as template coefList <- object@coefList[useImps] phiList <- object@phiList[useImps] ## add block-level list per imputation? 
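    ## (Descriptive note: the wrapping below lets the block loop treat
    ##  1-block and multi-block models uniformly; parameter matrices and
    ##  covariance matrices are then pooled as simple element-wise means
    ##  across the m admissible imputations, i.e., Reduce("+", ...) / m,
    ##  consistent with pooling point estimates across imputations.)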
if (nblocks == 1L) { param <- list(param) for (i in 1:m) { coefList[[i]] <- list(coefList[[i]]) phiList[[i]] <- list(phiList[[i]]) } } S <- vector("list", nblocks) # pooled observed covariance matrix ve <- vector("list", nblocks) ## loop over blocks for (b in 1:nblocks) { ## param: loop over GLIST elements for (mat in names(param[[b]])) { matList <- lapply(coefList, function(i) i[[b]][[mat]]) param[[b]][[mat]] <- Reduce("+", matList) / length(matList) } # mat ## pooled observed covariance matrix covList <- lapply(object@h1List[useImps], function(i) i$implied$cov[[b]]) S[[b]] <- Reduce("+", covList) / m ## pooled model-implied latent covariance matrix ve[[b]] <- Reduce("+", lapply(phiList, "[[", i = b) ) / m } # b } if (nblocks == 1L) { SigmaHat <- getMethod("fitted", class(object))(object)["cov"] # retain list format } else { SigmaHat <- sapply(getMethod("fitted", class(object))(object), "[[", "cov", simplify = FALSE) } ly <- lapply(param, "[[", "lambda") te <- lapply(param, "[[", "theta") categorical <- lavInspect(object, "categorical") threshold <- if (categorical) getThreshold(object) else NULL result <- list() for (i in 1:nblocks) { common <- (apply(ly[[i]], 2, sum)^2) * diag(ve[[i]]) truevar <- ly[[i]] %*% ve[[i]] %*% t(ly[[i]]) error <- rep(NA, length(common)) alpha <- rep(NA, length(common)) total <- rep(NA, length(common)) omega1 <- omega2 <- omega3 <- rep(NA, length(common)) impliedTotal <- rep(NA, length(common)) avevar <- rep(NA, length(common)) for (j in 1:length(common)) { index <- which(ly[[i]][,j] != 0) error[j] <- sum(te[[i]][index, index, drop = FALSE]) sigma <- S[[i]][index, index, drop = FALSE] alpha[j] <- computeAlpha(sigma, length(index)) total[j] <- sum(sigma) impliedTotal[j] <- sum(SigmaHat[[i]][index, index, drop = FALSE]) faccontrib <- ly[[i]][,j, drop = FALSE] %*% ve[[i]][j,j, drop = FALSE] %*% t(ly[[i]][,j, drop = FALSE]) truefac <- diag(faccontrib[index, index, drop = FALSE]) commonfac <- sum(faccontrib[index, index, drop = FALSE]) trueitem <- diag(truevar[index, index, drop = FALSE]) erritem <- diag(te[[i]][index, index, drop = FALSE]) if (sum(abs(trueitem - truefac)) < 0.00001) { avevar[j] <- sum(trueitem) / sum(trueitem + erritem) } else { avevar[j] <- NA } if (categorical) { omega1[j] <- omegaCat(truevar = faccontrib[index, index, drop = FALSE], implied = SigmaHat[[i]][index, index, drop = FALSE], threshold = threshold[[i]][index], denom = faccontrib[index, index, drop = FALSE] + te[[i]][index, index, drop = FALSE]) omega2[j] <- omegaCat(truevar = faccontrib[index, index, drop = FALSE], implied = SigmaHat[[i]][index, index, drop = FALSE], threshold = threshold[[i]][index], denom = SigmaHat[[i]][index, index, drop = FALSE]) omega3[j] <- omegaCat(truevar = faccontrib[index, index, drop = FALSE], implied = SigmaHat[[i]][index, index, drop = FALSE], threshold = threshold[[i]][index], denom = sigma) } else { omega1[j] <- commonfac / (commonfac + error[j]) omega2[j] <- commonfac / impliedTotal[j] omega3[j] <- commonfac / total[j] } } alpha <- c(alpha, total = computeAlpha(S[[i]], nrow(S[[i]]))) names(alpha) <- c(names(common), "total") if (categorical) { omega1 <- c(omega1, total = omegaCat(truevar = truevar, implied = SigmaHat[[i]], threshold = threshold[[i]], denom = truevar + te[[i]])) omega2 <- c(omega2, total = omegaCat(truevar = truevar, implied = SigmaHat[[i]], threshold = threshold[[i]], denom = SigmaHat[[i]])) omega3 <- c(omega3, total = omegaCat(truevar = truevar, implied = SigmaHat[[i]], threshold = threshold[[i]], denom = S[[i]])) } else { omega1 <- 
c(omega1, total = sum(truevar) / (sum(truevar) + sum(te[[i]])))
      omega2 <- c(omega2, total = sum(truevar) / (sum(SigmaHat[[i]])))
      omega3 <- c(omega3, total = sum(truevar) / (sum(S[[i]])))
    }
    avevar <- c(avevar,
                total = sum(diag(truevar)) / sum((diag(truevar) + diag(te[[i]]))))
    singleIndicator <- apply(ly[[i]], 2, function(x) sum(x != 0)) %in% 0:1
    result[[i]] <- rbind(alpha = alpha, omega = omega1, omega2 = omega2,
                         omega3 = omega3, avevar = avevar)[ , !singleIndicator]
  }
  if (categorical) message("The alpha and the average variance extracted are ",
                           "calculated from polychoric (polyserial) ",
                           "correlations, not from Pearson correlations.\n")
  if (nblocks == 1L) {
    result <- result[[1]]
  } else names(result) <- block.label
  result
}



## ---------------
## reliabilityL2()
## ---------------

##' Calculate the reliability values of a second-order factor
##'
##' Calculate the reliability values (coefficient omega) of a second-order
##' factor
##'
##' The first formula of the coefficient omega (in \code{\link{reliability}})
##' will be mainly used in the calculation. The model-implied covariance matrix
##' of a second-order factor model can be separated into three sources: the
##' second-order factor, the uniqueness of the first-order factors, and the
##' measurement error of indicators:
##'
##' \deqn{ \hat{\Sigma} = \Lambda \bold{B} \Phi_2 \bold{B}^{\prime}
##' \Lambda^{\prime} + \Lambda \Psi_{u} \Lambda^{\prime} + \Theta, }
##'
##' where \eqn{\hat{\Sigma}} is the model-implied covariance matrix,
##' \eqn{\Lambda} contains the first-order factor loadings, \eqn{\bold{B}}
##' contains the second-order factor loadings, \eqn{\Phi_2} is the covariance
##' matrix of the second-order factors, \eqn{\Psi_{u}} is the covariance matrix
##' of the unique scores from first-order factors, and \eqn{\Theta} is the
##' covariance matrix of the measurement errors from indicators. Thus, the
##' proportion of the total score explained by the second-order factor, or the
##' coefficient omega at Level 1, can be calculated:
##'
##' \deqn{ \omega_{L1} = \frac{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2
##' \bold{B}^{\prime} \Lambda^{\prime} \bold{1}}{\bold{1}^{\prime} \Lambda
##' \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1} +
##' \bold{1}^{\prime} \Lambda \Psi_{u} \Lambda^{\prime} \bold{1} +
##' \bold{1}^{\prime} \Theta \bold{1}}, }
##'
##' where \eqn{\bold{1}} is the \emph{k}-dimensional vector of 1 and \emph{k} is
##' the number of observed variables. The model-implied covariance matrix among
##' the first-order factors (\eqn{\Phi_1}) can be calculated as:
##'
##' \deqn{ \Phi_1 = \bold{B} \Phi_2 \bold{B}^{\prime} + \Psi_{u}, }
##'
##' Thus, the proportion of the second-order factor explaining the variance at
##' the first-order factor level, or the coefficient omega at Level 2, can be
##' calculated:
##'
##' \deqn{ \omega_{L2} = \frac{\bold{1_F}^{\prime} \bold{B} \Phi_2
##' \bold{B}^{\prime} \bold{1_F}}{\bold{1_F}^{\prime} \bold{B} \Phi_2
##' \bold{B}^{\prime} \bold{1_F} + \bold{1_F}^{\prime} \Psi_{u} \bold{1_F}}, }
##'
##' where \eqn{\bold{1_F}} is the \emph{F}-dimensional vector of 1 and \emph{F}
##' is the number of first-order factors.
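##'
##' For example (hypothetical values), suppose three first-order factors each
##' have a second-order loading of .7, the second-order factor variance is 1,
##' and each unique variance in \eqn{\Psi_{u}} is .51. Then the numerator is
##' \eqn{(3 \times .7)^2 \times 1 = 4.41}, the denominator adds
##' \eqn{3 \times .51 = 1.53}, and \eqn{\omega_{L2} = 4.41 / (4.41 + 1.53)
##' \approx .74}.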
##' ##' The partial coefficient omega at Level 1, or the proportion of observed ##' variance explained by the second-order factor after partialling the ##' uniqueness from the first-order factor, can be calculated: ##' ##' \deqn{ \omega_{L1} = \frac{\bold{1}^{\prime} \Lambda \bold{B} \Phi_2 ##' \bold{B}^{\prime} \Lambda^{\prime} \bold{1}}{\bold{1}^{\prime} \Lambda ##' \bold{B} \Phi_2 \bold{B}^{\prime} \Lambda^{\prime} \bold{1} + ##' \bold{1}^{\prime} \Theta \bold{1}}, } ##' ##' Note that if the second-order factor has a direct factor loading on some ##' observed variables, the observed variables will be counted as first-order ##' factors. ##' ##' ##' @importFrom lavaan lavInspect ##' ##' @param object A \code{\linkS4class{lavaan}} or ##' \code{\linkS4class{lavaan.mi}} object, expected to contain a least one ##' exogenous higher-order common factor. ##' @param secondFactor The name of a single second-order factor in the ##' model fitted in \code{object}. The function must be called multiple ##' times to estimate reliability for each higher-order factor. ##' @param omit.imps \code{character} vector specifying criteria for omitting ##' imputations from pooled results. Can include any of ##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the ##' default setting, which excludes any imputations that did not ##' converge or for which standard errors could not be computed. The ##' last option (\code{"no.npd"}) would exclude any imputations which ##' yielded a nonpositive definite covariance matrix for observed or ##' latent variables, which would include any "improper solutions" such ##' as Heywood cases. NPD solutions are not excluded by default because ##' they are likely to occur due to sampling error, especially in small ##' samples. However, gross model misspecification could also cause ##' NPD solutions, users can compare pooled results with and without ##' this setting as a sensitivity analysis to see whether some ##' imputations warrant further investigation. ##' ##' @return Reliability values at Levels 1 and 2 of the second-order factor, as ##' well as the partial reliability value at Level 1 ##' ##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com}) ##' ##' @seealso ##' \code{\link{reliability}} for the reliability of the first-order factors. ##' ##' @examples ##' ##' library(lavaan) ##' ##' HS.model3 <- ' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ##' higher =~ visual + textual + speed' ##' ##' fit6 <- cfa(HS.model3, data = HolzingerSwineford1939) ##' reliability(fit6) # Should provide a warning for the endogenous variables ##' reliabilityL2(fit6, "higher") ##' ##' @export reliabilityL2 <- function(object, secondFactor, omit.imps = c("no.conv","no.se")) { secondFactor <- as.character(secondFactor)[1] # only one at a time ngroups <- lavInspect(object, "ngroups") #TODO: adapt to multiple levels nlevels <- lavInspect(object, "nlevels") nblocks <- ngroups*nlevels #FIXME: always true? group.label <- if (ngroups > 1L) lavInspect(object, "group.label") else NULL #FIXME? 
lavInspect(object, "level.labels") clus.label <- if (nlevels > 1L) c("within", lavInspect(object, "cluster")) else NULL if (nblocks > 1L) { block.label <- paste(rep(group.label, each = nlevels), clus.label, sep = if (ngroups > 1L && nlevels > 1L) "_" else "") } ## parameters in GLIST format (not flat, need block-level list) if (inherits(object, "lavaan")) { param <- lavInspect(object, "est") ve <- lavInspect(object, "cov.lv") # model-implied latent covariance matrix S <- object@h1$implied$cov # observed sample covariance matrix (already a list) if (nblocks == 1L) { param <- list(param) ve <- list(ve) } } else if (inherits(object, "lavaan.mi")) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) param <- object@coefList[[ useImps[1] ]] # first admissible as template coefList <- object@coefList[useImps] phiList <- object@phiList[useImps] ## add block-level list per imputation? if (nblocks == 1L) { param <- list(param) for (i in 1:m) { coefList[[i]] <- list(coefList[[i]]) phiList[[i]] <- list(phiList[[i]]) } } S <- vector("list", nblocks) # pooled observed covariance matrix ve <- vector("list", nblocks) ## loop over blocks for (b in 1:nblocks) { ## param: loop over GLIST elements for (mat in names(param[[b]])) { matList <- lapply(coefList, function(i) i[[b]][[mat]]) param[[b]][[mat]] <- Reduce("+", matList) / length(matList) } # mat ## pooled observed covariance matrix covList <- lapply(object@h1List[useImps], function(i) i$implied$cov[[b]]) S[[b]] <- Reduce("+", covList) / m ## pooled model-implied latent covariance matrix ve[[b]] <- Reduce("+", lapply(phiList, "[[", i = b) ) / m } # b } if (nblocks == 1L) { SigmaHat <- getMethod("fitted", class(object))(object)["cov"] # retain list format } else { SigmaHat <- sapply(getMethod("fitted", class(object))(object), "[[", "cov", simplify = FALSE) } ly <- lapply(param, "[[", "lambda") te <- lapply(param, "[[", "theta") ps <- lapply(param, "[[", "psi") be <- lapply(param, "[[", "beta") result <- list() for (i in 1:nblocks) { # Prepare for higher-order reliability l2var <- ve[[i]][secondFactor, secondFactor, drop = FALSE] l2load <- be[[1]][,secondFactor] indexl2 <- which(l2load != 0) commonl2 <- (sum(l2load)^2) * l2var errorl2 <- sum(ps[[i]][indexl2, indexl2, drop = FALSE]) # Prepare for lower-order reliability indexl1 <- which(apply(ly[[i]][,indexl2, drop = FALSE], 1, function(x) sum(x != 0)) > 0) l1load <- ly[[i]][,indexl2] %*% as.matrix(be[[1]][indexl2, secondFactor, drop = FALSE]) commonl1 <- (sum(l1load)^2) * l2var errorl1 <- sum(te[[i]][indexl1, indexl1, drop = FALSE]) uniquel1 <- 0 for (j in seq_along(indexl2)) { uniquel1 <- uniquel1 + (sum(ly[[i]][,indexl2[j]])^2) * ps[[i]][indexl2[j], indexl2[j], drop = FALSE] } # Adjustment for direct loading from L2 to observed variables if (any(ly[[i]][,secondFactor] != 0)) { indexind <- which(ly[[i]][,secondFactor] != 0) if (length(intersect(indexind, indexl1)) > 0) stop("Direct and indirect loadings of higher-order factor to observed", " variables are specified at the same time.") commonl2 <- 
sum(c(ly[[i]][,secondFactor], l2load))^2 * l2var
      errorl2 <- errorl2 + sum(te[[i]][indexind, indexind, drop = FALSE])
      commonl1 <- sum(c(ly[[i]][,secondFactor], l1load))^2 * l2var
      errorl1 <- errorl1 + sum(te[[i]][indexind, indexind, drop = FALSE])
    }
    # Calculate Reliability
    omegaL1 <- commonl1 / (commonl1 + uniquel1 + errorl1)
    omegaL2 <- commonl2 / (commonl2 + errorl2)
    partialOmegaL1 <- commonl1 / (commonl1 + errorl1)
    result[[i]] <- c(omegaL1 = omegaL1, omegaL2 = omegaL2,
                     partialOmegaL1 = partialOmegaL1)
  }
  if (nblocks == 1L) {
    result <- result[[1]]
  } else names(result) <- block.label
  result
}



## --------------
## maximalRelia()
## --------------

##' Calculate maximal reliability
##'
##' Calculate maximal reliability of a scale
##'
##' Given that a composite score (\eqn{W}) is a weighted sum of item scores:
##'
##' \deqn{ W = \bold{w}^\prime \bold{x} ,}
##'
##' where \eqn{\bold{x}} is a \eqn{k \times 1} vector of the scores of each
##' item, \eqn{\bold{w}} is a \eqn{k \times 1} weight vector of each item, and
##' \eqn{k} represents the number of items. Then, maximal reliability is
##' obtained by finding \eqn{\bold{w}} such that reliability attains its maximum
##' (Li, 1997; Raykov, 2012). Note that the reliability can be obtained by
##'
##' \deqn{ \rho = \frac{\bold{w}^\prime \bold{S}_T \bold{w}}{\bold{w}^\prime
##' \bold{S}_X \bold{w}}}
##'
##' where \eqn{\bold{S}_T} is the covariance matrix explained by true scores and
##' \eqn{\bold{S}_X} is the observed covariance matrix. A numerical method is
##' used to find \eqn{\bold{w}} in this function.
##'
##' For continuous items, \eqn{\bold{S}_T} can be calculated by
##'
##' \deqn{ \bold{S}_T = \Lambda \Psi \Lambda^\prime,}
##'
##' where \eqn{\Lambda} is the factor loading matrix and \eqn{\Psi} is the
##' covariance matrix among factors. \eqn{\bold{S}_X} is directly obtained from
##' the covariances among items.
##'
##' For categorical items, Green and Yang's (2009) method is used for
##' calculating \eqn{\bold{S}_T} and \eqn{\bold{S}_X}. Element \eqn{(i, j)} of
##' \eqn{\bold{S}_T} can be calculated by
##'
##' \deqn{ \left[\bold{S}_T\right]_{ij} = \sum^{C_i - 1}_{c_i = 1} \sum^{C_j -
##' 1}_{c_j = 1} \Phi_2\left( \tau_{x_{c_i}}, \tau_{x_{c_j}}, \left[ \Lambda
##' \Psi \Lambda^\prime \right]_{ij} \right) - \sum^{C_i - 1}_{c_i = 1}
##' \Phi_1(\tau_{x_{c_i}}) \sum^{C_j - 1}_{c_j = 1} \Phi_1(\tau_{x_{c_j}}),}
##'
##' where \eqn{C_i} and \eqn{C_j} represent the numbers of thresholds in Items
##' \eqn{i} and \eqn{j}, \eqn{\tau_{x_{c_i}}} represents threshold \eqn{c_i}
##' of Item \eqn{i}, \eqn{\tau_{x_{c_j}}} represents threshold \eqn{c_j} of
##' Item \eqn{j}, \eqn{ \Phi_1(\tau_{x_{c_i}})} is the cumulative probability of
##' \eqn{\tau_{x_{c_i}}} given a univariate standard normal cumulative
##' distribution, and \eqn{\Phi_2\left( \tau_{x_{c_i}}, \tau_{x_{c_j}}, \rho
##' \right)} is the joint cumulative probability of \eqn{\tau_{x_{c_i}}} and
##' \eqn{\tau_{x_{c_j}}} given a bivariate standard normal cumulative
##' distribution with a correlation of \eqn{\rho}.
##'
##' Each element of \eqn{\bold{S}_X} can be calculated by
##'
##' \deqn{ \left[\bold{S}_X\right]_{ij} = \sum^{C_i - 1}_{c_i = 1} \sum^{C_j -
##' 1}_{c_j = 1} \Phi_2\left( \tau_{V_{c_i}}, \tau_{V_{c_j}}, \rho^*_{ij}
##' \right) - \sum^{C_i - 1}_{c_i = 1} \Phi_1(\tau_{V_{c_i}}) \sum^{C_j -
##' 1}_{c_j = 1} \Phi_1(\tau_{V_{c_j}}),}
##'
##' where \eqn{\rho^*_{ij}} is a polychoric correlation between Items \eqn{i}
##' and \eqn{j}.
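##'
##' Because maximizing a ratio of two quadratic forms is a generalized
##' eigenvalue problem, the numerically optimized weights for continuous items
##' can be checked against a closed-form solution: the leading eigenvector of
##' \eqn{\bold{S}^{-1}_X \bold{S}_T}. A minimal sketch, assuming \code{ST} and
##' \code{SX} hold \eqn{\bold{S}_T} and \eqn{\bold{S}_X} (illustrative names,
##' not objects created by this function):
##'
##' \preformatted{
##' w <- Re(eigen(solve(SX, ST))$vectors[ , 1])    # maximizing weights
##' sum(outer(w, w) * ST) / sum(outer(w, w) * SX)  # maximal reliability
##' }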
##'
##'
##' @importFrom lavaan lavInspect lavNames
##'
##' @param object A \code{\linkS4class{lavaan}} or
##'   \code{\linkS4class{lavaan.mi}} object, expected to contain only
##'   exogenous common factors (i.e., a CFA model).
##' @param omit.imps \code{character} vector specifying criteria for omitting
##'   imputations from pooled results. Can include any of
##'   \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the
##'   default setting, which excludes any imputations that did not
##'   converge or for which standard errors could not be computed. The
##'   last option (\code{"no.npd"}) would exclude any imputations which
##'   yielded a nonpositive definite covariance matrix for observed or
##'   latent variables, which would include any "improper solutions" such
##'   as Heywood cases. NPD solutions are not excluded by default because
##'   they are likely to occur due to sampling error, especially in small
##'   samples. However, gross model misspecification could also cause
##'   NPD solutions, so users can compare pooled results with and without
##'   this setting as a sensitivity analysis to see whether some
##'   imputations warrant further investigation.
##'
##' @return Maximal reliability values of each group. The maximal-reliability
##'   weights are also provided. Users may extract the weights using the
##'   \code{attr} function (see example below).
##'
##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
##'
##' @seealso \code{\link{reliability}} for reliability of an unweighted
##'   composite score
##'
##' @references
##' Li, H. (1997). A unifying expression for the maximal reliability of a linear
##' composite. \emph{Psychometrika, 62}(2), 245--249. doi:10.1007/BF02295278
##'
##' Raykov, T. (2012). Scale construction and development using structural
##' equation modeling. In R. H. Hoyle (Ed.), \emph{Handbook of structural
##' equation modeling} (pp. 472--494). New York, NY: Guilford.
##'
##' @examples
##'
##' total <- 'f =~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 '
##' fit <- cfa(total, data = HolzingerSwineford1939)
##' maximalRelia(fit)
##'
##' # Extract the weights
##' mr <- maximalRelia(fit)
##' attr(mr, "weight")
##'
##' @export
maximalRelia <- function(object, omit.imps = c("no.conv","no.se")) {
  ngroups <- lavInspect(object, "ngroups") #TODO: adapt to multiple levels
  nlevels <- lavInspect(object, "nlevels")
  nblocks <- ngroups*nlevels #FIXME: always true?
  group.label <- if (ngroups > 1L) lavInspect(object, "group.label") else NULL #FIXME?
lavInspect(object, "level.labels") clus.label <- if (nlevels > 1L) c("within", lavInspect(object, "cluster")) else NULL if (nblocks > 1L) { block.label <- paste(rep(group.label, each = nlevels), clus.label, sep = if (ngroups > 1L && nlevels > 1L) "_" else "") } ## parameters in GLIST format (not flat, need block-level list) if (inherits(object, "lavaan")) { param <- lavInspect(object, "est") ve <- lavInspect(object, "cov.lv") # model-implied latent covariance matrix S <- object@h1$implied$cov # observed sample covariance matrix (already a list) if (nblocks == 1L) { param <- list(param) ve <- list(ve) } } else if (inherits(object, "lavaan.mi")) { useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) param <- object@coefList[[ useImps[1] ]] # first admissible as template coefList <- object@coefList[useImps] phiList <- object@phiList[useImps] ## add block-level list per imputation? if (nblocks == 1L) { param <- list(param) for (i in 1:m) { coefList[[i]] <- list(coefList[[i]]) phiList[[i]] <- list(phiList[[i]]) } } S <- vector("list", nblocks) # pooled observed covariance matrix ve <- vector("list", nblocks) ## loop over blocks for (b in 1:nblocks) { ## param: loop over GLIST elements for (mat in names(param[[b]])) { matList <- lapply(coefList, function(i) i[[b]][[mat]]) param[[b]][[mat]] <- Reduce("+", matList) / length(matList) } # mat ## pooled observed covariance matrix covList <- lapply(object@h1List[useImps], function(i) i$implied$cov[[b]]) S[[b]] <- Reduce("+", covList) / m ## pooled model-implied latent covariance matrix ve[[b]] <- Reduce("+", lapply(phiList, "[[", i = b) ) / m } # b } if (nblocks == 1L) { SigmaHat <- getMethod("fitted", class(object))(object)["cov"] # retain list format } else { SigmaHat <- sapply(getMethod("fitted", class(object))(object), "[[", "cov", simplify = FALSE) } ly <- lapply(param, "[[", "lambda") te <- lapply(param, "[[", "theta") categorical <- lavInspect(object, "categorical") threshold <- if (categorical) getThreshold(object) else NULL result <- list() for (i in 1:nblocks) { truevar <- ly[[i]] %*% ve[[i]] %*% t(ly[[i]]) varnames <- colnames(truevar) if (categorical) { invstdvar <- 1 / sqrt(diag(SigmaHat[[i]])) polyr <- diag(invstdvar) %*% truevar %*% diag(invstdvar) nitem <- ncol(SigmaHat[[i]]) result[[i]] <- calcMaximalReliaCat(polyr, threshold[[i]], S[[i]], nitem, varnames) } else { result[[i]] <- calcMaximalRelia(truevar, S[[i]], varnames) } } if (nblocks == 1L) { result <- result[[1]] } else names(result) <- block.label result } ## ---------------- ## Hidden Functions ## ---------------- computeAlpha <- function(S, k) k/(k - 1) * (1.0 - sum(diag(S)) / sum(S)) #' @importFrom stats cov2cor pnorm omegaCat <- function(truevar, implied, threshold, denom) { # denom could be polychoric correlation, model-implied correlation, or model-implied without error correlation polyc <- truevar invstdvar <- 1 / sqrt(diag(implied)) polyr <- diag(invstdvar) %*% polyc %*% diag(invstdvar) nitem <- ncol(implied) denom <- cov2cor(denom) sumnum <- 0 addden <- 0 for (j in 1:nitem) { for (jp 
in 1:nitem) { sumprobn2 <- 0 addprobn2 <- 0 t1 <- threshold[[j]] t2 <- threshold[[jp]] for (c in 1:length(t1)) { for (cp in 1:length(t2)) { sumprobn2 <- sumprobn2 + p2(t1[c], t2[cp], polyr[j, jp]) addprobn2 <- addprobn2 + p2(t1[c], t2[cp], denom[j, jp]) } } sumprobn1 <- sum(pnorm(t1)) sumprobn1p <- sum(pnorm(t2)) sumnum <- sumnum + (sumprobn2 - sumprobn1 * sumprobn1p) addden <- addden + (addprobn2 - sumprobn1 * sumprobn1p) } } reliab <- sumnum / addden reliab } p2 <- function(t1, t2, r) { mnormt::pmnorm(c(t1, t2), c(0,0), matrix(c(1, r, r, 1), 2, 2)) } # polycorLavaan <- function(object) { # ngroups <- lavInspect(object, "ngroups") # coef <- lavInspect(object, "est") # targettaunames <- NULL # if (ngroups == 1L) { # targettaunames <- rownames(coef$tau) # } else { # targettaunames <- rownames(coef[[1]]$tau) # } # barpos <- sapply(strsplit(targettaunames, ""), function(x) which(x == "|")) # varnames <- unique(apply(data.frame(targettaunames, barpos - 1), MARGIN = 1, # FUN = function(x) substr(x[1], 1, x[2]))) # if (length(varnames)) # script <- "" # for (i in 2:length(varnames)) { # temp <- paste0(varnames[1:(i - 1)], collapse = " + ") # temp <- paste0(varnames[i], "~~", temp, "\n") # script <- paste(script, temp) # } # newobject <- refit(script, object) # if (ngroups == 1L) { # return(lavInspect(newobject, "est")$theta) # } # lapply(lavInspect(newobject, "est"), "[[", "theta") # } #' @importFrom lavaan lavInspect lavNames getThreshold <- function(object) { ngroups <- lavInspect(object, "ngroups") ordnames <- lavNames(object, "ov.ord") if (ngroups == 1L) { thresholds <- fitted(object)$th result <- lapply(ordnames, function(nn) thresholds[grepl(nn, names(thresholds))]) names(result) <- ordnames } else { thresholds <- sapply(fitted(object), "[[", i = "th", simplify = FALSE) result <- list() group.label <- lavInspect(object, "group.label") for (g in 1:ngroups) { result[[ group.label[g] ]] <- lapply(ordnames, function(nn) { thresholds[[g]][ grepl(nn, names(thresholds[[g]])) ] }) names(result[[ group.label[g] ]]) <- ordnames } } return(result) } invGeneralRelia <- function(w, truevar, totalvar) { 1 - (t(w) %*% truevar %*% w) / (t(w) %*% totalvar %*% w) } #' @importFrom stats pnorm invGeneralReliaCat <- function(w, polyr, threshold, denom, nitem) { # denom could be polychoric correlation, model-implied correlation, or model-implied without error correlation upper <- matrix(NA, nitem, nitem) lower <- matrix(NA, nitem, nitem) for (j in 1:nitem) { for (jp in 1:nitem) { sumprobn2 <- 0 addprobn2 <- 0 t1 <- threshold[[j]] t2 <- threshold[[jp]] for (c in 1:length(t1)) { for (cp in 1:length(t2)) { sumprobn2 <- sumprobn2 + p2(t1[c], t2[cp], polyr[j, jp]) addprobn2 <- addprobn2 + p2(t1[c], t2[cp], denom[j, jp]) } } sumprobn1 <- sum(pnorm(t1)) sumprobn1p <- sum(pnorm(t2)) upper[j, jp] <- (sumprobn2 - sumprobn1 * sumprobn1p) lower[j, jp] <- (addprobn2 - sumprobn1 * sumprobn1p) } } 1 - (t(w) %*% upper %*% w) / (t(w) %*% lower %*% w) } #' @importFrom stats nlminb calcMaximalRelia <- function(truevar, totalvar, varnames) { start <- rep(1, nrow(truevar)) out <- nlminb(start, invGeneralRelia, truevar = truevar, totalvar = totalvar) if (out$convergence != 0) stop("The numerical method for finding the maximal", " reliability did not converge.") result <- 1 - out$objective weight <- out$par / mean(out$par) names(weight) <- varnames attr(result, "weight") <- weight result } #' @importFrom stats nlminb calcMaximalReliaCat <- function(polyr, threshold, denom, nitem, varnames) { start <- rep(1, nrow(polyr)) out <- 
nlminb(start, invGeneralReliaCat, polyr = polyr, threshold = threshold,
                denom = denom, nitem = nitem)
  if (out$convergence != 0) stop("The numerical method for finding the maximal",
                                 " reliability did not converge.")
  result <- 1 - out$objective
  weight <- out$par / mean(out$par)
  names(weight) <- varnames
  attr(result, "weight") <- weight
  result
}
semTools/R/auxiliary.R0000644000176200001440000002477413432435606014426 0ustar liggesusers### Terrence D. Jorgensen
### Last updated: 8 November 2018
### new auxiliary function does NOT create a lavaanStar-class instance

#' Implement Saturated Correlates with FIML
#'
#' Automatically add auxiliary variables to a lavaan model when using full
#' information maximum likelihood (FIML) to handle missing data
#'
#' These functions are wrappers around the corresponding lavaan functions.
#' You can use them the same way you use \code{\link[lavaan]{lavaan}}, but you
#' \emph{must} pass your full \code{data.frame} to the \code{data} argument.
#' Because the saturated-correlates approach (Enders, 2008) treats exogenous
#' variables as random, \code{fixed.x} must be set to \code{FALSE}. Because FIML
#' requires continuous data (although nonnormality corrections can still be
#' requested), neither variables in the model nor auxiliary variables specified
#' in \code{aux} can be declared as \code{ordered}.
#'
#' @aliases auxiliary lavaan.auxiliary cfa.auxiliary sem.auxiliary growth.auxiliary
#' @importFrom lavaan lavInspect parTable
#' @importFrom stats cov quantile
#'
#' @param model The analysis model can be specified with 1 of 2 objects:
#'   \enumerate{
#'     \item lavaan \code{\link[lavaan]{model.syntax}} specifying a hypothesized
#'           model \emph{without} mention of auxiliary variables in \code{aux}
#'     \item a parameter table, as returned by \code{\link[lavaan]{parTable}},
#'           specifying the target model \emph{without} auxiliary variables.
#'           This option requires these columns (and silently ignores all others):
#'           \code{c("lhs","op","rhs","user","group","free","label","plabel","start")}
#'   }
#' @param data \code{data.frame} that includes auxiliary variables as well as
#'   any observed variables in the \code{model}
#' @param aux \code{character}. Names of auxiliary variables to add to \code{model}
#' @param fun \code{character}. Name of a specific lavaan function used to fit
#'   \code{model} to \code{data} (i.e., \code{"lavaan"}, \code{"cfa"},
#'   \code{"sem"}, or \code{"growth"}). Only required for \code{auxiliary}.
#' @param ... additional arguments to pass to \code{\link[lavaan]{lavaan}}.
#'
#' @author
#' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com})
#'
#' @references Enders, C. K. (2008). A note on the use of missing auxiliary
#'   variables in full information maximum likelihood-based structural equation
#'   models. \emph{Structural Equation Modeling, 15}(3), 434--448.
#'   doi:10.1080/10705510802154307
#'
#' @return a fitted \code{\linkS4class{lavaan}} object. Additional
#'   information is stored as a \code{list} in the \code{@@external} slot:
#'   \itemize{
#'     \item \code{baseline.model}. a fitted \code{\linkS4class{lavaan}}
#'           object. Results of fitting an appropriate independence model for
#'           the calculation of incremental fit indices (e.g., CFI, TLI) in
#'           which the auxiliary variables remain saturated, so only the target
#'           variables are constrained to be orthogonal. See Examples for how
#'           to send this baseline model to \code{\link[lavaan]{fitMeasures}}.
#'     \item \code{aux}. The character vector of auxiliary variable names.
#' \item \code{baseline.syntax}. A character vector generated within the #' \code{auxiliary} function, specifying the \code{baseline.model} #' syntax. #' } #' #' @examples #' dat1 <- lavaan::HolzingerSwineford1939 #' set.seed(12345) #' dat1$z <- rnorm(nrow(dat1)) #' dat1$x5 <- ifelse(dat1$z < quantile(dat1$z, .3), NA, dat1$x5) #' dat1$x9 <- ifelse(dat1$z > quantile(dat1$z, .8), NA, dat1$x9) #' #' targetModel <- " #' visual =~ x1 + x2 + x3 #' textual =~ x4 + x5 + x6 #' speed =~ x7 + x8 + x9 #' " #' #' ## works just like cfa(), but with an extra "aux" argument #' fitaux1 <- cfa.auxiliary(targetModel, data = dat1, aux = "z", #' missing = "fiml", estimator = "mlr") #' #' ## with multiple auxiliary variables and multiple groups #' fitaux2 <- cfa.auxiliary(targetModel, data = dat1, aux = c("z","ageyr","grade"), #' group = "school", group.equal = "loadings") #' #' ## calculate correct incremental fit indices (e.g., CFI, TLI) #' fitMeasures(fitaux2, fit.measures = c("cfi","tli")) #' ## NOTE: lavaan will use the internally stored baseline model, which #' ## is the independence model plus saturated auxiliary parameters #' lavInspect(fitaux2@external$baseline.model, "free") #' #' @export auxiliary <- function(model, data, aux, fun, ...) { lavArgs <- list(...) lavArgs$data <- data lavArgs$fixed.x <- FALSE lavArgs$missing <- "fiml" lavArgs$meanstructure <- TRUE lavArgs$ordered <- NULL if (missing(aux)) stop("Please provide a character vector with names of auxiliary variables") if (missing(data)) stop("Please provide a data.frame that includes modeled and auxiliary variables") if (!all(sapply(data[aux], is.numeric))) stop("missing = 'FIML' is unavailable for categorical data") PTcols <- c("lhs","op","rhs","user","block","group","free","label","plabel","start") ## check parameter table, or create one from syntax if (is.list(model)) { if (any(model$exo == 1)) stop("All exogenous variables (covariates) must be treated as endogenous", " by the 'auxiliary' function. 
Please set 'fixed.x = FALSE'") if (!is.null(lavArgs$group.equal)) warning("The 'group.equal' argument is ignored when 'model' is a parameter table.") if (is.null(model$start)) { startArgs <- lavArgs startArgs$model <- model startArgs$do.fit <- FALSE model$start <- parTable(do.call(fun, startArgs))$start } missingCols <- setdiff(PTcols, names(model)) if (length(missingCols)) stop("If the 'model' argument is a parameter table", " it must also include these columns: \n", paste(missingCols, collapse = ", ")) PT <- as.data.frame(model, stringsAsFactors = FALSE)[PTcols] } else if (is.character(model)) { ptArgs <- lavArgs ptArgs$model <- model ptArgs$do.fit <- FALSE PT <- parTable(do.call(fun, ptArgs))[PTcols] } else stop("The 'model' argument must be a character vector of", " lavaan syntax or a parameter table") ## separately store rows with constraints or user-defined parameters conRows <- PT$op %in% c("==","<",">",":=") if (any(conRows)) { CON <- PT[ conRows, ] PT <- PT[ !conRows, ] } else CON <- data.frame(NULL) ## variable names varnames <- lavaan::lavNames(PT, type = "ov") if (length(intersect(varnames, aux))) stop('modeled variable declared as auxiliary') ## specify a saturated model for auxiliaries covstruc <- outer(aux, aux, function(x, y) paste(x, "~~", y)) satMod <- c(covstruc[lower.tri(covstruc, diag = TRUE)], paste(aux, "~ 1"), # among auxiliaries outer(aux, varnames, function(x, y) paste(x, "~~", y))) # between aux and targets satPT <- lavaan::lavaanify(satMod, ngroups = max(PT$group))[c("lhs","op","rhs", "user","block","group")] ## after omitting duplicates, check number of added parameters, add columns mergedPT <- lavaan::lav_partable_merge(PT, satPT, remove.duplicated = TRUE, warn = FALSE) nAuxPar <- nrow(mergedPT) - nrow(PT) newRows <- 1L:nAuxPar + nrow(PT) ##FIXME: mergedPT$user[newRows] <- 2L (list as constraints to omit printing?) or new code (9L)? 
  mergedPT$free[newRows] <- 1L:nAuxPar + max(PT$free)
  mergedPT$plabel[newRows] <- paste0(".p", 1L:nAuxPar + nrow(PT), ".")
  ## calculate sample moments as starting values (recycle over groups)
  # if (is.null(lavArgs$group)) {
  #   auxCov <- cov(data[aux], use = "pairwise.complete.obs")
  #   auxM <- colMeans(data[aux], na.rm = TRUE)
  #   auxTarget <- cov(data[c(aux, varnames)],
  #                    use = "pairwise.complete.obs")[aux, varnames]
  #   ## match order of parameters in syntax above
  #   mergedPT$start[newRows] <- c(auxCov[lower.tri(auxCov, diag = TRUE)],
  #                                auxM, as.numeric(auxTarget))
  # } else {
  #   auxCovs <- list()
  #   auxMs <- list()
  #   auxTargets <- list()
  #   startVals <- numeric(0)
  #   for (g in unique(data[ , lavArgs$group])) {
  #     auxCovs[[g]] <- cov(data[data[ , lavArgs$group] == g, aux],
  #                         use = "pairwise.complete.obs")
  #     auxMs[[g]] <- colMeans(data[data[ , lavArgs$group] == g, aux], na.rm = TRUE)
  #     auxTargets[[g]] <- cov(data[data[ , lavArgs$group] == g, c(aux, varnames)],
  #                            use = "pairwise.complete.obs")[aux, varnames]
  #     startVals <- c(startVals, auxCovs[[g]][lower.tri(auxCovs[[g]], diag = TRUE)],
  #                    auxMs[[g]], as.numeric(auxTargets[[g]]))
  #   }
  #   ## match order of parameters in syntax above
  #   mergedPT$start[newRows] <- startVals
  # }
  lavArgs$model <- lavaan::lav_partable_complete(rbind(mergedPT, CON))
  result <- do.call(fun, lavArgs)

  ## specify, fit, and attach an appropriate independence model
  baseArgs <- list()
  baseArgs$model              <- lavaan::lav_partable_complete(satPT)
  baseArgs$data               <- data
  baseArgs$group              <- lavArgs$group
  baseArgs$group.label        <- lavArgs$group.label
  baseArgs$missing            <- "fiml"
  baseArgs$cluster            <- lavArgs$cluster
  baseArgs$sample.cov.rescale <- lavArgs$sample.cov.rescale
  baseArgs$information        <- lavArgs$information
  baseArgs$se                 <- lavArgs$se
  baseArgs$test               <- lavArgs$test
  baseArgs$bootstrap          <- lavArgs$bootstrap
  baseArgs$control            <- lavArgs$control
  baseArgs$optim.method       <- lavArgs$optim.method

  result@external$baseline.model <- do.call(lavaan::lavaan, baseArgs)
  result@external$aux <- aux
  result@external$baseline.syntax <- satMod
  result
}

#' @rdname auxiliary
#' @aliases lavaan.auxiliary
#' @export
lavaan.auxiliary <- function(model, data, aux, ...) {
  auxiliary(model = model, data = data, aux = aux, fun = "lavaan", ...)
}

#' @rdname auxiliary
#' @aliases cfa.auxiliary
#' @export
cfa.auxiliary <- function(model, data, aux, ...) {
  auxiliary(model = model, data = data, aux = aux, fun = "cfa", ...)
}

#' @rdname auxiliary
#' @aliases sem.auxiliary
#' @export
sem.auxiliary <- function(model, data, aux, ...) {
  auxiliary(model = model, data = data, aux = aux, fun = "sem", ...)
}

#' @rdname auxiliary
#' @aliases growth.auxiliary
#' @export
growth.auxiliary <- function(model, data, aux, ...) {
  auxiliary(model = model, data = data, aux = aux, fun = "growth", ...)
}
semTools/R/monteCarloMed.R0000644000176200001440000002177613475314617015152 0ustar liggesusers### Corbin Quick, Alex Schoemann, James Selig, Terrence D. Jorgensen
### Last updated: 3 June 2019

## FIXME: work out a path-analysis example like slide 25:
## http://www.da.ugent.be/cvs/pages/en/Presentations/Presentation%20Yves%20Rosseel.pdf
## add example to help page, to illustrate a complex function of parameters

##' Monte Carlo Confidence Intervals to Test Complex Indirect Effects
##'
##' This function takes an expression for an indirect effect, the parameters
##' and standard errors associated with the expression, and returns a
##' confidence interval based on a Monte Carlo test of mediation (MacKinnon,
##' Lockwood, & Williams, 2004).
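##'
##' Concretely, the core computation described below amounts to drawing many
##' parameter vectors from a multivariate normal distribution (means = the
##' point estimates, covariance = \code{ACM}) and taking empirical quantiles
##' of \code{expression} evaluated on each draw. A minimal sketch, assuming
##' two parameters \code{a} and \code{b} whose 2 x 2 asymptotic covariance
##' matrix is stored in \code{AC} (illustrative values and names):
##'
##' \preformatted{
##' draws <- MASS::mvrnorm(n = 20000, mu = c(a = 1, b = 2), Sigma = AC)
##' ab <- draws[ , "a"] * draws[ , "b"]  # indirect effect, once per draw
##' quantile(ab, probs = c(.025, .975))  # 95 percent Monte Carlo CI
##' }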
##'
##' This function implements the Monte Carlo test of mediation first described
##' in MacKinnon, Lockwood, & Williams (2004) and extends it to complex cases
##' where the indirect effect is more than a function of two parameters. The
##' function takes an expression for the indirect effect, generates randomly
##' simulated values of the indirect effect based on the estimated values (and
##' associated standard errors) of the parameters comprising the indirect
##' effect, and outputs a confidence interval of the indirect effect based on
##' the simulated values. For further information on the Monte Carlo test of
##' mediation see MacKinnon, Lockwood, & Williams (2004) and Preacher & Selig
##' (2012).
##'
##' The asymptotic covariance matrix can be easily found in many popular SEM
##' software applications.
##' \itemize{
##'   \item LISREL: Including the EC option on the OU line will print the ACM
##'     to a separate file. The file contains the lower triangular elements of
##'     the ACM in free format and scientific notation.
##'   \item Mplus: Include the command TECH3; in the OUTPUT section. The ACM
##'     will be printed in the output.
##'   \item lavaan: Use the command \code{vcov} on the fitted lavaan object to
##'     print the ACM to the screen.
##' }
##'
##'
##' @importFrom stats quantile
##'
##' @param expression A character scalar representing the computation of an
##'   indirect effect. Different parameters in the expression should have
##'   different alphanumeric names. Expressions can use either addition (+) or
##'   multiplication (*) operators.
##' @param \dots Parameter estimates for all parameters named in
##'   \code{expression}. The order of parameters should follow from
##'   \code{expression} (the first parameter named in \code{expression} should
##'   be the first parameter listed in \dots{}). Alternatively, \dots can be a
##'   vector of parameter estimates.
##' @param ACM A matrix representing the asymptotic covariance matrix of the
##'   parameters described in \code{expression}. This matrix should be a
##'   symmetric matrix with dimensions equal to the number of parameters named
##'   in \code{expression}. Information on finding the ACM in popular SEM
##'   software is described below.
##' @param object A lavaan model object fitted after running the
##'   \code{cfa}, \code{sem}, \code{growth}, or \code{lavaan} functions. The
##'   model must have parameters labelled with the same labels used in
##'   \code{expression}. When using this option do not specify values for \dots
##'   or \code{ACM}.
##' @param rep The number of replications to compute. Many thousands are
##'   recommended.
##' @param CI Width of the confidence interval computed.
##' @param plot Should the function output a plot of simulated values of the
##'   indirect effect?
##' @param outputValues Should the function output all simulated values of the
##'   indirect effect?
##' @return A list with two elements. The first element is the point estimate
##'   for the indirect effect. The second element is a matrix with values for
##'   the upper and lower limits of the confidence interval generated from the
##'   Monte Carlo test of mediation. If \code{outputValues = TRUE}, output will
##'   be a list whose first element is the list just described (point estimate
##'   and confidence limits) and whose second element is a vector of simulated
##'   values of the indirect effect.
##' @author
##' Corbin Quick (University of Michigan; \email{corbinq@@umich.edu})
##'
##' Alexander M.
##' Schoemann (East Carolina University; \email{schoemanna@@ecu.edu})
##'
##' James P. Selig (University of New Mexico; \email{selig@@unm.edu})
##'
##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com})
##' @references
##' MacKinnon, D. P., Lockwood, C. M., & Williams, J. (2004). Confidence limits
##' for the indirect effect: Distribution of the product and resampling methods.
##' \emph{Multivariate Behavioral Research, 39}(1) 99--128.
##' doi:10.1207/s15327906mbr3901_4
##'
##' Preacher, K. J., & Selig, J. P. (2010, July). Monte Carlo method
##' for assessing multilevel mediation: An interactive tool for creating
##' confidence intervals for indirect effects in 1-1-1 multilevel models
##' [Computer software]. Available from \url{http://quantpsy.org/}.
##'
##' Preacher, K. J., & Selig, J. P. (2012). Advantages of Monte Carlo confidence
##' intervals for indirect effects. \emph{Communication Methods and Measures,
##' 6}(2), 77--98. doi:10.1080/19312458.2012.679848
##'
##' Selig, J. P., & Preacher, K. J. (2008, June). Monte Carlo method for
##' assessing mediation: An interactive tool for creating confidence intervals
##' for indirect effects [Computer software]. Available from
##' \url{http://quantpsy.org/}.
##' @examples
##'
##' ## Simple two-path mediation
##' ## Write expression of indirect effect
##' med <- 'a*b'
##' ## Parameter values from analyses
##' aparam <- 1
##' bparam <- 2
##' ## Asymptotic covariance matrix from analyses
##' AC <- matrix(c(.01,.00002,
##'                .00002,.02), nrow=2, byrow=TRUE)
##' ## Compute CI, include a plot
##' monteCarloMed(med, coef1 = aparam, coef2 = bparam, outputValues = FALSE,
##'               plot = TRUE, ACM = AC)
##'
##' ## Use a vector of parameter estimates as input
##' aparam <- c(1,2)
##' monteCarloMed(med, coef1 = aparam, outputValues = FALSE,
##'               plot = TRUE, ACM = AC)
##'
##'
##' ## Complex mediation with two paths for the indirect effect
##' ## Write expression of indirect effect
##' med <- 'a1*b1 + a1*b2'
##' ## Parameter values and standard errors from analyses
##' aparam <- 1
##' b1param <- 2
##' b2param <- 1
##' ## Asymptotic covariance matrix from analyses
##' AC <- matrix(c(1, .00002, .00003,
##'                .00002, 1, .00002,
##'                .00003, .00002, 1), nrow = 3, byrow = TRUE)
##' ## Compute CI, do not include a plot
##' monteCarloMed(med, coef1 = aparam, coef2 = b1param,
##'               coef3 = b2param, ACM = AC)
##'
##'
##' ## WORKING WITH lavaan MODELS. From the mediation tutorial:
##' ## http://lavaan.ugent.be/tutorial/mediation.html
##'
##' set.seed(1234)
##' X <- rnorm(100)
##' M <- 0.5*X + rnorm(100)
##' Y <- 0.7*M + rnorm(100)
##' Data <- data.frame(X = X, Y = Y, M = M)
##' model <- ' # direct effect
##'              Y ~ c*X
##'            # mediator
##'              M ~ a*X
##'              Y ~ b*M
##'            # indirect effect (a*b)
##'              ab := a*b
##'            # total effect
##'              total := c + (a*b)
##'          '
##' fit <- sem(model, data = Data)
##'
##' med <- 'a*b'
##' ## Automatically extract information from lavaan object
##' monteCarloMed(med, object = fit)
##'
##' ## or (unnecessarily) manually extract the information first
##' myParams <- c("a","b")
##' myCoefs <- coef(fit)[myParams]
##' myACM <- vcov(fit)[myParams, myParams]
##' monteCarloMed(med, myCoefs, ACM = myACM)
##'
##' @export
monteCarloMed <- function(expression, ..., ACM = NULL, object = NULL,
                          rep = 20000, CI = 95, plot = FALSE,
                          outputValues = FALSE) {
  input <- c(...)
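  ## 'input' holds the point estimates of all parameters named in
  ## 'expression', passed via '...' as separate arguments or as one vector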
  ## Get names and the number of unique variables in the expression
  paramnames <- all.vars(stats::as.formula(paste("~", expression)))
  ## If input is a lavaan object, pull out coefs and ACM
  if (inherits(object, "lavaan")) {
    input <- lavaan::coef(object)[paramnames]
    ACM <- lavaan::vcov(object)[paramnames, paramnames]
  }
  vecs <- list()
  ## Matrix of values, needs to be converted to a list
  dat <- MASS::mvrnorm(n = rep, mu = input, Sigma = ACM)
  ## Add the point estimates as the first row
  dat <- rbind(input, dat)
  ## Convert to a list
  vecs <- as.list(as.data.frame(dat))
  ## Give names so each parameter can be referenced via assign()
  for (i in 1:length(vecs)) {assign(paramnames[i], vecs[[i]])}
  ## Apply the expression to compute the indirect effect
  indirect <- eval(parse(text = expression))
  ## Get the CI
  low <- (1-CI/100)/2
  upp <- ((1-CI/100)/2) + (CI/100)
  LL <- round(quantile(indirect[-1], low), digits = 4)
  UL <- round(quantile(indirect[-1], upp), digits = 4)
  interval <- list(indirect[1], rbind(LL,UL))
  dimnames(interval[[2]]) <- list(c("LL", "UL"), c(" "))
  names(interval) <- c("Point Estimate",
                       paste(CI, "% Confidence Interval", sep = ""))
  ## Switch for outputting a plot
  if (plot) {
    hist(indirect, breaks = 'FD', col = 'skyblue',
         xlab = paste(CI, '% Confidence Interval ', 'LL', LL, ' UL', UL),
         main = 'Distribution of Indirect Effect')
  }
  ## Switch to return simulated values
  if (outputValues) {
    interval <- list(interval, indirect)
  }
  return(interval)
}
semTools/NEWS.md0000644000176200001440000001476213531740155013163 0ustar liggesusers# semTools 0.5-2 (submitted to CRAN on 29 August 2019)

- Requires `lavaan` version 0.6-5
- Minor bug fixes
- Addition of the `plausibleValues()` function to extract plausible values
  (i.e., multiple imputations) of factor scores from objects of class
  `lavaan`, `lavaan.mi`, or `blavaan`
- Full support for `lavaan.mi` models fitted to multiply imputed multilevel
  data; resolved issue ([#39](https://github.com/simsem/semTools/issues/39)).
- Full support for `fixed.x=TRUE` and `conditional.x=TRUE` in `lavaan.mi`
  models, including `std.nox` solutions in `summary()`, `modindices.mi()`,
  and `lavTestScore.mi()`.
- Added the `omit.imps=` argument to all `lavaan.mi`-related functions,
  optionally excluding solutions that did not converge, failed to estimate
  *SE*s, or contained NPD matrices (Heywood cases are a special case). Only
  the first 2 are excluded by default (see the example after this list).
- `reliability()`, `reliabilityL2()`, and `maximalRelia()` now accept
  `lavaan.mi` objects
- Added (S)EPCs to `lavTestScore.mi()` output when `epc=TRUE`
- Added (A)RIV/FMI to all pooled tests when available for `lavaan.mi`
  objects, to quantify additional uncertainty in the test statistic due to
  missing data.
- Allow multigroup models in `plotProbe()` and related latent-interaction
  functions.
- `standardizeMx()` was deprecated in previous versions, now removed.
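
For example, the new `omit.imps=` argument might be used as follows (a
sketch; `fit.mi` stands in for any `lavaan.mi` object returned by `runMI()`):

```
## additionally exclude nonpositive-definite solutions from pooled results
summary(fit.mi, omit.imps = c("no.conv", "no.se", "no.npd"))
reliability(fit.mi, omit.imps = c("no.conv", "no.se", "no.npd"))
```
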
# semTools 0.5-1 (on CRAN 25 September 2018)

- Requires `lavaan` version 0.6-3
- Minor bug fixes
- The formerly deprecated `lisrel2lavaan()` function has been removed from
  `semTools`
- `compareFit()` now accepts `lavaan.mi` objects returned by `runMI()`
- For `lavaan.mi` objects returned by `runMI()`, the `anova()` method has
  been updated to behave more like lavaan's `anova()` method:
    - more than 2 nested models can be compared
    - fit indices are no longer an option, and must be requested using the
      `fitMeasures()` method
- Given the previous addition of the score-test functions `modIndices.mi()`
  and `lavTestScore.mi()` for `lavaan.mi` objects (parallel to
  `modIndices()` and `lavTestScore()` for `lavaan` objects), the remaining
  two tests in lavaan's "trilogy" (`lavTestLRT()` and `lavTestWald()`) now
  have parallel functions for `lavaan.mi` objects: `lavTestLRT.mi()` and
  `lavTestWald.mi()`
    - `lavTestWald.mi()` implements what was formerly available via
      `anova(..., test = "D1")`
    - `lavTestLRT.mi()` implements what was formerly available via
      `anova(..., test = "D3")`
    - `lavTestLRT.mi()` cannot compare more than 2 nested models. The
      `anova()` method internally calls the `compareFit()` function to
      compare multiple `lavaan.mi` objects.
    - For all 3 tests (score, Wald, and LRT), the "D2" pooling method is an
      option, and there is a newly public function `calculate.D2()` that can
      be used to pool any set of Wald chi-squared or *z* statistics.
- The `runMI()` function can now be applied to multilevel SEMs that can be
  fitted with `lavaan()`
    - Known issue ([#39](https://github.com/simsem/semTools/issues/39)):
      `fitted()` and `resid()` methods will not yet work in models with both
      multiple levels and multiple groups. This will be resolved in a future
      version.
- The 3 functions `measurementInvariance()`, `measurementInvarianceCat()`,
  and `longInvariance()` have been deprecated, redirecting users to the new
  `measEq.syntax()` function. It is much more general, capable of combining
  features of all 3 deprecated functions without their restrictions.
    - The function's primary purpose is writing syntax that users can read
      and edit.
    - The fitting of a model is optional, and fitting multiple models is not
      (yet) automated. See the `?measEq.syntax` help-page examples for how
      to fit and compare several levels of invariance.
    - Find many more details [posted on the lavaan forum](https://groups.google.com/d/msg/lavaan/oKwP0_6-i1g/i0jGCU-LBwAJ)
      (the Google group).

# semTools 0.5-0 (on CRAN 1 July 2018)

- Requires `lavaan` version 0.6-1
- Minor bugs fixed
- Minor convenience features added
- Redesigned `runMI()` function, which no longer produces an object of class
  `lavaanStar` (that object class is no longer supported), which inherited
  from class `lavaan`. It now produces an object of class `lavaan.mi`, which
  inherits from lavaan's new `lavaanList` class (see the `?lavaanList` help
  page for details). The reasons to redesign `runMI()` include:
    - The user was required to choose among available methods for pooling
      the chi-squared test statistic when fitting the model, and the
      baseline model was also fit so that incremental fit indices (e.g., CFI
      and TLI) could be calculated. This led to more frequent convergence
      problems.
    - The `lavaanStar` class could inadvertently mislead users into thinking
      that certain results were available from multiple imputations that
      were not. For example, the `modindices()` function would return
      modification indices for the first imputation, but those were not
      appropriately pooled statistics.
- The new `runMI()` no longer includes the `chi=` argument, because those
  options have been moved to an `anova()` method written for `lavaan.mi`
  objects. Additional methods have been written: see the `class?lavaan.mi`
  help page for a list of methods and details about their use. Additionally,
  appropriately pooled modification indices and (S)EPCs are now available
  for multiple imputations (via `modindices.mi()`), as well as a general
  score test via `lavTestScore.mi()`.
- The `parcelAllocation()` function has also been redesigned with new
  arguments to improve its flexibility and ease of use. Users are now
  required to provide lavaan syntax not only for the parcel-level model, but
  also for the item-level model. This allows parcel allocation to be
  automatically detected, no longer requiring users to provide a list of
  item-to-parcel assignments (see new examples on the help page).
- The OpenMx enhancements in previous versions of semTools are obsolete now
  that OpenMx provides fit indices and standardized paths, so they have been
  removed. However, `standardizeMx()` is still available (temporarily
  deprecated) to provide standardized mean-structure parameters, until the
  OpenMx maintainers add that feature to `OpenMx::mxStandardizeRAMpaths()`.
- `measurementInvariance()` and `measurementInvarianceCat()` now require
  users to name all arguments passed to `cfa()`, including the first
  argument: `model=`. This is to prevent errors that occurred when some
  previous users had passed arguments in a different order than expected,
  which should not be a limitation.

# semTools versions < 0.5

Find our remaining version history on GitHub:

https://github.com/simsem/semTools/wiki/Version-History
semTools/MD50000644000176200001440000001645313532170512012377 0ustar liggesuserse0d228f9b3674a72722573c4f36e7a7f *DESCRIPTION ef9455f212b092e1551cb1e331506bfc *NAMESPACE c15cd82927b4f621e62a7458fac9c21e *NEWS.md a315fd6941777e1a222190f9ec1eb639 *R/EmpKaiser.R ed8f8b872ac8c0f1fa7e34a9785a5df6 *R/NET.R 4c9ce95379c7b5cdab0b98dc9e618c6a *R/PAVranking.R 4c0dbba5720dd1ce09130cb3dbbad46b *R/TSML.R 614e893f4aba40799230a8a94c16cfeb *R/aa_semTools-deprecated.R b9945e25b1861fe98132046a56186ce6 *R/auxiliary.R eb88cca001294e650191ee37666c84b3 *R/clipboard.R 80ab2d83b85f286a047c1c57297ccef2 *R/compareFit.R 4b16bdfc99f24e1ba4acb62d47d65c6e *R/data.R 783eb720e415c5043dbb054d815ccdb1 *R/dataDiagnosis.R fdda12b25a06f4e428b418c0b8d7c2bf *R/efa.R d5bf53118c42081daa0a4b394840f88d *R/fitIndices.R d6177b2a7aba38de705694f0ecbcb706 *R/fmi.R 0801395291a470f1a197c1cf2eb9ce2f *R/htmt.R 207208a2958321ac21b56cd53642c9b9 *R/imposeStart.R 17b39abad2c98349deb3dff49dfddd2f *R/indProd.R 84d5b351d498d611743c4f022275882d *R/kd.R 664b1dee784db1f51dfc2b82984d21c4 *R/loadingFromAlpha.R 63aec6bc03d7439869cb7394cba44376 *R/longInvariance.R 652d979cde8d9f308a556e06a71eafb2 *R/measEq.R 833f4297dec33a6550a8a0e086ffb467 *R/measurementInvariance.R 51680d004cde6757cc51b1f248993f9a *R/measurementInvarianceCat.R 4fe4d473ca94e2e72dc2fb480c699e1e *R/miPowerFit.R b00ce4c1da6015b8b5c7fb66b03367cb *R/missingBootstrap.R b8197832bdf88fea01c578053afa4080 *R/monteCarloMed.R 59765ff3d583e18fb6bd963b8a7ba5db *R/mvrnonnorm.R 804c2f3d8625e8a2db7e262a1ee805b8 *R/parcelAllocation.R a48a9083ca241f0beb554469d27cbe5e *R/partialInvariance.R 5d0bdcd8607001e33e59d855ba397198 *R/permuteMeasEq.R 16736256abbea7ad44114bf104372a44 *R/plausibleValues.R 07e97efea9b5830110f0b1fd66fc3771 *R/poolMAlloc.R 84466d80ede70183ed41a56b594fa86e *R/powerAnalysisNested.R 1726a14e353015b338ba1b1d4df21bf5
*R/powerAnalysisRMSEA.R 1a97422665296e0f54e5af3eaa6fa1a6 *R/powerAnalysisSS.R 89db1374eeef021ade1d52f308a2e878 *R/probeInteraction.R 679e7fb03e32ca52fd71ebac8d990111 *R/quark.R c11e448f4e32f77ba24ebc8032fdece1 *R/reliability.R 2134c41427ab07a2bcd7db18ae4169a4 *R/residualCovariate.R 40c49378eecc984edb3f2e6e4f64fb0c *R/runMI-LRT.R 22fa2b5a6028a12d10678ceef8692135 *R/runMI-Wald.R 79c406cf43a7c33476b955b62581a15f *R/runMI-methods.R a5d39b91115c802d9e94064ee5c4473d *R/runMI-modification.R 0c3b95c1e04b59967e1ea2e9fd333a74 *R/runMI-score.R 3e2802099120cbdd8b978b3f4fcb1363 *R/runMI.R 9cdd242a756f9815aa9c484a2ea6b017 *R/semTools.R 14a2f8556006017c12f77eff791224dc *R/singleParamTest.R 419f5f28bf8c448d5bf912d83af9576c *R/splitSample.R 0d3de3b59b265e9321f823924cab79fa *R/tukeySEM.R 601d9cc4e9066fdc9f64dea1a68fa9c1 *R/zzz.R ebcb52eff7ecb5000bb00ad17957036b *README.md 83304581f160f4f3c668b75a79725d47 *build/vignette.rds 4d1090db8b160303f235075ebbda16d7 *data/dat2way.rda c56c2358c6e4d82c4cd7855e4df37282 *data/dat3way.rda d3f489a4fefd07ccfcd3875bc71e0b83 *data/datCat.rda ba21a4b5113fd7515fbf26fccfe0fe17 *data/exLong.rda 35cb60d186fd440645d81b56e22a3fa2 *data/simParcel.rda f3b57cd561033b86ee6213144201bbbe *inst/CITATION 81dc319ba4e3739b20cbcd3196f00e1e *inst/doc/partialInvariance.Rnw 04e3e1f39aebdb666b7f25c15d266533 *inst/doc/partialInvariance.pdf bff36835f5f9cf6979273942b5355633 *man/BootMiss-class.Rd 9bec79688120332e294b788cf932da1e *man/EFA-class.Rd f6c696d79efaeeb03d9cc4f50b9fd1dc *man/FitDiff-class.Rd 912591e3363f8e0d95ea9d3adc524915 *man/Net-class.Rd 2fd3e64a491bf023b151a33dedf09bfa *man/PAVranking.Rd 106836b9055e066f2b03ac2001e0345a *man/SSpower.Rd 68d3653357bf8968b38713a7fcec4b14 *man/auxiliary.Rd 00602da325baeb4ca7bd8b2847210a3d *man/bsBootMiss.Rd 00740e967778f8b6a67ae35e94dcc41c *man/calculate.D2.Rd 567bd3961eb647a88710970f6671ad73 *man/chisqSmallN.Rd 95971dc574f7f0002796e557d84ebf87 *man/clipboard.Rd ab18b884e2eb6fdc47cdd490a9d7e8e5 *man/combinequark.Rd 84efe5c0a1adc385188f3d350a80f251 *man/compareFit.Rd 3da81daae1d3fa1f39b2021b167b16f9 *man/dat2way.Rd d1045951fc20eafd5a5ffd80f9849518 *man/dat3way.Rd a1eabc54596df6bc710c78bd580ab2d9 *man/datCat.Rd 68d7cc4206e2e39470e699db4ccce927 *man/efa.ekc.Rd a3012be7802489ccd6505c06fc72c209 *man/efaUnrotate.Rd 4c8671fd9404d8c0a4a198fcfbc52ff1 *man/exLong.Rd fb2043383786c56f8ca1315d868aadd0 *man/findRMSEApower.Rd 754c4b98cfbf3dd69ee898a7d5e7ab30 *man/findRMSEApowernested.Rd 932e846a94764c58a0b6c95fd0f2f980 *man/findRMSEAsamplesize.Rd b6cf329e83252c0ffb40e833a7677e70 *man/findRMSEAsamplesizenested.Rd 1f7e14327bd79807099d8656a41c52b8 *man/fmi.Rd de9bcae60a4a551d798938acb9917235 *man/htmt.Rd 2865323337743b69f0111bfe8d3763f4 *man/imposeStart.Rd 2ed07123667ef21bcb5ecd1387a6dca7 *man/indProd.Rd c03d6381168797c770a09fb425219d38 *man/kd.Rd 32a34c07f6916c352c810aad5554064e *man/kurtosis.Rd 839c4a6ba4f53cad31faafa4b9dd06c2 *man/lavTestLRT.mi.Rd 310acedae25aedabc13b590fef151c2a *man/lavTestScore.mi.Rd c9f47827ee9a379524b2a542e24fb713 *man/lavTestWald.mi.Rd 12968c77516ac507e9449716f333f764 *man/lavaan.mi-class.Rd b24d6f6dc617e522f1bdb7d2478e9ec0 *man/loadingFromAlpha.Rd db93073fb80b9c7f8b90dec6583b6845 *man/longInvariance-deprecated.Rd 99fbebf35b89c6adb87cd356b15d4dd4 *man/mardiaKurtosis.Rd 22d9a3c4f46ecc2020cf5261e4bc5cc0 *man/mardiaSkew.Rd 9e31911e86a3bd094a0ded01de60e268 *man/maximalRelia.Rd 05e418fc4f8402b3cf4526080d6203ba *man/measEq.syntax-class.Rd e77533dfecebbff2746a17f914050a75 *man/measEq.syntax.Rd c71bcd6c55e2c836cd03fac42cb1e650 
*man/measurementInvariance-deprecated.Rd 2de9e5933645167c2015f74632dd45a6 *man/measurementInvarianceCat-deprecated.Rd cd3ad8efa29b940f80b4f93babc4e67c *man/miPowerFit.Rd aa59166b4953ef19b37a9e5202599272 *man/modindices.mi.Rd 9f7525995726e9ad4fa8b454c8b3122e *man/monteCarloMed.Rd 8e3defb9d84ab1b6e5151e7910002f40 *man/moreFitIndices.Rd e07902b72797f1b17edab29f4c57e513 *man/mvrnonnorm.Rd 32b7c466bc456365f11a7c1cd2867f3d *man/net.Rd dfe769ce32352b7b9a1c894e489e423b *man/nullRMSEA.Rd d07ef38e918c3ef65e614bca924ba93d *man/parcelAllocation.Rd b1de786d8072072ca7ea03cc79da9727 *man/partialInvariance.Rd 9a389eb294d818d1c4366e8243fad41b *man/permuteMeasEq-class.Rd 6286729b8f9ceb8c88b3d71c952f079c *man/permuteMeasEq.Rd 4c347356a99cd6bcd9b235024a113a7a *man/plausibleValues.Rd 56754cbf5f68d021d9d4f4eb01b17a72 *man/plotProbe.Rd 8a767305454e48e00e9e51106c6c35e7 *man/plotRMSEAdist.Rd 7769944fa8abfaac33712473de7ec967 *man/plotRMSEApower.Rd 136be2c682567f26f14f7dc06959fa35 *man/plotRMSEApowernested.Rd 82596a3893022caedac7fda5bfa9ed08 *man/poolMAlloc.Rd 5af4d99db392a5fbec49400193df621e *man/probe2WayMC.Rd 35b4521babfef29b8fee6ff8412e9882 *man/probe2WayRC.Rd ac0d633ecef758659b94c0a608f543f5 *man/probe3WayMC.Rd 0e063e158610c474ec98a80a719acd70 *man/probe3WayRC.Rd b6a2f863ca61c25f2a4c6b448d939211 *man/quark.Rd 699278d45b6090d29b43a3e659585cf2 *man/reliability.Rd 14e16f6e0481a7f4cb4ed528c6bfb453 *man/reliabilityL2.Rd 9178b9a704030e6e23481e58d63deb7b *man/residualCovariate.Rd fcc3cefac1224809340fabd12b61cc1c *man/rotate.Rd 1aac5caa994ec306e5c8321ade93db62 *man/runMI.Rd 0262dbb665dc17f251421b070d8b6362 *man/semTools-deprecated.Rd 54adefa225fe2fb1231454b6710f12cd *man/semTools.Rd f33f38793ca24908b59c82c767e1b10f *man/simParcel.Rd dbef15ef3c9ef143dd94d01e05f1baf8 *man/singleParamTest.Rd f70de59a32bc222a1981fbe5bd6f56ea *man/skew.Rd 36700bffb9aa48d6928e4ca824760b42 *man/splitSample.Rd 26aa77e94f2f50bf56fa305762592a00 *man/tukeySEM.Rd e08d1adb92b3764a866bf65addf019fa *man/twostage-class.Rd a8403ac8bdb66fbf5f766b16e8345d7b *man/twostage.Rd 81dc319ba4e3739b20cbcd3196f00e1e *vignettes/partialInvariance.Rnw 4f5891dc46f7212c1ce6189c4467adba *vignettes/partialInvariance.bib
semTools/inst/0000755000176200001440000000000013532164177013045 5ustar liggesuserssemTools/inst/doc/0000755000176200001440000000000013532164177013612 5ustar liggesuserssemTools/inst/doc/partialInvariance.Rnw0000644000176200001440000002437213432435606017733 0ustar liggesusers\documentclass[12pt]{article}
%%\VignetteIndexEntry{Partial Invariance}
%%\VignetteDepends{semTools}
\usepackage[utf8]{inputenc}
\usepackage{amsfonts}
\usepackage{amstext}
\usepackage{amsmath}
\usepackage{natbib}
\title{A Note on Effect Size for Measurement Invariance}
\author{Sunthud Pornprasertmanit}
\begin{document}
\maketitle

This article aims to show the mathematical reasoning behind all effect sizes
used in the \texttt{partialInvariance} and \texttt{partialInvarianceCat}
functions in the \texttt{semTools} package. In the functions, the following
statistics are compared across groups: factor loadings, item intercepts (for
continuous items), item thresholds (for categorical items), measurement error
variances, and factor means. The comparison can be made between two groups
(e.g., Cohen's \emph{d}) or multiple groups (e.g., $R^2$). This note provides
the details of the effect sizes in comparing two groups only.
The comparison between multiple groups can be done by picking a reference
group and comparing the other groups with the reference group, in a similar
fashion to dummy variables. For example, the comparison between four groups
would create three effect size values (i.e., Group 1 vs. Reference, Group 2
vs. Reference, and Group 3 vs. Reference).

Alternatively, for measurement invariance, the change in comparative fit
index (CFI) can be used as the measure of effect size. In the measurement
invariance literature \citep{cheung2002, meade2008}, the change in CFI is
used to test the equality constraints for multiple items simultaneously. The
functions in \texttt{semTools} will show the change in CFI for each
individual item. That is, if an item were allowed to have different
statistics (e.g., loadings), how much the CFI would drop from the original
model. Please note that more research is needed to find appropriate cutoffs
for the change in CFI for individual items. Are the cutoffs of .002 or .01
appropriate for this context?

In creating an effect size, a target statistic needs to be standardized.
Sample variances are used in the standardization formula. If researchers can
assume that target variances across groups are equal in the population, then
pooled variances can be used in the standardization. The pooled variance
$s^2_P$ can be computed as follows:

$$s^2_P = \frac{\sum^G_{g=1}(n_g - 1)s^2_g}{\sum^G_{g=1}(n_g - 1)},$$

\noindent where $g$ represents the index of groups, $G$ is the number of
groups, $s^2_g$ represents the variance of Group $g$, and $n_g$ is the Group
$g$ size. If the variances are not assumed to be equal across groups, I
recommend picking a reference (baseline) group for the standardization.

In the following sections, I will show how effect sizes are defined in each
type of partial invariance testing.

\section{Factor Loading}

Let $\lambda_{ijg}$ be the unstandardized factor loading of Item $i$ from
Factor $j$ in Group $g$. A standardized factor loading $\lambda^*_{ijg}$ can
be computed \citep{muthen1998}:

$$\lambda^*_{ijg} = \lambda_{ijg}\cdot\frac{\psi_{jg}}{\sigma_{ig}},$$

\noindent where $\psi_{jg}$ is the standard deviation of Factor $j$ from
Group $g$ and $\sigma_{ig}$ is the total standard deviation of Item $i$ from
Group $g$. To quantify the difference in factor loadings between groups on a
standardized scale, the standard deviation in the standardization formula
needs to be the same across groups. If Group A and Group B are compared, the
standardized difference in factor loading is defined:

$$\Delta\lambda^*_{ij} = (\lambda_{ijA} - \lambda_{ijB})\cdot\frac{\psi_{jP}}{\sigma_{iP}},$$

\noindent where $\psi_{jP}$ is the pooled standard deviation of Factor $j$
and $\sigma_{iP}$ is the pooled total standard deviation of Item $i$. If
Group A is the reference group, $\psi_{jA}$ and $\sigma_{iA}$ can substitute
$\psi_{jP}$ and $\sigma_{iP}$. Assuming that the standardized factor loadings
come from a congeneric measurement model, they represent the correlations
between items and factors. \cite{cohen1992} provides a guideline for
interpreting the magnitude of the difference in correlations for independent
groups. The correlations are transformed using Fisher's $z$ transformation:

$$q = \tanh^{-1}\left(\lambda_{ijA}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right) - \tanh^{-1}\left(\lambda_{ijB}\cdot\frac{\psi_{jP}}{\sigma_{iP}}\right)$$

Then, the $q$ values of .1, .3, and .5 are interpreted as small, medium, and
large effect sizes.
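For example, suppose $\lambda_{ijA} = .8$ and $\lambda_{ijB} = .6$, with
$\psi_{jP} / \sigma_{iP} = 1$ (purely illustrative values). Then

$$q = \tanh^{-1}(.8) - \tanh^{-1}(.6) = 1.099 - 0.693 = 0.406,$$

\noindent which would be a medium-to-large difference by these benchmarks.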

For continuous outcomes, the amount of mean difference implied by the factor
loading difference given a factor score can be used as an effect size
\citep{millsap2012}. Let $X_{ijg}$ be the observed score of Item $i$ loaded
on Factor $j$ from Group $g$, and let $W_{j}$ represent the score of Factor
$j$. The expected value of the observed score difference between Group A and
Group B is calculated as follows:

$$E\left(X_{iA} - X_{iB} | W_j \right) = \left( \nu_{iA} - \nu_{iB} \right) + \left( \lambda_{ijA} - \lambda_{ijB} \right) \times W_{j},$$

\noindent where $\nu_{ig}$ represents the intercept of Item $i$ in Group $g$.
Let the values between $W_{jl}$ and $W_{jh}$ be the values of interest. We
can find the expected difference in the observed scores under this range of
factor scores. \cite{millsap2012} proposed that, if the size of the expected
difference exceeds the value of a meaningful difference, the loading
difference is not negligible. See their article for the discussion of the
meaningful difference.

Note that, in the \texttt{partialInvariance} function, $W_{jl}$ is calculated
by (a) finding the factor scores representing a low \emph{z}-score (e.g., -2)
from all groups and (b) selecting the lowest factor score across all groups.
$W_{jh}$ is calculated by (a) finding the factor scores representing a high
\emph{z}-score (e.g., 2) from all groups and (b) selecting the highest factor
score across all groups.

\section{Item Intercepts}

Let $\nu_{ig}$ be the intercept of Item $i$ in Group $g$. A standardized
intercept $\nu^*_{ig}$ is defined as follows \citep{muthen1998}:

$$\nu^*_{ig} = \nu_{ig} / \sigma_{ig}.$$

Thus, the standardized difference between Groups A and B in item intercepts
is defined:

$$\Delta\nu^*_{i} = (\nu_{iA} - \nu_{iB}) / \sigma_{iP}.$$

Note that $\sigma_{iA}$ can substitute $\sigma_{iP}$ if Group A is the
reference group. By using this scale, .2, .5, and .8 can be interpreted as
small, medium, and large effect sizes according to \cite{cohen1992}.

The proportion of the intercept difference over the observed score difference
may be used as an effect size \citep{millsap2012}:

$$(\nu_{iA} - \nu_{iB}) / (M_{iA} - M_{iB}),$$

\noindent where $M_{ig}$ represents the observed mean of Item $i$ in Group
$g$. \cite{millsap2012} noted that a relatively small proportion (e.g., less
than 20\%) is ignorable. If the sign is negative or the value is over 1, the
interpretation is doubtful.

\section{Item Thresholds}

Let $\tau_{cig}$ be the threshold categorizing between category $c$ and
$c + 1$ for Item $i$ in Group $g$. Note that the maximum number of $c$ is the
number of categories minus 1. Because thresholds are the locations of the
distribution underlying ordered categorical items (usually a normal
distribution), the location statistic can be standardized by dividing it by
the standard deviation of the underlying distribution. The standardized
threshold $\tau^*_{cig}$ is defined as follows:

$$\tau^*_{cig} = \tau_{cig} / \sigma^u_{ig},$$

\noindent where $\sigma^u_{ig}$ is the standard deviation of the distribution
underlying the categorical data for Item $i$ in Group $g$. In the theta
parameterization of categorical confirmatory factor analysis, $\sigma^u_{ig}$
may not be equal across groups. The standardized difference in thresholds
between Groups A and B needs the pooled standard deviation.
\section{Item Thresholds}

Let $\tau_{cig}$ be the threshold separating categories $c$ and $c + 1$ for Item $i$ in Group $g$, where the maximum value of $c$ is the number of categories minus 1. Because thresholds are location parameters of the distribution assumed to underlie an ordered categorical item (usually a normal distribution), they can be standardized by dividing by the standard deviation of that underlying distribution. The standardized threshold $\tau^*_{cig}$ is defined as

$$\tau^*_{cig} = \tau_{cig} / \sigma^u_{ig},$$

\noindent where $\sigma^u_{ig}$ is the standard deviation of the distribution underlying the categorical data for Item $i$ in Group $g$. In the theta parameterization of categorical confirmatory factor analysis, $\sigma^u_{ig}$ may not be equal across groups, so the standardized difference in thresholds between Groups A and B requires the pooled standard deviation:

$$\Delta\tau^*_{ci} = (\tau_{ciA} - \tau_{ciB}) / \sigma^u_{iP}.$$

Note that $\sigma^u_{iA}$ can substitute for $\sigma^u_{iP}$ if Group A is the reference group. On this scale, values of .2, .5, and .8 can be interpreted as small, medium, and large effect sizes, respectively \citep{cohen1992}.

\section{Measurement Error Variances}

Let $\theta_{ig}$ be the measurement error variance of Item $i$ in Group $g$. A standardized measurement error variance $\theta^*_{ig}$ is defined as \citep{muthen1998}

$$\theta^*_{ig} = \theta_{ig} / \sigma_{ig}.$$

Thus, the standardized difference between Groups A and B in measurement error variances could be defined as

$$\Delta\theta^*_{i} = (\theta_{iA} - \theta_{iB}) / \sigma_{iP}.$$

Note that $\sigma_{iA}$ can substitute for $\sigma_{iP}$ if Group A is the reference group. However, \cite{cohen1992} offers no direct guideline for interpreting the magnitude of a difference in measurement error variances, so a different standardized index is needed. Assume that $\sigma_{iP}$ is always greater than $\theta_{iA}$ and $\theta_{iB}$, which is usually the case; then $\theta_{iA} / \sigma_{iP}$ and $\theta_{iB} / \sigma_{iP}$ range between 0 and 1, similar to proportions. \cite{cohen1992} provided a guideline for interpreting the magnitude of a difference between proportions using the arcsine transformation. The new index, $h$, is defined as

$$h = \sin^{-1}\sqrt{\frac{\theta_{iA}}{\sigma_{iP}}} - \sin^{-1}\sqrt{\frac{\theta_{iB}}{\sigma_{iP}}}.$$

Then, $h$ values of .2, .5, and .8 are interpreted as small, medium, and large effect sizes, respectively. If items are continuous, the proportion of the error variance difference relative to the observed variance difference may also be used as an effect size \citep{millsap2012}:

$$(\theta_{iA} - \theta_{iB}) / (\sigma_{iA} - \sigma_{iB}).$$

\noindent If the sign is negative or the value exceeds 1, the interpretation is doubtful.

\section{Factor Means}

Let $\alpha_{jg}$ be the mean of Factor $j$ in Group $g$. A standardized factor mean $\alpha^*_{jg}$ is defined as \citep{muthen1998}

$$\alpha^*_{jg} = \alpha_{jg} / \psi_{jg}.$$

Thus, the standardized difference between Groups A and B in factor means is

$$\Delta\alpha^*_{j} = (\alpha_{jA} - \alpha_{jB}) / \psi_{jP}.$$

Note that $\psi_{jA}$ can substitute for $\psi_{jP}$ if Group A is the reference group. On this scale, values of .2, .5, and .8 can be interpreted as small, medium, and large effect sizes, respectively \citep{cohen1992}.
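As a final illustration, the arcsine-based index $h$ and the standardized factor mean difference can each be computed in a few lines of R. The parameter values below are hypothetical, chosen only to show the arithmetic:

\begin{verbatim}
## Arcsine-based index h for an error-variance difference
## (asin() is the inverse sine; all values below are hypothetical)
thetaA <- 0.40; thetaB <- 0.25  # error variances in Groups A and B
sigmaP <- 1.00                  # pooled total SD, assumed > both thetas
h <- asin(sqrt(thetaA / sigmaP)) - asin(sqrt(thetaB / sigmaP))

## Standardized factor mean difference
alphaA <- 0.30; alphaB <- 0.00  # factor means in Groups A and B
psiP   <- 1.10                  # pooled factor SD
dAlpha <- (alphaA - alphaB) / psiP
c(h = h, dAlpha = dAlpha)       # compare with .2 / .5 / .8 cutoffs
\end{verbatim}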
\bibliographystyle{plainnat}
\bibliography{partialInvariance}

\end{document}
semTools/inst/doc/partialInvariance.pdf0000644000176200001440000037643413532164177017750 0ustar liggesusers[binary PDF content of the built vignette omitted]
semTools/inst/CITATION0000644000176200001440000000252313522560521014174 0ustar liggesuserscitHeader("The maintainer and *primary* contributors to this package are listed
as authors, but this package is a collaborative work. The maintainer(s) cannot
take credit for others' contributions. Whenever possible, please cite the
paper(s) associated with the development of a particular function (e.g.,
permuteMeasEq or parcelAllocation), listed in the References section of its
associated help page. Otherwise, please use the following citation for the
package as a whole:")

year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date, perl = TRUE)
vers <- paste("R package version", meta$Version)
url <- "https://CRAN.R-project.org/package=semTools"

citEntry(entry = "Manual",
         title = "\\texttt{semTools}: {U}seful tools for structural equation modeling",
         author = c(person(given = c("Terrence", "D."), family = "Jorgensen"),
                    person(given = "Sunthud", family = "Pornprasertmanit"),
                    person(given = c("Alexander", "M."), family = "Schoemann"),
                    person(given = "Yves", family = "Rosseel")),
         year = year,
         note = vers,
         url = url,
         textVersion = paste("Jorgensen, T. D., Pornprasertmanit, S., Schoemann, A. M., & Rosseel, Y. (",
                             year,
                             "). semTools: Useful tools for structural equation modeling. ",
                             vers, ". Retrieved from ", url, sep = "")
)