clubSandwich/0000755000176200001440000000000013576443015012670 5ustar liggesusersclubSandwich/NAMESPACE0000644000176200001440000000135513576304571014116 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(as.matrix,clubSandwich) S3method(bread,gls) S3method(bread,lme) S3method(bread,lmerMod) S3method(bread,mlm) S3method(print,Wald_test_clubSandwich) S3method(print,clubSandwich) S3method(print,coef_test_clubSandwich) S3method(print,conf_int_clubSandwich) S3method(vcovCR,default) S3method(vcovCR,glm) S3method(vcovCR,gls) S3method(vcovCR,ivreg) S3method(vcovCR,lm) S3method(vcovCR,lme) S3method(vcovCR,lmerMod) S3method(vcovCR,mlm) S3method(vcovCR,plm) S3method(vcovCR,rma.mv) S3method(vcovCR,rma.uni) S3method(vcovCR,robu) export(Wald_test) export(coef_test) export(conf_int) export(impute_covariance_matrix) export(vcovCR) import(stats) importFrom(sandwich,bread) clubSandwich/data/0000755000176200001440000000000013500611663013572 5ustar liggesusersclubSandwich/data/MortalityRates.RData0000644000176200001440000012144013500611663017474 0ustar liggesusers7zXZi"6!Xk])TW"nRʟJ zE#w+:)o us݋Îs1wMqhȓ,z=eG-88$4=z- ǨY!yx ooҙ_Grϩw^`쮂Tm"ƙk>[_b+sI,mCџ|,>u%Atd1[Ko%Puc3@T;n^d~}?[NUGّa{MBpZ먝w͙XA'/ɛZoskha M$k% U4*@F6UU?*O>Jw>D}%1 اP*rX/ڐXraڥKpYIoAi֍w% :.8r n_Y2 <ƜH$f+RȢ36RΔ¢ Glؙn5(mfz}^qp Z,h)Juk6[N, D&sd5-t'ݑSԤyY{I:.~dwB]ґ Ly>2J.: wKΌ88Z4ĢaAuS_#D$*C닆b&w9qDbgm{ݮcp>M̀|"hn$2 LWa+JSL@+Y|:kBs%8M3Go@,nK\nSP5df@#U< ^HLxi $W:TB$QEOi#0m`huX1bwI\ρqGIT(*fje!С{uS)Vcf.,RSe /Ej k2i ̩b^s'[8D=r>z}_~>6* ΓP an(F]a#0EPYuŶ2ar>Y$HP[ \t(2ȫ:1 RĐQ{.q^W$6 Պbyb .'M:34;ܖkDSy(L"\Yvh  iJ?ުDyQbs洖աIաu^)vJݩ"jMdPB|1k^KT{`Ln<>`)p6 cQC*eqJ-S*y?\rA6}&$(|js^%ϟk;W/ ɀ6eQwCY>yĢj2 c z9NߢdY ydžL]7?nB ӞQ"6vfi$LtpR4ju,hؓAIZrZ S[ YJ> .ߜRv~D@Ey8Ԉ j{1O%2^ubPSXb]7L*\To4"Ϡʫĥ )Nzݢ\ ~͘jԯ[B,wfr." 
F{_ϹR*vvXY _ W315rzv/QP!aENy MN)&JRjy %0?,Z=Eu QbJn$N*;tIFmċ +H<$E?`vnrK%':kʵ]+Rk, CãV7 6DKN)t^(HŜxxIjkAz,ocA$ivJn(o;\J-n2a%MJ~U|<(5}t㲞3($)]/ٔ1'CΕ\.]lmF{}egxys "3cmk_@5]M apj%QIK Ԓ^j"F?+ ObP}Ga6J-CMqk5#ysufL"SyZeKwžߝ|&P*Bt\L[t}x%Z|; /stU}+-*[^c{qs1cXaz?F&TGςc k LrU#W;O"jʿ+Let8zm7N5>XwUҩ.96&;i9|^Zi'[w&7j 4uB<[ЖT{F٫:H ;}`\k=^R}wH7 U%J4I^k柛<\*ALfweȋtރ O/>O/||*`wMߜaI'9"}{&!$\UBp.Gh8 -ٰ{O*V.xj'fBq`p髫v#%Ej̊o hw)70fZ}r!ߺlHT2-5ŭb䢵wm;yjJweml0GV2P+B=l2y%j%ʕWpR*єtc.'X$Q OTϖ9W.j^<-]l|4֚09"ՀqQȼz7BvE,d!huN}lzuoU7Q.^k๏$TC|bveMCFGn~ ߐUM+ڨ.&3P5JF&Bc._G#Kw={2:hczd#0}ӥGD:՛|>dFd ,|q?hfNݰ[lBw $v'&Y4@(_;G/=K)eXOTdVA vSa=GTqݘL#=^R/Mha4 %Vy^dzZr}4$g-|v Ny ]k[D8󃖝0=e^Eܬ'ﳌDAFr@1D|˷PUVǰ8N\3 }mM*^:r0!gG2~3*9! 1UU0Ȓy"XUk蟛d'҈J{4]1NQ/d~zq23L4صyr@hu. yowaWl{dPoAize D1j*MƁӋ}q:$GEF<'ekN݊ ))"|GqJA'R s"*TME_1hmP3w,F>$l ~(UJb:9J+Q&;#{Tع!]ω`w`:򎉡LkQ$aH rOTg K8]:2nv {w,hSv[K/bc$҆ɮ56Β^8AR$(h(bX# ˬϷ!̡6xV f \mVڥ_ Kh\mt(6Q;)& /"zdL*\'Y/dv20Nwб]_WYɜlgzwQmVƨB juI* *=YH `w7Σ!J9!|$$X]l]ߓw[8yv+H3"l%Nb"SE{GU=(otaET*Z8(HL e#xh|yS[#ċ=\_! AgAi1_i~˕x}&WJF$Vv $kV_(*<"*W\KkԖ>?C3dc J7 MKrc9҅BcY5 $>^_X0kfcŞX|GHeWBN>PxRMê܊Rd NUHcfQE~γ@"VQ) P$I)U.a$+O{l88h Uʞ:CJ7ʹ&qb|4? 
VؚŸ~ye$3\~ˣ-M(KbJDng_aO '}͟ S ,l/"3EHrœ¼^k EVI4Mq]ްH#4O {YK/iVMZ?'dХLgGEQ[Clt?ԇE|]t@jϨO¬!oY@"O!"Ylvi[Hq)ߕoy=":ƔLp㏝u-|K5\wkR]rG?Lz{9sD~Z0s4f[cgT3VOw_g&LGwWG23??bǗ9͛}j)X9‘6-RJ`tF,k $n'9t?ۂ-L6(ۑOCM6.FV#0kXDM7J%Q}~vn&:9~ Af#uHND>&*ఇ CzI]Ě%Bɯ!iH%ϒ}r ; {3Cs&:QfC*NC(P% v*<%|Ûonv3QZH@`T<,z2MJ:^Y L$;=|u/81Ea|{hBv-CNڅWR_h#l7WC 핒gٺbl0>t߳v36գZ2?krM|&lDe<>8uQKYΫˊD!F*i`ۤ㐯h9OҨ}cvf߀mqS:BĚJțP*ʼnq[wVq)y\dw?,6Wi 0z$2Orԃ>[Ґ'}%i}!lTBA^곊sd u3L:%&_VZZ.WK4 u3w=jbv?EG w 9OBBvM2)vYyDoCt; dWnFs 6BUNmlo\RF ~DjVN\[1Ր\͝I&Q0ntbɒ%זE)XVikD2h{fVu久g& rNtEʓ[Sfic0J}mañ==>\з7]=w[UqOv|;1nEcrSSj{2 ࡌSfKl:6k| ħN|i0*Z(#+9fq\qׂ빗} -0KUYogzj2n;-=i@9E | ˃,cPR-QY -172XDAWFFѵ_nb_F.D(jH "F3_aQwwYWaÆ;_]/6Cqvx;3#bL@^+P?A6{rZ͋"m.|yd؅)cnzg>Z>*ù\-:=\n9^~yv,-(yC4z(Ơ0ڹ&5cZնAj(ٛrrN*"syt z:shӫXU&:Fs+>R,fN._KgPz1p#fJO ^_#S=#<χQVǮOŸMaM5SEHV *>';\HlzZMC@rUFъCo1~Tyg sa]R6N]Mr*}}YqM w%vKjfkT-'#L2Wccj(Z:AWY 1 ] ˨q#Iuâ!z$hEDMG:= xY1x 1w' LKZj@Ț#F)MK}hY#dv{a(Kj:x^ n as־Y+`1f-Ù(qhbu8jZc>axdm -*pvvlCǙKau8-?Չc|r֓evm LZK5:#oXU9=4qch:&FӁגt"<+Vf#Iy zP`پP%}y 4Fs9lsLa8H OI8gtw۰?Q]OqAq' ϡXX{Z~̔Gt9, :$k9,:4R/O&E@Jո`@gX.4Y(H㎖c.s]w+ x6|W'1،RdS2/7 9n3`=Aѻ%<͇\gŊ$%uhLye 6w8wٗWM H}w?dfվtׅod^T~oPNb;tʀRRynkw$g7_8~8(LL-ãSEYBMubtٶ#J|q_*& 9]̨)3PŊ?rVI'_§ChEA>A,amkIEe򽓈Q6K9n@ m]96Rho95{a{[ang]v{JTܰV'TaSz%?4oOj)&w\#qm櫻z|oӴG~"ҩ$lGsܐ <䶸*NQf2mԠ@=<$}O˜7 d*ߋ_127x#|G2qsP{aDQ, *_JK BDGfq>i)O;Ełz/o(k[37KcrI18]o7AX,Iy=DrM#BD(&_1d֧֨^-a@^ ;a$Ƅs@~IGbF*n;76 w1e0gGy;%ZWNL&Q5Rwj6Yj5m~ԇRn4͸%ݕUDŽcɆ_ʴfsE:r=OT1yo&!ͭiO@Ff|g[wGbdѹzk } Oi"uWGMM{he@uCuJ:-u Qqt 秂ax4O5卖p?/p*\ ob Q@@{ԯ\, (.pȍ6Gݷ}ök@O p{o4>pLrB&> ? 
8}XсQ9.nPSffS8% pAUxd1?͡/U%ܭBx HnYL@'P0|\)GU`3˙[7l o`tfF4UIR *>Z[CuƙJ?6ta i80Jg]bY Ձ 2Z>qg&`O8$ ctL'A{Y.!Lw/+p@8g>]TZ-hؗ^_=(SMl' #j< 'zP[68³#_B U>Aܺt#Vq&kaa?c\o n/;a>VNZD"Z> `nj˙mch'u6Ez5n3ۤ 2-[ xoÙA39=pLN 9w -{'ŀ5ZLFy1 rWԧӬ]'Gh9ț#$|^ι7穷|OPK?0P7-aH EAG;e $lwC&DE$yfED_3)p6ӕhDzy}=քeP Nuji496@A9"$.Vn7cOx9v&}%M HgHg3ܫW= §4e"?LB6@iD 9jhA0É!Vu7vX.%^ecZmJw;Gkaƾ Xtx c}?.xx:94Vvf}0]ᗩo:=.,R툐b/4qe82l ,ֳ֭EbnͭعdŚ=J[u.> I!z.$~+Z`*r#ҋX2ju9s>4>5w|ʼ}>CKdl],tln=O,z]~'@1Z$<zA!HΔaK^TDk'FITQ%xp犕1؜ nbjBhP^#;,>]5zH(|+\o.abo"d^~8˭9Gk"F0l/|g[@t i͇ B.JTDh~> t:UFTuLĎlh]Yc+2!3~}tHÝuMk|ʵ'3_/GaѮEyޯ_Dp:ͱnV]L և\9"WRLI&}̢c< y,:NaȺZd`0=JQV1쉬ef\7F.}=L^ţoKGRW1HDuM0jK&O͆Sg@HTPVtޅ6?c3oFNƦ{TId{ќXq}}V$z)g ԩ'yHPJp/0Iy cH6 M"%R@`.wJ,x:xq(O25 سfE>]t~-q_zCR']i/RW$d3 { M; ?hSf[ђzi|5џo_Mp"3=KWD,* X%0-xPȬ2iOCH 7ft=S+?dv=I`f?9EW8\GYq̧>=-j ,Ya ~\b]n8Zm^/Y{q^r'UGui )u5Hmʷh{Aq_ˆ@stFgX-+ KΏo((BTٽ R¼^աbxOZGfo+?6X]=9wڟI#Bn' h  8VjDL-f|FWDmJL&?KEru(P~}C!;0 YdNpbj䂵ՙd(  G–9/kxnz7OeyI0MWKǥȣʵUlv29Kj+{QKY$ʳs_a-n6 kx,c KU g$,Q᭎sRBxxG9n)0Ѱ{;#9ƍ(F$Jt`yt!ugpGCWn*³(Hb[*{<_S,I䲆M{4'՘] O=4?S!Y U8~tH*dɄ ڝ%Kڡݕne_:җ8V^F=/uV6{)"=AV G~?;4oÊ u+'Ȃ)dSϒ:ڜj nMzufwtV[c lebF9HhF`F$&w:pLJNq+6ޟW."CYM_A!ZF͕@&<=z g:\0[1j4BC0**Qw3"i/|<(g="Ƚg,cl~)C럝^xA2d\L >zdgYNa9]o8KCbR%q0<~wGFhDHȆE&4Gn ׇV PB-V\GD*oc4_t6_?B jZbLu9q[K<ˉ_PafClM["l P~$XQ( ]9WB~`5N"R =J9Urk%]u傾P@ek6'}ʙ̔=z9iAkCG9j^i KU\N"D @Col+م$DNu5>V5k }zѿ^,@NT1ZAM7W 7d_@\ZF87xJg)ZՠY \ra>*,i2LiU $`)p0BlƔrU ls¸ %ཀྵӒS ǛG 8fh71&"rO:il4$n>#YbzV;jM埯{VlL&ͪwMz-r5aa3\ՌE'2*١Ka1~ 4u*l;$.:-g6{ߗr`;SEo{:bB>bPKFD0^K5ZcsgCBR`} ?VØPQ+ւ2% jPd[T{MDѧDw OLx\Q~Lhc5m؞/߱܂a~+UCb`>XTVq9zOJ:75Zpk|,d9GN$T>}a75  YzWG<`hr >Pcj~syg_zhb"[\Xw$;6sPC l䲝Y/2-β\PFa!j9:*$xDf-XqԨ4pL@I RQ7P(pN G^SR(\: ~=V#\J_V<{=DQ5W< Um< Y-rTnؿO Ld`Dl(l˶2e`5 %{>7$ ѰwU69=oESTDb:DL7G[U(! A nc2AZH<=`/DGiP.)Ͽaə _RfK׶~,|p8  -U} )MJ X0wi^iTTAÀ+_!".". 
nt 2H+y< }я&Q*4ύGn"vlTrxn߿OsW//6X5ZAU^u*|BiٙCoVڴh@:x:ū3sŮՙVZܯgdDk BxP_/[&bY ~q/,XG+0z`[[#̎ϒ0.<[w7sV'z㴈FГG4?ąprO0ůtvQ( v\׉rӒ\(sV/^SP,,@;䗯vC}3q:^"">8.Tl-5@Z8|>Elg)xg2scn^7nkFd5]![S13Kꐃ˾3 S; ֙o9N,+ _pߨfe(jyXNOGjμ'~<9R|<~V-ʬyu6w_֝7MWWga7˲na k!E[EsR޾`AB oϧ /kӨ[zDlRT2X&Qq ò+E (ƗD/ǶsD*\h,h_QXFųEr=TgҢ^6zt @KS^(.dP!ClY Mu.䰮֣[7$YRY|I w<Ez~;gv"w.O3|6 H&PaCBGx)T\EOf$ḽKTY)>0_L;/@sx ͮ#A;e8=g;l)cp <:Ij*9Eҍ5_ u ;ڠs\3v7U?41|kmtx0>:!PM 2O1N܎^D chl" f?ܨT{9FÛ]c}ңPdv, Sno9 [)$릯^8XQuY&> ,\-cޙL&1|,P䫱-Gn0ljI+(Wm6 W2Ώ H8;YNc# x2W]!C>#e VQj35Y4]Ý Fr+aĹfF  +_>SP rٓ3o?0o'D<6V&f mτ_n%Bߧ*Q8/m7Ct]kh~ Y#ԗu# d,&g3q,$ 237iqKxn|NOg6-H؅ Tk4=9)#qfw)uN{ =sĽl@7&7zWم221J>c2̸ـI] [~A'>~|-O|˂ڞ{ʒ*|me{!<]ܕƜyX%U'dXKK,(.Le_<[ NCZxqnDVl$Q4TȄGy!>tAؐt"ۼev]j̔¬-(/Cw)IL1LK=.1)To83Iq6#5PЈVuX{/)d/u!^g glLQZHtI%g{7 hj#*/Rƭ)}@/T. +iz]k;0mA+~1Cqxx=y_8FZ.#I+_bK뫣Z);U1&d:W;kծ4lkawg_\1$}('BG%kM,ݳp"*yrT%_3zۅ5M[\M{a MIR #o^*+b@]laiٓ2cβ$0ž?Zo;^Huw.˫Аtj" (- 'Q qȪawdҦA;&8;6sA^3D\яh÷U$z#*/fMC<_mV+Ї!57~#V{=sC  uLHc3Wj/rK<٧T=#Jx53]ɲc]iK/L;T7.Z:L>z#?ƇJb<7b?U\F d[2]zY R glWW0~2&?H뉆>DhY&TMNk \QOUaz!s+mk%~ҾNܩsZ+, =Qߕ\Y{69y*[FfD4H `J'9HvJ*Ƀa~`zzOri D83^I%_?hFE׽=Ge I oO9<*;Kuq]PjQ (ǯ aD*č+OOz8UE#ekIu:G]^bȖO׀<Q^Ǔ{KrIX8"Qܱ,Zy7fPb@-MacxL/ܿ B.[ +fPa5웺cDvLփk>xstx0q(I8Ԧ6GmUUohCn|^-4XF&,97JC~i -rrGt"|Sv#,3Dt82/sg(ϖp~P\+"|O,܁<Tr߮ys7`s6rg4Y՞U6_w?;̊Du7 f,n!0ϓ`+=7CWKnG,N0wpK\3\wNe=E h Hv B҉ߧ 3yM^cNy3] ]mQWӅ0m7 ')6:_* \U7B=:~ѿ(5nEHUkz 2w 3IvD hc)(~ZjBS"[xʴ F?aw)2PsΓQPm>U.T7a G(r0>JXޫ!,= C) gɻɃO֦GS†n \{353BL% N#(nP)RrjKkC>Of4uU]NNacM"cc4Y{/F6 m'HfA3#\3ly|JbVA%Y<1fZ:DԙLk AC m\ܥ|kc5ua[;u1PpDuadq]~.L7͌~j(=[Ȣa:A@'S'Wkn+&},i PVKbsFD(GtC V7c/(mqiO'HnK B"0dMT5UZ%R?אHԒ#*(?>+>q\ihs2gxC4:7M@xoJ{CRWP%zMLB\t-rj7Uwk?żxwXBQŽgc{ga{AP ][o Uru?mtb ,lШu=_@ %yaA<%x506DFƗYtD1 Üg5 9v8ST7܂=( O7M Š)sSUs7!"?19GG?d~Z%2e?4wTNřX4WOh5qW8NA9-t8mbE >rVO4KFlcz2y7z0 -QJ >( Jܽx]'׉Wn>NPE}pĖlKS0|HdkJtCG_jN-q{{qg=3e͆d=WK<loDXK%Giut/OV>CwػGKU5z.LWCy>edy4a_[*ۘO)_exqWWNFWߘ x˥31ل]ԏKnYxo[V7؇]/v[T7a $"O (6بI…:j~TVzѤ^lK~D!kr⾋#X*J9b,uZQJv.xa y 8eў_XA>H;".u; 
W̊.)eig5tN_X`w.6Uw=ȩ+l]Q'sz*ygBJvolgqS G6lR%xMbf6wA3"o5h;?:qe%|!x0l^9+͢ [Ɣ~p_aYpjE GPz'`vd,~4miCdQ+ @wC&l~GPaN~ko {'G@Xo"u@שwTjȃhJB>UzJ?d[&Qa:&*J@ ۀ ?䋁̩X52cjXU#"**W&q`f~Ò0^A}Fz4\XBz*e꒡Bܬ6sim"}~~U&eƼ[sDUFpO6`+(TdFͳ;s&n{]j-O~k1$@j%ղA$;SY/j&.FSd"k0i~Hg5q'j s`O%CbVGO8ҹй1ƊP{Deh&_Jz{ ojښ־Ʀ!& \g٨L'\#Nf4Y͡:ﯚ2LV)ԙt  I&(Wq'_3b}X;CUpYu-ڻxШ1[)sSCCS35_e*Z$p6(nKۭ*\LhPyYA$ˢ_eb-[1ye &l VfҔ}=(ߓ"F`$.2ZFTv& LvftBfDڽ7de.w34Zpū8@ԯ^,1ŷsdSz:{DD LW=3D==1X!2ώ=<)H yFHv9YAPMgV<)<'aAoɲ[Z5~a<^S#hk7>A)фcmn{){0_g(M.`"60zϝ%?Myb9.9p-aU8>Ia D,W ,dOo0xQ.]Lw _.PÐ}7lۚ N}K${+1V*,˘!^$nO᫣edIt\7/8.5{E}sb=BPEtgS&!azL/lK,IEAZ<7ߊ?Ъmctd}͓kf<$}U  Q4-@iiYe\z;5(WG,iORQ G\,J[=ϯ v\n*BXpwuyxGU_MD\P33b|Fl/Zpor.署n"vS~[Hؓ -U)r)1jc:lYe^."H}s_ =>6 Ty^DadcޟR8O~Sy'm\{c◖u4RQ5hWƮ򏖦Y6{WZ#"|l˛Q+(-E<<{!k1?2Ui gBb>(qBM_T䎹=j]-Sxco3!oѼJEZ * xq~4#R#pNiXo bXem+O}SuT2]J p"PҊ]fzAm+2R۰-φ =bZCɻ[et~틊v^|p_ + h vY[,M?J6nteMуH|T3x t1k8S ",y>R\%7u[5 bYp]HQnuאַ*wRa޺eIHR/ {gC A.ǎYx0{ 54v:곣#\YAƯꡙ!*`$i|\( FȤE:]΄8_́ |Lu_u{Rl >(-[:Mٚl9UwvaKOطA!jK +K *87#ǙhԙŁC\q,8魤kB*HIsc}[s n&iMgLHbtc1VQp%a7kV(bA(pi'o2 -sTF&WݝPCi PLwS\$cB*w"N Uv?oeZH7[p<>(VI7y&.3|H<"k|aK|8#k݊[͢Tջ ٮ*CO{| N5t8쬚HT ڝLXa11?,_1{DKV/bM8lF\=3` N3-xK'$ʐP&1ZH &coBo>+FIp` LX S7}ͷOs 5p1e䌰}NjWpۊe9`ƒjJ31Rױ}(:af~{.^2 U-x5pYx\?ґ'SD}َiĚ7=zz}$Z A/ͲkQ=)/Dfoc> r9[Xz/t'fue{[.?~ ;5b%jQ}n?Aq'r$ĉZL]M,GI| ?R}#}5C M#AV 7Z,<|$~C~0pE0(3nc7K#$/Z?[%YTбmʚ7 o֗bV-7o\pG\55ߓ*:ݙXmACVKƨj_fJ~<Ni 0JW-Ӥ6u mk[4+IhLbrޠgݮ؉57.^`ڬ/ӗ0v']Z ׀m^1#8AQ%/DIQHi:>9 ]0 l7Ji쿧^40O>l>1bbrUPޝw&1>(j[@}886#8pLPwZo'aSHg^z̽Z3$J Dc~6[~*@/?q "[39/㺄xK!@B Gn|Z%0+qZ>_Mk r{-.C 8:sZ‡JǷ9,e|D im6ZS%]EPU!kg~IwBr ѬB!݄"4M>TvLɵJ-.N&HTMW2~M^\r?kB7̝B9ԨhK)wכ4PQua Ot&`N+feĻQx鈅n/r HnF#ˍk9MЇW&Y2޹>\-7ōֵժdȔ<,XIMw-:ݾZ#v Lϖ/T?Rl+\/B.7Lx;&:j& ?WF҅NYb.UEV2tff*.kyngԞɁV5C9Gb;e9(k$vn|Ƕ^tMoq:4c@G'5[̖X2) =3{1hDZ_=нPY4VKЗݍ8"aNxhw6sRQ-bl.bEϹ"Y]&Df}\eܼJ%N$N@3ikO2[yq̈gpo79 SִV7QMI "-a.3Gp^6#ysESFub2$e:;6KMbdQ^+=h~TDD fۚ%ԬTaF@q]&R3x[g L2! 
SwjNg#uYd-L3V:m|"{v מWАtc57jҼ ܚ̚+I]=e.ħubĂJݶDG)gNv4IYG}$R" 9z|bGM a6H#v*nrdF3xuRxV N*n Rtp~6z.߂.CI;5\A4&HU(jjD\ 8}ƩTJ1AHJFFZVPF2n+An`ŵM{%[6#)n{zPDCg%rpQz)/u8:Ƭx!,O;^8A;xO  Ϛ]ΙAxqUFg׏h m+> & "wIQw 2O|8r꙾N$R`/6=iO9_4+ k8C;ΗۨӜ͈Ozƶbv2Ã"$X7حU 5@w6ZއY{y2^,\YXhȅ?agY'CǏF Fm@#[˴5ypTә }+3\obL3F=UAIYaY]K¼?y(V3I};+G1U+2؃cH=,/| iuem?iCȍ猞`=|lt7R 7ɦР-1L~?EkFwyd7&pG Lz:N#N$Y53Y5&$HrmR ֕ˮdWX%eM^S>X!m;*3M+SjA/>D🞺 aEH}j@4JR_gsbbR]<guZsd[q|$p jO( _3$dEe9p F~xeC! "ZZ}HX-i?B?A|x2#GHrӌ m ۔Q(@=xGn!Z!l ᘿi"[{hGQ"KVQD^ RrD-Oxߧ 1uvU2W)ovCz<*`7d:<-~ZPz8lJ_kKZVG9J̫o xkR>kNu:4O}­:%JuJgVR!oJw"b!gީ.~N?^Xu9k MSLƥz7>:,twD!_A IʃKhs\>+1P*Ն Nt7;CY7` KM-ZơvWh4U9 T Rl֫@E܏uzz+Y,#QԶ eڵZmūN6p>@qN ĤYdpa*'=,2vz?Ѷ2@#p0o>8բ4u~Gdkrm̛]5 ^GT+,! 5^#r<ѦP- b`X8őx cB(+PZ}AGeMs]yl8qN,Bvk#˩ഃL3X2*mXr_Xd c>r;S=ͮK 2|:%B6I%w1Dn['VW]@qwJlpʦW^uriJW.%:o!fĎ? $6{ۮ΃5Afƥh#f:;8]Mo!@KSRBuC<`r'8 P߽O&rGd%9P [oBr+pIm_kV/FN?np'KXiЍFxoGiJc-2*qW^4uX-Um*o,s,>6 1:\zFq腈'JuJ 06qrs7 oи=h= ۈ6 gm ݚ[} 9Q?|X&ք[ +*ʪlQI}'mZdGK.3ki!c|4f)0`QT!ܿmHcΫ %^D>6AYGF?@"}z!cvԉgw- <dXgn d 8"jppD}ɣmac:Py]64 /MC|u'M%zٻ(Z:F^y 䘺VENs`A# 4q6W:TUJ^|nY(DS)]y(wG/mdVAPA~dx3>9l @k!UJ#07WOu|F@R`2ˈSj G6䍍 Q/k0i<©ト2Ōܸӓ6' B)hJzy.6'צ /Ҵf_T~|ǂ 0Z¼"vsk-Av|.y1~9Wx SeN_H86+A/#F<ǁ/{nQoγ<8 ^::8.R@`9FҔ:jXkf BttPgo_DMYdkZ5 °"s+ y 60!Hq9 Q{*/ 0Qrn!mh,]/6IIyjwo^T5(ZhBY_hj/?}Y Ug?4}gp9!u^_hʴ+;veSosjJR"ah/!A%=j,m'dCk6-hΦ3 -|{NjoBպ~ScڸQٷJni¥A*ږ8iM^'8XtTxNd5,ddYp N(]Mu>udێHqx!LqG.f/s(W)iod Zڥ;*:#ZtŷdBD l|Fi#]Hbfպ j*ء[L,1=Ikk]W=&G4JoR@M;ԖeaoNk^c哕0̖IaFicu>0iT:t%M {~CucֆZ)2G4RQ>dTrցA)`h33&h×deh6y/ ? 
dL \|m9U2|񖣔YAwNZcÒYy@7`b-g/l@ ,\ Hɓ1(} ʖIA0;M7KQ9\O* b9o?a30*adQ_)Gr[r[:s+]@H公:6v<ȸ0 ,y]GdYT7.̗T4w#$*}1hN&3+WͦP\.Ta5E!!y^aEh `{%O?vF7|0ѹ7#{ɘN/TG ɴb^0* C-~&8SΎc&V*dE"hiw.Bg|WIHLSLJ+YǞ"EOm~:'pP-C5!ǿc+nɄK,Nͷ m].C= _^ wТH=ѯ̰I ~JGYu7efWOް/~.~7WpF6+\j_nNP@>>',\kޥqP0+/W`Vl&-oEJYSȴ6)˿$AxnzX:ԛ{âIU8{施r9"{(ܺJp>w{qXW]jiFňd#8رpЎ[]v-zY_^mJ8z16+:#6t썏fΉ[`bm:CS0u9^쭖Ĉn+¶%v>iP#zDjvFj{LHh&pR7dd݉j"5_^M=F>eqA^L4xy?'5I #RW5;M~vo8o.:"Tyq?PO6; !xg~vRʹ3/fWMߟgGw:T&(i$*ܪȢmw gHs*T$|N: RE9LDFjTt6TYڟ$c1X|ь1,< <=#%`ZD+j27_5&> rX/8"t}29w )$9E`?K.Jmyjd!nĸbr ^5^nm`<{)ƨk. `M3ZCl$.Ů SgyЗ;N{o|›EdAY-<nuH P!ΖϝYHƗR+~F;$F8J{!`lد3Y03v>E58A\/=c-I E^V&ʈ3>uYl|@--B90q,© acd &DBOs t,ň0m--{54MаG6:6x9ʪ5"D,~{;+T4-ۭI ցZr >Hˏ:>|(QdMP]a=77:^׋S iS?zչpO 7]阠cEdsm`"W$ܤ1b#dxD-R M!9[56WP:0vᔒ?#CSNcfܶFChiQs&%jw.VGX8{^%ez$R7 4w~68̝;Ded)NNj*>tS=Xx&+T/Rhu$>"@k9z)a~C.:2LBL&fQ] .czik2hqI`JCʊeB@_嫎6|6\ Fj+h8:n00C#3#|$ԫ]O/7t9b-=O_;JP+/J/>bu='ezGHi'AĹUe)H8dk|{VU]6 p$KF:dVz"UTK)_ x8GSmlvL\7fg@Jn{9MJ#YY9[(,afȟ;AgPكE|ocඔ;}mlZ]F=x\zm UǴѬ]FPQ&,:#  05 U6z_ 3d@SވqǕHjuV7| э`Yp]8:w߂cudֻFd`$AK[YKsP!Ui| -8+ @Q@9mo#toE`UXJAh& '׈2m IL 1U;'$Gi>L1Q~S>C;!+Ŋuv &1Y,߾",ᴆ9(Y;dYGE MFYak[N 8 g_5A1E:wxѩ V Mb*ɛf,D ЪmbEg ˩pj)MO\$%9A$HKGnA/ ' ]r* Se #~ơJ0ڀ--F\=vx3m>;|HM!.CmTb)$ae(D> m 1d)rQ jJU+9+åw ڋR҅m?3蓅˄&UBv6ˠ͇x;y;^1hHt'5Ze{rW/  $aTNYc E^chwDLH^697bbRn:.icbL?>?R]K61 '.BmZ]9R,R"}' 5)3(w#^ҩrDS=;GNjvq=\BfM8 O*7gfhQS*pB$B X7i̙ n݊d"dtl]%5[48-p)T5wUrh6=GNN;zPYd?rW8r}g)yC/F6r/:5>{*A//h9T_rm0c4 w( KD?L[ $W#OM%$\ZR֧sڍ|#;3wh^hw* Ѹh4vN7TJnɂZUvJGDnU5mwdx쉪Iժ/! 
]3m¶jR͋J<м_pC?ƽ?$72SpŽ4:ΟTXPﳗD6!vzgĹYZt5V H|)*[O "Rʖ{/4~@<.xB#`;'Ca֓$ 'i'.p0áU-D]VN)w,4y~RVʏdfMhP'/dXM#g՛=[* fHEϵ+؟C #'Jtϥ]MH‰3ϥ⫗;$(Flm\qq%+Y'Х!MYT/ .E\LqW]LWzowN+r9D厹$ fCT=Q%[6.Z ct!-Bh_pUf-Ҭg /a vY۬K@[-DRBZqJa?lEd3S2^C {%" pA,6\ m׸.QD7zO`++FJ ER|+8"-v>ewʝֆ+?QZeeYcV'8m Zǥa(zDF֛rBm2D/3jދf@u)"S׌wZVTzE^RFk'>'5K͉k$^ݘfn}GS3t)7)Ma~%MRH)j 1ud Z'U2{E=uP*N^h5{ij )-͗ "e֘krb%jZX8MbzlCF 0 WwfhlkYܣ^ͽn7\!tuX1wo~,ҒHES7@}z:|_M~XYB&9B/Ѱ)(B7͵&I5|iףq[e)i7kpy㧨o(35P]pzd:s57rUKy P GW ՏVc8`9۲\S 4`+\ N:) BKқWJD*qo_s4W?Ea\7z ǘᔼKZu!5z)3K~N@ʆD,`ѭw#[Z[dE3e5Jt|ga`Urr%%'/I:Unsf ;^RE?wIu')ج7mIZg/ZbϛRȀq^ }I%)؆+n!,2-~3H|iOaB(re!T,onw [-eR $B[]c>0 YZclubSandwich/data/AchievementAwardsRCT.RData0000644000176200001440000031252013500611663020455 0ustar liggesusers7zXZi"6!X>_])TW"nRʟJ zE#wEXȟt3IyYC/cfvesۂK$O pRѯRta~;S#! 4[(dXXCM.Ii| =/|B"ʈ]j";^S;O6xƎ+v W-ymrl˜aX~TJ/SHyatDʑs< )-[pW6g^ K4XFqDV?-g #j6Cx"޽w7!L&>F؟.#;ds=.̭G?17};#~MF=MwL 4J&+z@-0#MKO}KG 0`m Lw(SRP{+.6r i48 0YPn%Q^eR&xf H}~@UDx[ =`q{- 4Ѵ%tD0j䔷h``~,=‡kG̶Ow;n޻$Ru@YNdFu;\ѺoRlč 0k3iW/)p@% =w%yOР(s8psB "|TM .kŐ ɔE)":!@vKTq9'ַ. El!|Fc˼F (PT0%݀6(v>8 8wݢ͹k%j^O}줼Ox=VLہRAu\y҉AoTr;(nE\?KړC%`u?!NlH/P.jTX'zґ>1F,R}̒1 (>C>$m q f.z.qtKc?ǎfH b^_}UY3.Jj8%!=yP}] ! 9!!NךR* #Ǘ_d`/Ynp_',]0&LUT lԺ3X18Ɓ`a WB78Ŏ28 L5|*(\.;/CnVBOL8kkiRDWCdWHu#|qMWef&axQE059*ë9Uh@<9ӽ&puY`C(n/^'D\HK{qenpb \E%琕&ZHʵaA?Lç[ >D=eh@kQcS+|zoxpq[`Nb,޹ =sƤCK70+_&N+\gp9H.$;9mZF{)<:%U3I{r7_VA+F{ *9:7ǡ ƌ·Zj>.Dӡ8q򴩂;ndDY~4#\# )D(y%-ĉ"ɆO ?6c|,Az&HP}qrR4ٚvHLzpYI" y㷴^}G-WFw?~ D֩J΁_Hk //@>KhKuȒ.{|yK)PjZYλ4rI Uz5j\y^L/ ,F-s]7\m>F_q`8Y󳊽;8Q|Oʕy,WW4Q28s 2=@㉽_ulR,䗏hЪX9%jo?5lۘX!p`)ee0",tmA~YCF‹~Xtx5p;)׀ߴ"f2Ӌv k Jz4&ߌF?(W 0w?H-8{dU[CFDqsG^bRٺ39-0t s`R?E hiwp53-/edfLkZ( :cz`Q (K(=CɣثcÁj{m'm 6hrk#rkH=cRSקl=`@{`QIFd<8U.ӳocy ]d?XPSpy3L],=r\OؑPLƽ?`s?zT `εM)09쟌:ŵv=vnZ `dcetF@)aO-[&pQ^yZc[2U ˯dON*CzIlT}=vlPDW?#T|@C""ua *VtX?lϳ)!N}FYWv$i@b?]Z [!0=ڈ! 
Aj?GF'] ='jp88}JI£3_"TORC7F'U!n'~ (R4nɂo7 1e vˣNO$`vPpNAa /&}~K%!%:"]Bt3Z8ȃ2#u~1O.^w1Rl.[tؘC>3pPԖj 2 ޙ?p굵圫5e?4rNs1zK`e8njkoa%2G6Zh-ByOyۚƊs])Grabco:Q<"W^t [-] &5ݯW%Ǔ'!`r?KyHS_T eG}:4LYo), ٘=8Ԧ~)V>PL|x<-dOĜ,?U}XɄ VS1mȜgKͿBlZ/FoglTx6C%d9 Fʼn%ruSDNftC#XޝL;/Js`41a거#DZ=.^z T!|hbҶ?QЀL w [n3u9c lOkðڡXive3lh 0>)c$ՔipL&PvU- Bȸfxdĭ[%$Il9yIؤZ5 c~}O~.w6Ƶ=bezV檥}0-S\0չPy{踆[oZkORu1t ~eE,J|CpmEzOpa=;'5}!1x&Fgl*sr6 % LlPP=]{hP>%K^UH,xk#|NWcfצ^淖W\CTy@N^UBx]4J:_l*@&:c+d#68oVugVFe:/1A8t3:\I*sSt?/:A_rB!8dE z*>ml6 1D}d  N4{rڲuE. L4)i9%jt:NgY1/fvݓC",(knT9~Pb'  D!K@/Ͳqe֗N<(Ɛuwe[%CJȁBIEkel F!pm:n"ݳ wZNk0a_Æ*91 ӔE9O BY ͆,CRSs 6^,E&7J\8|8BA96ZVk~N ȥA[,Bk"B旙H_4 fs?5;0~XaܖM^ܜ?ae"]jO.D^n1/b7= eOUՏJEKiE"*n_iDΘBo1֙J4+V $ë&H3,eb(M' spZ gٿ8X9%@0Z+ԫ߄~7I9n|(SkB (fF\\UaM*8Ѧt&ee((mqVJ>)]ZU[>_h/ƝgnrH<t {};+Q=ñǴ Jk"ntE$(B*_>qvnja,{Q=;zf +6F4OJa *YCxXν -IGmߡ%}Xx Dirr\♕TYK!MJ8]Ll6KP?r tasYGOCgy(|P`:lӠTU66&.#ly6Eyp񭪰e;( Ch7\4[oNeY1!ߕ+')hj.EPJdeJ PpA\ Xm@2~ 4pu[D4.:_H撮Ǻ5 =#/Ѻc؄۪zqgճP;Gڎx Oj $1GG :I|HHc4${,{ћ%,t|:h(g ^xos| NSf̲Zּ\焿uVŕpq̐DD9f F69ѫg̀o;`Ӡ++PIe7!:ӉNn&b N֏$UVk#gG{ KO(S`%3<%>>aocp;ttVJ'lũų %gדWIYorV'g/\Pҧi1YۋXMحu|p3p<Ep oaf]@`} 1Ǥq8 J/ǽUfH3js@<=*ɭ2FtI~ptDzJ0ͽ3 (&+pklʜfi`";=ưuPSA^j.6a-EV@~K])cG4!|AnxЧ vwAtX\UJS&l=7S 01[?կ*aYR`֯S4X 1 fa2R(.V !4 (GJky`†wޖ}|{^!EmѦu ~ j(UX2J0'h9a `Kg#EoT+p_ֽ%rM csIJ#Y(p7`׼C&.AQY>mzDG M:a8׫GKzу.an/} IH(_h8 .VnlJ l'6]vHgu8Eng.qPz@n@mH5Ni"xKpC j"cDLC;_ `^r<_|݋or<״ z"fNG9NkF*;R׵v1)E\Ðh0fSgrAZ+.1ƬU >r', zc}ڱ Ч !sR&_j MVJ|n07iZx}AD¹|Ět0Ó&eČاkV,OK^FvWE Jk_ŪvBka<:7/!PqrW4z~ns˽S\9gՓg|B C!"d:fo0zb p =wJMѽqsx z)l<;@=P=x5٫xqf%Kf =&ePt ꋳb[G,=FA~,4Z@OB~1`vsmA\ۙp~k. 
ML/MۡWSiņq]zAzywh0+[3Aa6?QPMKiױ"p/8 5t$HOSa!\*"d{Hd+Wp 2n&]6eY 9*X#Vӯ.Q.%ӕbPOy6C6ftuitǵP7vunID^Gjsc9WsH9fD@a5"(Ho[ڇ5T_Zp#>hKC'4OS pqBӆ@Q`| }&0esvrNtdBYy5 tau[ȗ4iHf nsن^zr$ ݗOP=$K5a OA0aplHnqS6u]UcnV;GR o_X[w;l ^7:(?5|Ym> , ɧ!Z#BvCŘm п?$d=sJgbgSR fYZ֜LS W[nlۑ~{4Qq[EqCY8v)r{ 颎v3}]eFzv8`|F'oK6ތphhkfT\^>-B/C%!A5s,:,rjn ihicw JaKgѨw,D+3ֽbR*(e,-Uf$lAw1[PI\X |*f%7uz$/ӱAFi h E/KK:*J*:;$wMwD:bG:JDF荂dC*M>'xdJJ\Y_wH-zl3DNLP͓G=^i`7J;%mUC~JIKNItINW"?p~ZQ+{S8j+ZˣZ259D/flEYn !a[%"V(UP_bI| Asy+EjRMwePGJou2(|]'+TBB%)?_\H L~OGSɻB'+4"q15,z-ZY6ytg;0Ab&n&uQ1ҷ(ͩ*!tXɽIwba\`L̰s=uDoF^Ys'&>J`Z,ұ":H`O}q_^ !>>}du:Vw [m].Q6ESW1K9]ٸjW!ZywkS>S^ s">tQ[sVt)X~Xڲj @SpMXp€eo<[J^@/wTַ@1` >g^"Hfu 3ed7,ƈ(5V Ax܏zœ7$Y:;^c;lOqZ9 =(b[mEf-H;r/E?2֘rd[A@A PJGd.ڬ&"/&#ccNwPա,OXpz]J7\:ks׋9sV/Hcb`6@rdFʪтh"(@?F 5;ۦ\0u|8܆?ӆ <,8FNl(VS WC\C'#1v⑅ג%Up]IXx\κ@S57zse2> BmuSջe|Tc#\l}K(Q6%Yӏ̀I{r1\cN~L7M ۟nߘ,VLw'8=xV$ALliwJ=(xFaGZ;Xk:^f;daj "Jj% zcrа$GLl__6 )U IrBXKvzf$7- u FpňG9s{k\WW#;m~{{Ms3+>]Fs݇|`{xiíBIKr*ےS'Px-ګ;)ҩ݇za4-7MwjٞAEf7 .M -j-VLs}rǛz<#E~28txiWX::.|EGl)"~xעҰ`],ժ%B;Eau/k$D$ΖW9۶>Nˠ1\4 ?7j,<2򝚊Uֳn8C&LZ.Kzg5OC⋓̮h25*½4VV6͖@.+&ao{SJ!6K, U 1ohƿkMI44B+]pDwV@jL\^SE/zeEWo9vT$όX=RIx0*ۙc픪^@2q V"q\'ZE O#Ldb:Ml.#Ade[لrMGoTEWDe3~UPIFdTTc3ddȅ?>8o̢*Ѧ޷)&i>0ĻKy)*tSqNfe [ S趔KLu,=G'V4gVgoCk/ v2&عMl4=5❭S2;LJԒٓ2§o>O .qiz3H8%1vDpJ@q'Ut#d%Z0T ˋT15C.A 4Ô5 TM3oa(,SddD#[6}s 1׽\G'#rE#u6a@3c6L0l>*-Mߐv{s0孇bG$Exa}3;RǢYHAU^`µ$K8MlNGvlce@ _M{Z`?)DYik|7m,OaQ⇿/{Gˡ*^'s2$N,4tL;I[9S<6F(p$r-כ*oyQ}J>`|E2]A %(FuCK@Apv} ]Y !04bFodh){&!o_?WLE>mWITwe ڶEW~02IS66c"FfH /"%YK$K<~$In6@ONP!?UVONh;huX3@CuU w1-yZ /+]0NӦkP2M@UiJ1}#nPQO(I*^_I%72KvwDx71So/족#l\(r9=6 ԩz~̉50O 3A{#BTC=D1&.jSpRW_A7nbHE(Me,L'n+ I8?³!nw̾IZY?9~->cϦSq?fm\푭h_ [ ׼ٞuAP7r!6|?o10ڦCt`,"\ȹT֭q.J뫖J&}FE>IxXcn+ ϓW5f,f !Q ̄_n{2qrr4#޶KD*b n[{;,[?Ty[c5/-@Ke5u|S;R &5&7|Lh|nJ#, \f pR&Ϭ?fUɘlEwLҬ7Mx,!=GtA(=UPgV`KhAt f@CmSzr 'knASM/@ndR݋j@;ɂax`|j%cٴ(Qڻ֘,NEѾH\F SKz9fQOዀYG۷CaIYfM8BzqmG m'~)9z[}`X%Y+@OUYLԄ)o{ w QU>nD $%XM1Εzď3\D0|0'8S+= e-r/T {ث+yrvq 3 « ufM̙=A+9᧕u_+˃+7tI=4JZǙvi~[Lx:,m̷5UfB|Ie(M?'t+-CD&HŊ+&/*嫃 
`ܳW %S>7ω眲5 YWwX@@h;Ns`=UK]1]TnBv@YZNNwsz_O(?<`ԼBwTi~4LHcp()[7/e{F %xeh/c]ɜe SNIcJ@nn@wqr,z֤r}mПEDmdV?q#ZIqsR19 flk&MNbq7כǾX~M2%=+L0XAlWlz]hI9t53j}VDwٴV ~Y^d%͏ Re|sr9@O" mp ݩ2nչeHe_rIfBa 塊哝K:A(.ިV[# )5Sftm9@Q[ #JCɩ39V0jsuCJcF ]Z`K^UgKi3̘en;`mDd݄BHlj7ikA"n'l`F]mtga#LBv[dDʘx4|Z^w< E,*69g%<4½)J?,FL^A)C4*<&iʜ ]ѵm8g[4SWHDz15#p8 bVnq~+܈ef|)E p4`+89ToMל+ YԗVeN7 Q@*1݁۔_1W,ȜaOdoOK{(񡁻WN#W_2ZVIrBr}|p@N zҘj4Mܱ&B0J1 !>rhrSj:Ci"(1I|D 1.;tB"S"Ly7 XT˫g&zш脅gɗ7N'f醓!<^|ϓoA"7Ⱥvuc _}VhChkwAПbjGɝ15_4u/RgV2 2k~_m dx "enM{et|| sd+ 36_~N(M_|3^`|U(DsH(F(q|rQi(V_q3 :, bL}8yR$G) a1Dv]&9]}{1=0/2um0ÇEbф+1;}3crR* OϤT&7W9v$lfx׋0y¢[Ԣ؁uK4$Bo꿖+ʤ(яIȱ\'LVrX"фSU_`<*2{^Ŕ \$Ak<:nC_~&l뿤>OAˣcRZyn<s? y@s^-sRlx?)LY\GaV,wNH37 ̸fՋ,IܝZMsTM>)*xj1'1 '((A-1kdHPAsݼ#ѩ5Bdr|D=9I:=٬ e~v$}%^:L'QP oW GN]\~ ‰5" ;%+E vQW o$DzV&c9/~6[EyaF2""C*PM'[p_!vsf@#iC/~Yӳ*FZ8<ܯ~Ь4 ~X੓Ƹbqv'!ҧin]&Si=و|-Xyh"OYpX[|A"Ք!e7 L_%̝>bga?0MaTqkM*oV x?v_rH\7|M̐. a CP_,{BY"(HPEz(f4Tr&Nj PF YJ,ԜSG[g9:XyaKn!#Ȱ 1bĻXuLı>\e9uy%x'~V W@U "0*U)yť3W* ޸#wbC! wNJH"xFrDzWPϓ3&5S34py jGΰxK"dS"4SL'Lyч1,q~XaRA?y73&g `1o};d D%Xwmg;Rɂ.]n~މʶg10 o~EY'[bU}!dq2ӇK?"}ܮOV 0g˥  l I8W|(Dg⻘UdS*P!l(!^}ј\k͝Y7&[,۸iA8'.s}ٓɵKkN"lDf/`x]g5)ENB%u)j~ȗyU{W"J #^E,:ƽgţBˀA89mQX۲]97Uz:/rb/Y dG#gd4 ebc&;Tg>"瑑$EY?I9رve0Y+* V OyGE$w*+=$4͂#OGګxwdUfJv|oy\bMkxv7Gjg玱2g~og]c(}XDpk Z/ș 68گH K@ uA U@i{A8)]S+D/=K!͔Ӕ- nYc5_ꑾLP'm] !ioϞ bb@X֤)XjW\b.d<" ~甄Q)c(e0h%&H]82"h~]"+ 9a1Fm UŴ?Ppx%۬~f,wRƩ .#zW-d?әh$i!(3눰 ࿫E[@Yeu3u}_YFEtC\#RKśsB۾[졨x/ojJ:oQ"e3|(Ę`M?e3@#Xc]bۇ#Y8KS@ˬ~lG+Lأ<^oiE@# j *Kش4ҽ+0yo=Jo2.:Jqg*nkggk_2So;@^ݎ"ÀIN'=9)WybBaoI&bNkTg#6DZk<&uNGdE=IyY0#E,v%OM=|.S(fخG5Uy?hY؜z=Ԝe 5SloOlp2)?"xonud=KFtz>4Xc4(־T'RhhQw@'n4R;&#JO~I=[Vf_v ~C2[#NhTuEUlW;4yҝ+!K)W")r U eX$ /Wb*o~XjH&eFj_ hɀa7{UwD7w]|zRDyМo.HՌ립wA ;*0:.jN#2(c$.֚V$L.',Ci~N^J)wX6%431˄m!lM7mz"7/9 MU+גlRYZy1Om4_*S Uh !68%Q=y/%P!p`VDinLDA{>j&@NGp7kߠTj0wDN!*Ory ;ј6EYu-w= pk<upew:q|?ŗ8 ~mwܔ
]Tg7Yϐ25J%Tjwmճwգ?t I{Fh0 "UJf켔֢4WQ'W5r(7RyqEpH ?[X2Cސ!i$r!`5E.;Y J%.UBrD1QđrSqEc)x<'6(I{ĉlf+v6|Dq>bZI3rH9ew)K K=~ceC&6Lb|4;Y aD{#(+U ͏/pU;RVJWthĢv8ޑ}W^[`U#W[p#CZ X) (PLI7kTI@3? Д^73SKSH7HMG*d *M@8DQ/=X`}c鷮c$3-5EݐEHSf>"odsTʢ sDN%,/sS.?gp1vVZ ?EE7(bc1rf=&t f-NDB(vlY ȵ`*?sVxle߲;'ԡv*e3 Z& OhA< fK<1Ğ\1S,2YA+d.E_ b WalPYMqJ_Μ>(Jt?rC- mZE6i6f5]@F&SOe: :4R bduYԡ8x*isEF5boF)}Hͼ5R烿b9gKg\}P}RП$۟7&= EzUݜ}ItA"SuJ֦0J oSh1Tس}*B:!۬ Σ]a~՟'+HX)3[${ŜBsҫzN/! B޳~m9B%sS:!Ɨ>}cl64rB4= ~am`rB׮*Dhg[dPLLwnvz9uξt ~YRgBl~}NE}3G>T ēnFCG/Z9sQdL 0 (#-)J8,X>w4 &as>XĊr.qL ؾ/ʦwl ,n¿6T '>xEsm]IP&0 Ӧ x;P)2iHW9dWZjW\LW 7H (r Y*HC.l-)Z;!MD||ھ00nr(At9s6v;4iGVj!F/a^h{݂Ĩ#uZPtdQ Gn{Mv1^ 3(/s:M DɵM]!iCBD:69<|bJi,+ _lZCIϽ>+̴.X =i+00:UhQRS9 ~? MyF;kcEX:B|w>0cy{a~rK$,g 5a/"p}iMł1 5"ۇsǠHz4w2G1F1 @^LՓ+Pl)a%y2NO@$`s,u)&*jjpc7 "9<{fl#F:eLC њA1-Btuw.sRe",pE%P*Hfmĕ z'óYfNҥ{h{wVݸ}< C'ɆX ʵjqIw3"} R![uzW)$i*{=^2{wYp?Svx:D޿rP%{nQ797k{L+E)cN?m@k&V)b柊խ#Q~,$pMα^M-&o*M 9?52L?xiM"0+Uc#BId|@kw$`_DV&&l_.{SߔCj Ehu:__8gМBhD byQ,>yU -.y7;ކO 0S6WKi`3ob7 &다󑬋Hv{ğ:|zުM5oSݖ$V'難U=0}ZH8!lK(8T m֦eoouatQlN")"F9SW \YV&bױ흃PLMȀXE,?sL n8c/"tGR#cQ Kc5W7so;qOqm؀q1[f/ U^FWVpxsI;x "Z㬵RS%fzH6bd7'iљ3^;eN#{ٵB>kj:'eO/ qBIj$*ܳuܿ#gn/,F cIAx`cn:@rBBgd& 9JX uB]Wa$G%7%"{ROzWM ujgF M#VX[9ǀ͚2X%{23OE(uZ(Px_ukkyXev6+ID.Do5I}v_[~%A\CIu7zuX3VCXANTԵcox1]0kzϘrqC*_ $2RX 8%>AgF%n sF qQQ\FT6]wC[<6=JL>LS$/WZU4%86}`ETɫg-4Yb '?Yc]wC\bY% i jl(IUr'x,iqDFi}g'%ƈ;9PHXy˺W,%"q6Eq~RC²ߵ_i˄JbZ DyDRiXq㓵?Z-klnUN+S3;x? lSZ3K'6^ yV)gc.A$5ܱ*b R4krIG 4ց4$xǧCx*,{{R6)HAn?m\N^J:)u])I| ᜽ũɲX/1-(U,T1$&AiP-yFN/'ӱ0;ae/M쫛؄?!s- hLf"jpYT7}5$$6!ln͗g kPGdxMpa%ܭ|.*WɩHV :xR'j[6!j+X=^$T<4 jͫTwx`5>C?+Ѩ;W'E\,}#)q6lj/N'^ rz|nT4";P7_F$| ?R)q̼x,T>$2U(p&dDL[ /#ڤ3G@5:zMu5^;(;| HhWQ>#f$@ 0SWvnjR3d-c7䀆h߁hr.3.IXY 0˯~̖o qTՑܐaB)3QVpr=0kT1w9f6 '-ޢELȱH=)ܘ ziyO_؟$緐G,K/ƛ?}/4! fy.Gk񅛆fbdhu'P=p9'|_28ԇeo5;!=xJc7&ȏd O4}KB]bX4@.܉]h!~YdFWEPkhZNμo (:Z\z 1lXs㞽=B}o"`"A{*n&<˕f=mQ Gǹicis-=ẁ? 
D*FCRJm7JVFM:H4!1B<QLn1N+5zAy&ykBϵ[7QU.ZZe0C/OޟkuKsd2'nL̈(GgN^aA>V*8BjM'=< b?$7qW82TpRдXV h0f+Mk8pSgP_uB&Yc28Iy HYbM٣ɞ%jCy"l)bB&SnOYʫgl9s}Qn1mi2LΣlLLc ͭgS![+HjiK+)D[p6Ξ*ڞK,ZfCٳI5GZEYA=&!P,$g֜zU"TOkцBs<m\UzE43(RMUtŨ^&w#Jv`]5͡Lgaq ޱVi^oیsĵ&X̴(ܭ@.)6T pn|[Ah`ϡ XwjvFkҖ&+sJ]̮j%[UvV^f:uNXFmӲ}H>-Bm3fӘCsg1_180HPWi08mA$:wPR@]@܏sƓM2)&h7IBSh33*ݱ#gCY)Mr8dN#b`yKhޘ²te-  NQ p;[P7YMtZ7Uҹbo_r-?X n0@SlXem OR7 ^-e(`KVwLJ,B.TGG_kNUA6;.d툘 6ֳ:+#J2/Eͺm, KAwrRWkL+q,1 S2Lѓ5|qUJ97çۯT7-^8噕>@AvfO o{o%O(-OCupdzbf(uۂ  ޻x )7;'e,{&QXI{72[hfg61lG 6b_ץ/:? _B[گǨܷIiv">jiۻ 5?8K5[鮼swvtPcy `=D54WQ8e}ڢƙ+tB)T,jzEă.Պ-/ɩ4ϡ3<!-d+ (vfFsvųA82j~B/+ѓ-B[Pͼ=]J-F _9EYE7 )7@Oܓha,˽yx jpmdA8NJ\Y rR?& ;DLD,A=@'?E-H}`^ܙ `wr&@T#¹O[ycUIj d~ݽ1YQl+qۥѥ>#H:. ҂L<ս/FG ǁ亪Ppl^sXMjLU=͔Oend9%+U ?Uu J}Nxl4>/Z9rZHґB6PBQ2jsM?m5|5y jb \ 3>H'>OҼI滪KA *HɪM8}yE>uI]/7+{1\3WiAߵ/`go[hz`&UܰK`7X.$Ԧ(n ݵc?@[]LG-R)Ve0LgW Voe_9BONɲ5Rrw0gX9NV謀d*|+N *];N 4fo.6yE1B\Pic&,Oԣ&\s$_m2+K>ZOp &9ח+Eঁc?g] _UDN*Ol|MND!-_qð^eΈr5 hU~`R@o^!q)4` )rMif:sJ<>+V-ExJ5D{%jr:B?c87t؟b s6UhD=ҘSuōAcU:߻xo7'z+CT$UwCw}c&veuz0dſ٩K'u>pKܲ;! XSq`T9NT;1kF>|gedaЂu,2G) W6lgD 4M `M.3o-S")8#oi\I.nuڂl6fYfA=",R#mP2V1҈9PQ/zHTJf /*!bxE_'ņ.:[t.%_e,QF Yy3ܯtyvNL:DjNy`vlݥHlG|y9N6~-F$8Ap稍Mv҂C2sDypB-Kr988m/?{ػhءʲr1hW1R[>.´zS,Ըo C)[pFA{׷YDT[h$R8xA_6#B"t\Q<. W9 3AsB݋H/t)Sl;h5ή٨ ߙXcXM&/avM hhlՐATIF5&m|\X=GF OHi fMX~07ިO) F\m<1! #M`kL5RͳN)p2gyx(K6I1k26Q6sMJ׉4+rĉ 3$`5 KYds {xlOu%)L .eJNYYy"?T_\A&Paگ}r0'1V_qo@n~1p R^m6KJF^M/j ic_(KHVbIm2ن9"wSLp;xYZl]<Hk7NtJ٣AM<:x똿I.ېk.愠7GP4Y'L;t|(E/HɎR~-a S / 9K ~b[KĤs /rߊ(L?_`^F75>drqMn9ŁPV446b*IU*")yVVL. 
Yۋ,8 M$ =z "Ξȼ5@ORiFmՄ Wֆ`ξi N(9`^k'4o;5bm}{Yx:P@z)GwCԯzF#Grsx̖T\@a~2psF%]i0ݘ-7O#H&ѥhN e"(oslY7B/i̅`S;IVhf)%Q Y.zkeDhjlrñݗ $, W-lr\aZ%•s5]ݟYrcɩ^[:-82>/1, 9vx,k Z$rq Ou]BkNN"Ph!GaO g—YrRڲ_fd .8ɦ<ϢM?X~ͤL`1=%RCX 9Y0ٹQU ^@69GW5@kGTI(V(d 6c(W#LlbHYB [ toqv%uպL<^"yg_ pX9 ՟UL8pk󍬊;B_\vbbXbx+J$);gMyXB(+g+U14WQ2}F BvPV ݵXzӰCxrsGx5%((YD2b@_J]΂M$_(XMjH|'ё.E#{ )xn-riܹc "Hܚq%̂qadd՟bÒh°(cr?k7TnkPCv5+EfY&5 h8$ގ…g㥐έ2Oں.fE5˟eD&}Ht_ۣalYH+0 LSY|2+V L@$џQy@>9eAZ͝;HaMX /DнreX2)FgtUJ]yL\# mp١ŤȆ[#b?Џ8=)>j<96OPjZѐ_o$kq^Iڦ !2bR#tU]t!lki 8hhXAR0%2x9-4${7X௫܇g$2}:.{ ۞_4d(v%aa9?JEŽ%EBc݈e2,v;W/e8>*--3+z>+@I@^-Uޜy࿦OV|OZoXM܃7tH&jofAܿX] f}v@ʼxbW1)s쪔쏪GE[\PQ]ҦWMT" _ !0 ~!jQƅ 4I,uKPġ~LNc;SKl,Lʇz7MwB$`&-\F5I*}];)|2ly/oMBU?<#3<*gʱ?Wt&1(tc!][!3G,xal7<φ3po7֤5 !E 3H\2.TIQzN)o7+ hT Wа!ƸaⓕcӟR޵™ܭ)h~iIN܃VF&%iOҭ .VVkYª]П Hn-y7f '{G3CS)kH0 _E9u[zs#ִ65!Wkm4I.w5lʙ7n~~2S@r ;K|*n*$3jng>yIu,4l ?H 5e1%u0'띊yHe ec2[,0kV~~P rμ+/oA2j*DhS?Kф,] ʎe9ױlm)Zh43|y.vQƲ*b!Y>9> /Z[ 9 Lbi,<^_c,פBփ<8]v\U@뷸;}[&׺aJuVyn͔Lt/ñfuKH7 FJը'tzU+s@ؘA#&#U:01CoV|FR3VF)5!QY8mh^Ti:` ۏO_ tDވrv:23,,RR1mt<8 ^^Tf̐4jj`[шIzfV+4:F)P,UmH*R ƝR\G dg/ʰZ4UuQ%:#4K M->GiDaƆazAtƏP`(\57~8EkIbfυ)坁k52꤭hsCZ(8e,$:XF!4T>$H׍ч`1,T] WRłG) FE[-kCPq֣EGIQ W9*A}DG)N-*$eU27jVD9344 Z}'RS`h,.b h'?$%܀jc|⣡)l$LJ{!okd~h7HNʹ2Q32HG9[`jTnIIi6@7@tbakaehe6ͬX;n<EUQx uy$`LvYha=+q|N%2fHuBG߹7 EBw97y8m3h|E& UR2hdSX߬8D6]Evi l U i>J|c_ 2peE&Hc=^4Hݰu2-\@,3(XCDhte_l1LT,0Ű] F}e \b",HKr+ձQrH.)w41+]:xsf֑6Ghp<̴d n!GrW]s,IWjyk}>MPҔAJ?'2"~: bBdVOo lnsbpUCHBEc}薜?ɟ:`}S2;/:e/y_wPEQ,/X'gNR`{Ρ/2Z4 ~әef:rF=!@T<[4[Ѽloa!0&x(cMwgU,!)%/w5OpTM0hEJ8RrΩ^=d#up 5+0 $`LF>-P`PjuzGCȈ("q(J7ãuRk?AlɌLgM BjNX11aJ=6j;܈ɮ!x21nHW)fOYk +vuHTz田q|RwI(3K[TXrpq#.#ê v yuMjϝpT2.* >UAڽ+ձ~I^u`}C˨nU\/ԕIAd!GeF>"Õo$;Mڟ lfRHt!_@j5/. 
7:c.iaZ I_ #7)SȼqAWw%=C{QHpxccˊ9 E^𞴾!cEk l^7ޭ?kW OQ|$aAe?'`vh#'US'dz>bW%GO)/bN JpH (0dzm!A]f2[0-g!,Z^t kHI* $ȟcqa:,@rDװqGvۢ!7%5fSA9cח̢*;H^(폄鷅՞{(?X5Υ,/IkY[Cq[Pc[=fY[>qr?⣲BHdI!?"I_g)RA]򔉸';{ys۸?7O$HμUl(6>y^=vObm#L9Tl )pPD/M^?T$bV7Z@+kk5FbjxQfY cH]x t|Y?rO 2M;ݲy >_7 h4dW/{R;l&E(Rk&Y_Ǫũ[8SyAܤRNǶ9EN1wJ"v!U`u[ {0f=;_o"?@ӂoOz23'CYaK5jJI6=Ϗ,DРT.dS̞h9W53Rp=" 9aFe,ik.~~ DCb l5!)DLf[J$la> FcƌsŎ\Є IjVPؠ" J<⚚瞰^y|$fC*AÕd6r?h;wT,{׉u3h $N2-'`ٱ O?z.D$!4ƅ "TMzolrbr[fGL9d6wf^g U*Lւh]e7M^)Z jrEL C ?jn(qjZ#6A~@E`7=nGqI| i%y]sedKD+sG<@1zc@pEPL+;BCK4V%F2 y X{hԢlL2nCdc-}4qQ%zwpaiݞ-F @yW+@s)2K9sg- gk&)}J!VRCP{*eݴlU9)2s)"._\1|\J#~քiK.#`ϑZ/XIA {>y/8;[SUXGk~ufW 'h3.zb=jiO^_8icN~XKl%aizSw .g۹n=1pWzƮ` JfTF#N92}-T8ëuw]6/FM?) ӌ9B ؆~05 $P*導dWcnuUNocNG frPX)?[h+33k?=;3 텽nhB! ݳ jLlW+w)0|YGݎ|L^ѬI:-4MY3pԴ!y Qm[S?qW)nysrҠaM!{cfK`}ɵ&B24<"Ұԃwp2Yeq# =LIR|+(X7HNRj.QV1<%2'yjya#u BI=x~Iq=nB_!I]Q HHuF.r: S%#Z8jo=BcKE;աNfry@6$tnQ5a{JVJVJ"Nc!RٮBT\zQĈF,CHꟳ@TUoXD2m-Ϸn C E%[0WSFV=Ft>ϖtb>fOJ'qMCgZ;T)iTk>W?@Y>ީB^V|8a:|MTg8ۄ]iy^DI). e{TNq>X|1<$7O#dp"UMTB 0įgld}DȎ`"č!?ɴF\>dNƪ Qܔ@kUl2;BgMǀF_o8z"6^V9kf8k{F6^Qp).Q Nysc{|\$B<)-ļUs Y DV@j,,Ԍ>irddf=(-k@9ԋ5w2a fus:Rl~`thUQERyӴH}DV3CAJIdKFDžQu5 7r$9#;&Wȗ7{a7 A0dV2L0T_e$do7~bs5ߖv-x u(q@Ўf14ή$tjay4p>; {/`yV't =P)fJ^wLWq)u|\k罱WR:W+v."6/CnPKE~^߅ z+U{D$3NZ@I݋G]ܦ*;]mOW].=jRe9 $ T$#!)H5(zS#jbO瑿OɁV+@>~lMDI:Ʀ8yu?~- hREr*o̘ueF_G_CC.yC_9 ˎZH ۦ(.QmA9n!v6is͖OjW9M5{^+)L8J2s0DC\r;me~fJE=ʍR쓂pcyG/gJ`D (zV4ߕ*5jylZe)^.di3xܨS'NR7^5v/}l(w6ꮓ4s-ZSX PDu:y?@89zTNPB[$><(E\۞f$+ߖLҮd(z;48;~)ͬa_/`GacxЄHC 5)C@-H5ma?;[NJ~*>(Km>zsۄjMs-۟_U.ţ?|=^d`)"$E?DnI]n)O8_tZ#e҅Bi+hx9i/vj NKi4kAҪ(W8rvk矍s.b/?29=`"\d bk$nK."@^~|뀈@pO/C'/QE%tx(p{ xpU=?V_10 u?#y~/_A{h %v+p-]S۽֔3d@$yb5N }*pXߡ]ٖͻ>6KGJi-rAز8 " 줹\!Up@dկ9b>:`L0a* }GXQyCV҂bk4j<ʕ!͝N`"|Џ@4M# Gj ̉V+#l&*N.ӝE WZD\a?t*_'C=IOL7FmY~.~X;9e+kqͿ\(':A-裏ac V\6A4 & >ChF o{z2a#\)-d!jVn^LjQVq˓ͿӔA(?c3w@ [7R$.AI,O[GN`$ws:ݲz=gͯdp\Y-ͷL`ckXԷߍ3 īa+ByzZj^A@yf{n5ߐ8;3Z ` HKx7H"„è@LQ0$ˇ6ڈXw,\8hiI1:;E-N\Ѱ|gb~h5 ]Ksx]qK,s NSZ/]ETY;??HKo'ώ5u 9ĠU?L}{Hؚnߨӱ 
AMR4왽H{ۖ1u≯wʓvd<*@]h/=ރ2=)"jSE[w(Cn@]kٚ.7&պIgQA{2#I Y( xOzCK}^ lv*^z$~Z*WE}E{{`/x"WPLfwLv>']+RT0OґQPʎ]z\=Y_[ jR(2B7+;Ӝ7Ebg+)' D[em${LM5V 9<$n-+.?p)F-XUa eG]\]Fܚ@mFO~e{ j$U,,1CZь4[&2x_Tk8gZ+KE2=g%>XȔ frq]&یs(hULuNuV%D%&f3'>x1GheTcxI1l1|zŻWG延g ))X0.kznta"@ߞR:N`jM24x%lO->@8ߥJBhC-!׿zېp.+r+EAv:բ)3~9`}eoz"fg&]BiܪV'(w߂~"ϻv%yaJWũI-=TyD))9 a76 û3IKYLLBY[dAWOPu' l s KO1vVp&mf%khңlZ=] #Na/eaXoEUL B3ʸÜ:_faOЇ1`𫏦p̏ft32:آȂepnH tY$8E`ء$(ѪCSZ4k <P3i01"oqS]p^1Y\TWz8Jpv aM6/BRj_9cZu/E~d9z~LaF e1UC&AtZLyN:xS@a+_#p nO,}". 0htN/r;Hn:I-c*>/:ئ `1&N?HaIwelEwNPKoaꔚ)u3%1Sَm@I*jc(@TXT,LZ) :oOzLg4Vzmo+dwrnb1H@ AZNTqy5I*-l ::yxB+rH}y(Yf.Uud#YI/alBj[-L1I֔Y OQVQ0aTc [B9P%(U_ڇ-Ғw@8~]d9a55QEb |Ԩ㐖)^^oZ{h,9V&r+WV+Z.>q~ݜAG*<)2 &3%U)h>zyZ.l6*(uC(1&.]KLN!P /kTx $5cBFKOn0~ZUbבlD|?_tSv4ޮhoԭ$_%PH5H%K0֬o N4KLHNH[\#( HoCptr*=n=vԆMM NjОҾL/2zR+exn$"4|"ʰa#\cG%g'Ť-O`4O XK_Ml5D㷴]|AXگlPZ}vUUFs9;7]D( ؛<,IһE\+##F|Yo1\Vǫ?dwv*FŎ5nb׊]x"bR *3op+V`V#q4cM2ɶ̎U"I_Sy[H,KP1½N%pn T7p,v!hO}S,Ю@eG@ gɿN }hMdFp( :mBYU8{ؠ&:3T@k e <멍o}8X p{z eY= >)NP`;ćg( OŹL^+uQ4.["͘|<<`iiR ,5N")[n¦@'6Z,vN,C)rpRQ5QnOmI\Q^PHa?%H#N<:MѬ<{{ Эg%@]dLY! Y/>r-h3Zb{%tMnp?|Y)9oCXkƺ9'NH=B1#8iZr9XPUv3[:n{ǥe }ٹRWdի0~'#)gƆ_Okm+SeANh',> m >%bC̞נ . &ڧW] Z@N]2{|2u É]|t(g>M,-Ϗs9wİJDHO |ouH9Z~*FDmwZ蕦ʙj r,_ !@0rI^wO1+\|6bTZ_q-t❩cq9c PgSp$%a u;M_ )Ƽ!An RW@jҞC\Dh%1d% yp2A7\*uͲ/\1%Tr8³{e 샞\8l(EvEҰ?}z%.ϟ^vgrW*r`䱃(~W >wEjF/^5RzpÅbA)MxbJ W!JXP]1al̙Hb(1TIv.x3ʗǞ=z@ V`sW 00{)Bk|cPf-2I"G{/1QӘI`-Ś T$?? GN%ުʔLW(+b` %󚧐wloIIPR++f?4)o%RL0) u6fDxF_2 -Ys3ɋu9^BO]GP_BI lZ Bu::mDbG?4 iyp`'Hήp-,w+{&JG҈bԚ4gbم&v%^Ƌ\+SH!M8x9& y};4i%}}%2#f!Kg yr&8;֣#bH;!S7z+ZK5(_ΘqŎ=X &Zqn (~9I.Y0ȟ]a&ID{r~yb)׉«>};j5ϺEK7ϑr{gWrxf`8n@Hv?3r־B1|7>v9?I(&T[k(@44nѨUdLz۳! 
*`;X&!-G$f{b;NiFXݜr&xJՆbnF(3 dpQfHC8VDCd0 YI ~݇hE;*ŏ+J_l1&q{T^zǼoDkvT+8vz7͏He7 Qke;tJ׍BPJ^Bi $kR!y5A.FƸ/ XH \ F[^cN\!fI飒ٗt4 0jj0&4^ywdZX8cz]6wɚ6 i)#S!XZV]ƱE<ۆk%ZWT8 IgAXhA_lAMq_SCc'[r)eS.=1H*sZ() սvy,Bd3g_ɱnHgʢY uiNA z/L:`-B[4Y(YFvB bZ!Q\w@(47S-2X b 6(Zֈ6 ͪPڜ[¯XuA9'yuGY80m(QrskSOǁCāzD!qL%j;VZwҠ/IՓT:_|!ݺ7sA@dh.Rm 1F΅3O F\,ׁ愍 "ORXK0+S5M/N ˱MnSݼ(Pa!A&[x*X,;?APʖCI3@ʻ0)8mJp\ Bډa'L"j(1 D*i ̳k$0:.(`#!3"|!۰g&Y[ raGϨ60"r[@w4Re_!yMK6Es@-8%j+2#zqGtEdF@9YϡkǕXf[ׯ\i2PYHʗ 6/xwim٭5I pES̎5v rewQ'o_ JPbKKy 9CHq1˵Kp f"NƒybH'[) d.; ܡ#:zq柌-^:F"TTnH}Q@0OIf4Ր >NO,@C!T97p4)1*klG~NE6 s`|AmԯMP~`%/Wv$].s$`/K /VztRfwJB5E0nyO~wSq.꽞y[ љ!Yþ$~{pd+95:W`Y{AY{0&N) }hm/MOqGS>^lY6ߡ5ɞ1z|$}h/\T[4L·,ڎ`Zc F#ema=UN]l<^ {D>WA?9FӚ⮽Upn:Yf4ḍ}<SI.uP'´U(VlF(sj~beSBBcAy":H/-.#^^˓wB zsQҴ fQ"Y SӮeOwmͱqt v^hRl<0k-jU7鵗!UI}Q ulj^ ,#Ȳk Кp(8l )s IMsclQ||ۋХ (!); UG!)=c%PV9!Vo:YG&c{ ʚl 0W`d IR}_[4% ۫M7, z 7k mWK(GW46ܩ7.̻2JLZD(Cs,Ӗ+bEl 8REDe1|7z~{oaA}nssyd`_C&!-7p涹sW Qj*m\|oK( n5z5̟e4s˦$8򦻜:@8b}AIҲ'RqEţ[T7#,=[I`8g{QzW]8+o 3[2[ӸGxm>ರ-WPjA#T[6z%M&i1CNg;EYntH"eXEF . <#"STIந{oQ(''[•iǡ87IUAx%4tN#xilzv6g4Z8 ;5rY!F@'ԝ9ǣ>jZ |U~h.H%TV^Նx3xK|Nxj:ָ99I~hF@$V)-2 X ֲ:qJGPf06gغƟ۱QA Bp#<J16mS{UoJm!ޝg? !e6'v1SAq{8,=AX:fýz/gWXYtrqm [G/5+HPPF{U?sbU7JYoQ+ a-B~Vi D CppR5 [(IfϗRc| w cf 愌=0K2zF$~IX{6X3 evNADi^Π~7y9x~.{uؑ:Нwu#ZTRu^FC4Syk/D~[C"TO^{/;ir^0X~( s6da:fj^ Б0i<+np~l?7QO=>rzyeǢߙ5:L ƝoQ?F`%o/L/KAF0p J2=+,:_ΏmגWh{aI=Ȩ}Qև:BPɰxg/^6Mh4Hn9ݢws,[(snfnںUU@Q0h7zܶi[3Dܥ!ξS?a IfYu] EpL?]&hL[6FևJ{p#/B79+-'ָ"-#"Khݕr%LA|ζ9.j!]@&THG6?Sh'20.c &p?&>{}vs*a- $$9[}Xj*۠oa-*| 8N${:Z>g^Bc*$^~*+[ekg:q{6LxsTY#Ա_{tN[\i<ХJvB{}8nT 1dQP@CH;Һi~L\,i E“2CEu9BɂkWCRs *nї@&#t >VX96%7IƮK4p63#/;W/(hm ,/'zýXpQn.r-̴RSTħSom߲Z 1HCY0L²E)^?9$^/`X^h.aDS5VJ7Ǡ订ne*8ͩ.h}w1qcp~I&~ –U`0zćv)7Ԁk|cRqGYVDQ.MoߣZ? 
g3G4Fc}g*_GQL)JBZ'Ŋ0CO )L6{Ӽ,QumS"r-Rq]s3:`qdP*3pզ D٪"LHEԪ%}b=ĤjiwT϶5/銱A6\1x^,OeyP WWy^O KEK1o)53*>̔s% Oad[Gv-)>jQ4 a6qu /eZ`R F{jS+LBL֪| j@67RVR^'rqz ] ֿj&޼-n+=::;}e^NL%OUx4;Rv^8r4.R Anr"Ö4g@RwjڭП.eڭtpb\~;EzlTsœBH\F/؄.2C!%"!a2;^m<[Ih}{ lh 5I~tF}xV`ր,%Vk3z$^ [mLp^r|j Hj>T; #M?#ekf+m'y50FLW.c$aL*Ee%4̙3Ȍr%ysY$ÑpF~.Kf11 q`#J8<#CG\HԯF4~ &RBG]8RB/$S2+tEW5i*^@B1](% d T z6}RY8}I`2~!p]4?|3 wc^ezӈse茥ݵLoʍvS0^\]A}Le=h$tT~]qOD`Yp;F(34t+22zf[!x3IIU+ "I1%'A_&]X+h/{W.m6ZÜt53i=ktۥINd ރWdf hfL<%3`?ex@AEe(l{@6`]CҴ3DPA2ޡӼ叮O֟R8^.ebh3:[ EH֜ DA駽kӹV'\+po۩RaVЁQ[q,_fHhBe+pC )[!҉)8cޫ_^`ai$. Hc9v}?72E HW>fiڮ2=eoU;Ck6'WnD0 AXykߴ2%} MZ9M삑A@m`gYf-|@N%Sl1;ҫ'R 9Zd[511\qYc)X=QzHqPw#7Ui%"p5멩|*/"]zh.UGU^;T͂ㆲp ]kMAAQ9e*YsX.O3vVF'E`޵_Pmy!tBbB8\2+Q{1W uOu;De aUmt"Iޕ@#(>;@쳗QQyp 7bmG.a￵ ,+"M$ GFGwI%_FvqQ8KskMűl|U{*]E{ED6Rrq V]éY`eI&vOxMZkS}W34)e㮂_A!cHÛqpOD7 /SKWt^6zAZ1*3L̖59i:;ͻ-.zn OdȾM=[ELsd]N{W%QkR2զZVl-U@XC1j;r#l9FՄBgk@֒Z;槁WѬ=Vpsd4 @9SM%NpIPQ'?y@6&9]!VUi1:y&Rg. 8/0ծKdJ7eޙm[`tcwtJ_G?" Ȏ^k)WW ;U|}u`YUh8R&Eu.qxؽ]vj,D(AK#LF&tLvyܸhњ7euv"rMB[¸W3*Q } {vC5lDu yE řObB\lUuJC{NQqg0Oxim|kfL! \׼F%inJf4! > w0BU狇"M K+a!"}ͱDpARt_6 m/Zy[Lfshꂲl+&X.w\5Ds/(+Y9wxu8(0R.Yohr7 akcg…cLa,Z3 m^eO+ڎVaUR/eGF JGNjא4o24LIC+a5(#Pm?| =0"L9|ՖR7[@T\y}PcSst@wM4/`8XC-*/Y^o{_O%[@wup83ŝ@J[ nQU:a qqZU)#HM'g]#6JU!VJJaٸVkiHcz.Tݍ#Ђk^Fu~[<@2QYaF$ybP!ZO;ve]bIE@>c%Z߶r@nSjʠ]7h|Td"#X*S{ACl8&`  1A"!:+'R9Eڳ?PE~,G/0?\ 1I1$?ׄ{Qoy6nyZ BxIݜiM:c_I?A'H;PkWma(̗e!IǷv\֏H _UA\5[f~\>c%?#ثNMA-<UXn*C3xb.~$QMoar:ڼ;y 7Za6^[~M0)&STkcJ ]%|m/4 Db8 Cm7s?xsKÏeUgBzzjDɧ^Đ3y o($WOQaqOӀ>-P|# "Us߉\p.V]лl@j\ ~N j`z(M*W&9_cb^z-}*%~]GGl_2HuafBsPY͎ܟZ ?\Ӫ+4hh^Xۯt@?|F(<9Xv 9I+jRrKяm'ъCZ81͟Ef*CY8@/Axߣp/ΗzJ[ c#?4h/* $/Xe7{SkFo ^ލv2}PA?B/ ]Q L zhAL#]H2Bn#Xʈ|;;$fm :H?;|( 7Si}00Ҹr:QBB➨CtH%֏`D03 ¸ - Mk;ŀ"hpYk|jJܤQOѕ@ޭVW+˿Y%.RHS/L!jJXgnvUu3;*\j0 r :ob . 
ZJjYě)2V(O,H~AT@)kgIkSܛ Wyf!'.fNg}[iG҉zo)x;EkBLШOtr\nOe+Ku-lAǘ)'6 qL-KU3Ղ~_Bx%A @UdF9,cEt޸:;=~v:8" *.Y 6D@y-6ыe6>23ܯ ;2%Kpg[m45N(^},';PruAFA;1/*UVHFB& M}쬄Xcix܋}^0Lbpi- oc"*JÁ^XWZrMVXZ#h4Yr1'1!5p8l8bJ]ne9@f"sW;*f#: )󗰍lo!,޲Rz *p V)',S>B@ ჺhcrXb\[J }WTrMojz.=I qT'B].^%v7(qȂoye1ʠcs퐙k;K=Jl5;c$RGKpy(=h[CwTk롺yYQR00H1Ѻǔ9jOA&!^ݽDyUtʡzY#OuGx:u8C(,տ!kEI*QFE4 hᖘZ" j9sYæwn#w1=X2b,BԣǖFbc+yac[(=aнj(Lբ8;q-l$$n 5۴ /YM/:[ٻ;r!?&ճ83m 2ae&-,:ݠɗ=IfHa_&*nH+}VT<>C~HUhpQ᩶F70omO;sΤbN޻[p y݄bL/(h?P sfks4wcaQ!Z`bad0b}Vhʤ3BTwyMEPdnY"BMVZY>`dbN4=")8ҳZHqE:$OQF"&7 OzK6L:^j;ߞ]eK"8Y:N5 QGK9H+WLt~fPm 44c5x~%bK~6jgۤQՠwێLX4&j y*{DJ{(Ieq56`h ogK`,}#Kf)&U"'bG)9*i|١3=&\hKoptHxΚzC(L3H̥]̽S /'þ.>& GiF+Qw&mj O%R8qaщ3`*^ is0䯿vl\#a8@2I #قG^Y2 'Xx=׽bT5Fh![wWcVfAwby ,@ǟ15D|9`THٚPڳ/0+ >}1pJ\QˋFZffaoGN>^Gj_+`Q$g>:U4.q_HނI@&й/?[AB{7|00,3ÛЇO "v".U:apm(3*Jr,숁^\mB?{O]\7 PQ<.S0#>z ]17L8gv̎M[r ܍OE׉ LW: WU͞d)`ݨ\@%'W81F`F/oẟ^ҟ؃sT`/"ID[XbEpܓ6yڼ\u݀gI<;t^CA+uKt!}aBFC7ۇmb_%a#u[΀h0ؘERt=HTލKU'P,~R23VAb%m1}k7-^ˊxHfM,꺤&7 ^++hA SxOHy O$r<чbJs>}`Σdm)7 M^t?^nJhr4|]46@gz{JU(,.}iD>.0 Z磂BLT3@Ҝ*[ ȶQ/Te_(+܁-\~t8O"`A5X*>- ׏+j[Ȓ3\%G5o7 Sy/+iP)INh#:/k2A6SZLy\o|ډ@|kL-movWbSDZ$x$$"S-=#|/ݐm)b/;w!j8:ZuOVDu:n#zF$ڝAJ!6(xR:k2"Wo{!ȾiwQ#T'X5XKl }AG6T.g:N4a(g1zΈ sM+v]; /A",{/ ={7'ȯDzP|ʔZX?rS}KvcڵI ܔ ?rS1[`|Qm{E7ZM+ u9~t }"c^:*V0bM,0/iL\j75LCҚk=9CڭPA5T?I=¸J'D?&g_x,HDPI-6^.* T G<RKC%Aj*lC {U|`WG}RH}>lѓh/[fAzyr)bu:^;yJqF{ ylND]w aV$6/NVDvBP`x6(48wUuO&~[na^`/xCJl| hWthq6Y-8?RL=Řh6io#Va$Ŵ gL.`w$GHb24PHao5IfKF/gx%oS;nUx`8&H|4`"On!-p64"")1 V"[CiEyQsN o'J8"&KzbΔ=F]C<ڣd$YMۄrӼn)x)Ow'1K?1TMM_c+uCc@AjؕF1ȩ%#'-L!=N Rb>NB< ݮ ^ "Uɇ ~Ԑ Q54I$#sfm,ϴC|EMNԷ.39IEDDx; m_$[f!W3G@#]{˦1Zq$Lȍm#].k~o}A4PT_LDRGoD^C$$uj–N.Pްy! e˷RD洉<*Gi2%*2>G+u%Xa$8oTqiatC:~/yQ{޸I&wTVAM /~W6CM"U-귖+үfCNSSI0Dz ,a֥ߊ!? HQ+ݫ^e [;ĠZCHDX@-I;."7 xIj lNYHR4 4=z굏UDlAW!djJU Q5KV1wCÃھ/kK! 
/G ?s=pgW O`(HOUJ&2odX}8r篚wlẎ2xKG[|u`M"d>QDͫ9S<_^svp}E&刦Xn$g{:ɐZ(df24m%.W d*fkf)ݐƳ~۵i@[CCnثauf^TWFBL 8 ]rf(kY  Kټ9qՇs.7"&ޒtn5 UA_= 4p>&$S;*-x qtZʩ7V#?'DžPHP?e[q`H| "6vwmrH/%ȔXQrL5bp>`ϮMSj4uܓYO 2,`Hr6-PZ:j ข %YjoNl 5 (^q׽﹜"\@Րn8vGtfaJL$WY8* 4J]V5砩"G3D_͚5yo;bP_RbvNpHbTį1BN0]kgo_W#?{mxSl2FcIFi~JpFf/QN=S'|Ј L^#/\_rȲ/'*K꿓JK&NcȠ-̗mT6fY?cا|K"kwfsp7&tEp7AlmH;9NiQ4!!pś0(1x7{p8eh}Oy2hM7;޻b+k87k^Zď򉁆#Ā;㷛B0HV6`;g  UHf%N|K@Gr +r < t/?0A(xCŬ]V OH3K>Dk>ZލviyOHNǍEhP٥7-봲#uNG)~ jͽ0O~C Mh0Z)Op¡ؔ2+A^fF Hɋ8X1ѶW2r,ZbPO:wm]-6:\Ġ:ӘpC&bp/D6nZϸ:!Y`!%I{SV]U_N+=-7q,-INԓ1@39ҳ'hIr9!Da?9.-@ׄϠ8憷!/5C]CXPS:.VY#E}G=1nYU#](\v7.<WaVC߇}p 'hA6cΒfo@ VyPl1 fĮ(YDTRYYG4>'4J׆K/DZe,7MFjhA xYX=*`I_JL'.HWz:г'6cuXZxC d(Ik_$(d=Z88Cw>w۩ fǹY7 mTH+_aW@xmdYS9q54K'"9ϨVuMqɑNx˔at:RtἹ-" {KF:grq#(gZڧLTij-@^g|Z^gNfaZUX1khۯ)X҈$u!qTL9ﴶI{HCлp G VJf{;9=t FSuI2ds Ri \“l j*#ˉ  䕻C>꡽9}O`#;Jjm[?R#&6y_wBT3voQ弩'ƙV/}){(@bcpD1~ۣyP&Aq^6ĩB9YWu fc5=13Vd +f'0,70hdBѸ= >X~`GKg%]=1޻ȝsR&&E aW uN-\x•glkY@o$MQorUJ9UL0eڒtȍXny]"R>՟× ov3;~Y)#[mz^O)x Ii)S`XO.lxRq$Q&1r! -RkỖ(C-H^:^h7sUNRkΩ%[yՃ]3Z9Q}qcmzV5H*ťb3y@H]ttcCj 2 F0ukMÙ+/C "O?:)fkyq"hLy0 t] G>E@RǸ@h^ #SN5{cKA>nQecPc8T=da+[ƛخ^kfMjlkb{k$aiaߖ>C] 16 dR;s/i6f kԵt6Okmtt }~1#[ ]3@7Z~Y(ɋ=_}wu!CKD8~ٰMa퇻ؒ H0/dq_OR:m>JNaZ]IÁf-q UlL].]W=2bݱ\=KU \o)%Ô=B]{dʆyTi&WL.b& ?w?DŐu5~V!,Lߺs77*( wM\ ;bMd(HcgEEOwf_n*0 )H3$'w"۠RPgHC>}?T9b;G@@+&M&aa冔ԃ*o^BԈ]7"@qKx>տ^[==~mggunX܎/kS#':u>NjVĞ=mgGF|6Hjאgr@V8y=ҷ8a&0)ʜRDG)FN@2u#h-$F>_xc|ң-l<\sWChXj6Ǖq&2H]T%ʆlLӑ @wzA[ٽg+B ~nL{UN91kɴ*.]IѦ! 
Z+  1x?d^`QޝnIhV@BZȃʸ&޹`؁Vn+S$kGbdF#Q:U#&5+ζPl]1N3^!wwZ^P# Kɏc)OlT%a&z߿ 0x7R?FL]X_Ds${^Mh(hjK+W߮ \}Oj y FkJAmw߳e.yzry GUir;P|$/tN)\ 05HaKL`izype)J`Viܲ .dJ.PI[:J*-|EW&=&ѐ:q^@O+y@̼)7DD^Ù6#>OidڛDŽʼnEks v jUP-em5嫅Pt9w"|ES`8NEEف0/2XG4-]_%[?E228y+g:"<2ܚIh卶2`8 6&kBZ FV3:ǾㅂC<yB م.T.ewݳ_ʭ3a\̼T+s|>\xҨtv B3N]x|v JIPǪkfߔZsd_n?Dہ/4@j}B3S\k B[QR{ 4wыϼ!gC0vK IZz]:Չ BoֹӉMpIw`3у_YIBanI >mblzwGy]'˥с"t Y[]A+(2(@ec`nw)8}GmOvjn~/$}l0uInXq¶g&RF CB `M^uqwqup kq SKھ,"?H`0ᎍV{fy'T&lieE|$"Du)Cdcc8gcP-]|О-iiȔbKFAډd$%{6<%"fvhKO=c!/(֗R MaO8ppMt0`h2k,P-=cDnOpfM^vB2PzuHaCo=v&apS^y4RV D%/D;4( j% ЋB׎Gm[4ӧ헎åirj_ʇWpGmberhnh@Xl(q^Ȋ^1PEiX( ,ۄŷfd fCR*1O`LSLiўԧ)ѵȭfQPe@oiBN:xR}Fae["F "#DBН%p3K*)uY=Zkfne@g𥬄c#"ZkGbWeht!݉{$u|fj1].C[Ps_|BR[iUaڲ.a柡CF;aT'T,T*'p2Pi}gc 9%Lz)\\94\N SJ~nHWCR*DHS] S1,jjLkobxC_ȿ9~pMuǝΣ4Z*`sK:t}}\ZW"%~]^§3Z VK"I9Ym m_#ٿʌBs"0‰rA)]#cQA1TCF !ln}SYmy7X==^lnW\͹H& gJo wv@yNZY]R#m2)XL am)L@ѼOv& YaB!AD^dNIoJFEUM~cz0$PnC2*z^PZDØֳ꿮؀}Vh,'m_#ANT(XȞV&{qezN5Oн).p7P)j6_cZsWe F]39tj Al7P2v%0a{j&ٓյ͏u'LorJ3Qr*|-V7Kz(+ $cĬp[>e?ZS0c})*@;cEdՊ-֟x1-߅ۦfK:c16\X͟Nw h~r 682,:#sgJ .p*;_{-} :)~OEG O[\Sji_Eb']9fU!vOM03h!-ɲPDބAA*_R`i[hUԸwky^zx'fpj "Y>sm!QUڵ?L=}^ޚfZ;Ӥp>YC#$ zgP9S)K'v4,/(NJ菕RTz7ڼ[p8c6B:;YeکJp‘>4K>G{ !sX* 0I!Ǻ.nH.ˬ&gr'˺iKxIٯ9_g2X"NY kS=h <ɬ`OVN'k"[Xc[]~q:K^ 3Z&f>-p,֨Z3 Wa6#fIm &i^6r@,}Vdwxmfg\ [?Nv\:DQ8颇S6Z9w^ĖIu;I?* Q"=Ҽ%/x9ĆoQR\0jIR=8ϟRYl\k(ޭFpL]S Yq޿U?C՛G~ ʚI?P0ɒE I<'1])fr\jӧ~?-6t2wǭuRae#By/Ŀ_=~"$`sFnSJF؁8JHMm<0€ڳbh?? 
:m;!wrN#&@kBiet D0'ؖdDi>֕qoQFZ`!s)- fn0;IqC5n0S۽Uucםd*iHVJr z<(|L(|懱 C_:{nJ\y,iGh}fdvORU8  w`Cuh6; t [r6]zF p:)aK഑-[W{w HZDGϢ{/$B6gԼWF0y,XE b9 x;J`Pkl^3FVgZң>8gQ뽽c)@n$A% 7D3ZW6UEdn/1"dj $s4{Y,KܸŎ:}I`aUмG+0RG,V|>ε{"f+Dm N ڻ>؋X8&uP40.7vkm9A\gn5ڜ`4<'2cǓƩi!lku BXovC2TOHj9n ٘ݴDy- [#)?qլnnچt{#+:Ǎ˘-3f旈vx1FL?KEb8^:&%¹?GH>+Ahq t2I&&!DpWpT%j{xw0{'F.9Y GXÂ.l&jE{z#jn+8>TTM{LK#斢mY1y=%j6||YaFւZUΜjhxJR[$r^[ݓ~Zg㱟{%:Ddl;dI1ܘ0#j2س:{4bV\䤈//.W-\H 4|qtj7X7ٔ=K#B)\wey2P!NSVl8;w M?;P。cQT!mw#S4RWZy 'yic٤+F|M[JSq#`ہ_;z S0M93Šd:P, ~Q}0ѵ2U%_ )X[FC35Lި{cU{ON$CHB#LƕxU]zѡ>QzO:찇o_7ga>~xc31D!6zxQ5PM>!U ܵ/]Uș3 ;ﺾ9I'^/7J>$w@Ê'e<@kx/WP(mGO1&V,Z8dLږ1ب$41HlH(Y,*" 1ۻ4l[m}%ãb"~`Rx>qsN;#v Fv(BMgHp+59u D]V_}=Av'6}zX|ǻ9ڄ3Aj5l/9'+m%F~[ uéϠFΌե4. N=ZvIku$zS3>?Fo(}aL$5덓qIyShڋwzt^AA3@HD;Ty\gy+4}Ex.&koiwEf⟃rb_0 /%Ye@ ! $3Ѝ HS\/C7)`(A th@l3.)~Q5s:N2fD ]1g> ӲG-]Dh}wldYHu$(5>n:ci.|]?yGLx8LZ:4 OXoХfk޽L3sDr r Cpն*b̀(vԘ-yLYtTP!(#>Nti3Z7"MPT}r/OɹdvD3ZмI iR7˒_/W;HF =-A e6DJı#M=[N8 VCq$@ڵ7UC-CF f9)QDJŋH*Ϟbi⡃gNی`ސ'$#5  k]˅`t7A}P3UNSUϜ"g&YwQ PV;<hkH G"siB4g8lht̊$#<A-*5FDy& =e @46m}`}?̨ <[-޷iܤJ()+i덢ʊaQw4tDdԹK:>s) )Ib3)G)ϛ>P1|tPw~0ĴrSJQ7wUpmzVˉ^C9.L%1o}Z#b80] hrJ1 `q—na.}(us|,7V!&uVrP}j-j鶟3F@Bw8tw(c4̐w֐up:9YiV5Aa5 dkL) Zrnw#v=zYZg5eGN1{ @Aud3AE_dWJ,yS &'E.k$Al.| g,8 303 }= +9r+hO"xH20n!8T-{Py(Op޽M&ҍ] =Pa?ڲyX􏎜L>3%%yyZ_ z Qp1z-#`K><{=$6!-]LH -yBa@7Ees>Zf]q]YK5#gYsgࢶHO~łW|A~TMZ">r$@శ=~O7*e7./.wT@m5`jizm'+0l~&%fn{(.&Ӎ eG_b.%E%ppt-qq»-Rm(yJ9ҷxfu|шR^ce6 /EKK3 dYnQ9ej^0dVHqU:àCry|)F-tc`-׃N%jӐǹO/CK>cW6+7$c%n$5hjW܉XK[$c^A˔@26-aUTf\PMi^Z۲ٲL VِGs]V1MZĵqg:4#izYDdiuIJٜhL U. 
M ǒ$H1h|wR׉oÎ6<`Z{ D]-Ի1i%hh_F*Hz s~(Ef'[m!dsϚ1{M7ǖ)WnӆzB|.-&p%k[Wq ilӷ͎?xU?gXc|Aj  @a%`ufHI ߪ:$+3h|zpU"7_K#r(&-LwY>l?'(tSAq ,>( `75~•HL2t:dAtV Gq/󈒂T;]ۣRJF 4OmT_̐\~{B!aI<@N8K8; #9??'rSfME: Oχo&RH/=\ J27N@'mBz ^Hͮ\9^^Qš[.~ܭe_IDz +!'*1߾GF1-B .\o\F!O?rX4ipIH=NDnMfg&;*x7V>(u5HyJseXK)'Kq' u(#]IIxdᘕk>uI`O℣">N`01{d\ oG._T6%G=̽=%GI_ TRe K/dY?3ɯ]$SK),l{D\zYQAn -3 >m5ZCыVBrY"&>-=^wcF6 /8@8dmFez|ZʰX3_Kr ݮ|% 55nvU1RV\7X2Vwy&,9+V<{IsZ ^h?2(4"U3 !tSPdȚ5 [?QZmc&_( s7r.ɦ5Lí]Gp{G09:|L)hH7I2ZiƟȭ8%)5 g\{I{ܿʏCxɠ#" xpk9Cp]܀8`,H{+mqԑ֚7^ 0 $1dħ(}S,Rh!u=]##%")A{Ty,TCjZrh& 4T s8jAkqԲ*\^8ݯղǒT/#e&GX " CtϮ!&<Z4AG )O>9F4ތ׮-(6{`8RT$$’WWnk8^wIg7}K@M:Co:2FR~T)}ͫ|~n1` #nàa,۸jO`OZ.GLދ[z‘)t17ƒ7O?cTMn7P hl~碪sәY]Ëo%;+W%^5Mh8uYpfe8zv2 V曭IM۳Ӳ~VWB`K]H 56!C/FEXn]avrގ‡ei8_f"&H&Z/^%*_rbHR Dwn4O$& 7B6<Ȟ&e|H8ҴAD{jr  Pml<.A4,}K?%0˞Q[˓k}_Yr/g ,ɏ<.)=^FI=4WK=!bxz+(WyL5B]jI ;p1ƚWཊY=ylˣ>WLwE0wuˁQ_YdžRӬnhtzyU%}٨֧;|={ @9w$]ybX)3l;Wd ;Zk-#J6m40~T;&f}^~1R^ o nq9 @6umU&!Oٴ_UCN~8ʆEvJݿ4}w &\6* Y;\GgǽVuW%TsEy?"F]uJ,MVz_oruG)='@7T #BVixAG< =GvnU n XT# I?_?_'7I7L-귝uy% f,ƈ3YQXH"6)U^K\8 {kI3fVۯ#JQV~`0~Rs\S™>x w.|Eë |Q@T[+zGLXf kCހ9 8 M=D%ۖO]CTfM7Azƛ@|'ŁpmؕgEoOūі bf=H:}f`'q]N,c#AיNHZDM!PPv5 :V7?#Iq}5b]TtK;fiKGɇX p_9(8sbSra|F[0?p,8>aĢK{_J=|EEI= +mtK S4$4&@9{?Ig-dAf@A; ?_ IeA)t? B(P,ۛj7_)ʨ" R>iP7h&o ?Йi %LlY '\:pk$j0k0>$;7t7'!$}S|ANc>d]y <:z'2I@RG,h=cInߘ_Qᦂ8v&uӐU徭"ȥKus}= A H[ 1dLe%T1/׼4ʜ ʋ#$ckնIͭL@ &c[gl w"mb%1mҩ}}TM[lHKs|LmP~2`i.eԸi?USxNlhDDpkcEwJ%l̤VM%{t*K_F+mI5ofU?s/!qS2Py"E$I3q.9H;z`Pǔ=`FZHZ9O̐e}ZI,̒'ݣ2wU+D\˕APP "o'y++Ž +ӪnY0eeq <5k-Egx9v/ש/.[+MM*r]!يW{ nu*ډ^iuBD="` Faj`~?U{>0Q*9ՂֈJ` 8 gh՞,ҭsod6v3geͳ*ҵmZyI蘒 _=)Ȼ?И@)wOSȔޱHPW8ouSO' \UA)$L88(ŖZPVR(w=r Σ|ZA)슻յE+40B ]r ќ)ÙbC4PדA'DEߵzױRDs%| rzb=nuLuUHg~Aa۞)U\ xkUc;rS x@H%|Mr}RMt.ٶ˩ hW4D !>Rw@XR4`"'({>=2mXu! 
B1ZJt^$34/=onUՖM~s9zqώ͇Ւ2 HUsւ]{TQz 7 Y'" \a 5DF _t9k`m3&'PJƩ5.qkT\'k 0Xtɖ'4ۧb@8x!g.+ U6zXX$hrD 6gd`~':Y _^:rP7·Ylz}֩Fztk jͥB)bcȯ ^Ob78k3>הd,>oM>jf&]U8?!@5=e4saZ%k\mU"zVyn<'&NSm]A ųd3L>pHpõFh2.%wRY SO6?\P} ^xS+QngJ ]uⳄ;A>7 eW #5\ tr.lun eDg[p2tgb}Ђx8onOԗ#nNfoPT9(RlorÏTiee B2NɏzyJ`"p!صoJ0̗V˶fecyXqŨɋwQ.O/ګr$GQښ z;[3uN6T~r#,  ,IACYo]O 5~{l'!$[' í|T;v$ Zo4B4h=KŸ́/y^Ue dk󱫅ê$sw+a^6tՉ̯J>E` Gg)mdI93ͣLV汚͈ s1 ^Ď&z>wsۉO?f0C4Jrس'@k\XJ˧hjHiv!_a =/LjմxNbXCC.c6 OXXƭWs8lqTb{NS"AC>7g?1oXSq@}Z[_.vMA>TX& '!f.Yd;q711<%O(Is!1k?Zʫ1('|kd|hA#ƔV8JHLT:4}t#SUʪ,..ZGG2Ōb1$R(i1!*gdegj*W+ }st &O<H`N(n q{%m~Yq:WswӈFXl ĿÆðnA $\Lᐬ/Y~!b[E 9~DSl"J~+:ݾc+\Nw"뗀Xqj1nLu'k"I]j@YLDs'++:,]3y Rז;.ݫxLˇYR pMj?1 )c9>Pb{w9)beu# {D/}7}c!ߴEras {Zh`ZkۿlW 96u+VgoLpD8i/#՘ނd^sS'pD:Wrof̯iČi〚>TfXRCSsrMe<gS8#er$HSTcQBOo(MomavBQ%S ^ <wk?(Ms\'ed/GqK"$nr5trQ!:$nȉZHTN3-9b(!GvLo.IЈj 14 NɫÁyCaI(DUHx)ULa` G|ε׈Q :]~"^wqx4.2Ḟ݂ώMmm{my>".j,'Cӓe60w/Vc.YOa`W\n&_+! fsQ<@Ԕ?o|3B$wg|p U$GdDqz٣)<79]Cr 5_y#`.7 v#[PWrK_$s0%JTہk'MwiPTK 'Wr{Q'ȃTc2&Q;II-eɯ[vn;d4cM#~0-ف _4!ц*q cC1)auϜQ^d7vfXHyOOa4V1zpNneN#Z}:?qN'7;pkxFs}"VR{0msSVu-ȩ8H!s ӑ0'5qCW[N+ʹD6$U)Ѣݵ@^b)6dox5,*#'OC:q7-;ЭKEYCqb)nьm*(UUϫl7'(2a8F'sQLf.im!~{H^?nul},fˤ zlė֝ /X;QgaH  -!FM[Kmp6up}q~|V}87AnzukJH90S)ڧO"u@2dx=0)ƱXgQ72yiDs#cG8!@{Kg,~{N8<d`6Bۓ.+W\_)OKsg45䷹hMJx" v9cyVusUVMH {LJ4Im}, sC~Z=Awo u`)‡X/c3щ DRQe=JSңw3a8d̢L k?*zd¢cEwqIN)ӽXz%;Θ%pBuH%"R1Uy8xeShF:t4:Xnk qlܐS ô= #zM=8VXp/nVGh% 1s-N,.la'phF;8H9nR~ZYԠ$w.<ջNJ_I@N>/Y^sׁCm_ujßink>Ӳ UD:r:|fo Sނph(%=~H*9Ev25Z_ǢTiVև\WrOÕy3~ջ" -@ߎ9˸[d '׈y)1e5Z$h1T?NpqBd: @J IlrU2M{%(_COaӏw}V\!1<#ۓ"ū7#6 s]^_۫9յSp0s/| Wץ"a_h¨Ȋ\s0J>KhS'*$^dBY3w8-as%% ?l94|/^,= ȼ׳Xh E)ZorGrUI#9NG2گDYL?)~o?x/\hz*O[֩j~4D(R{:8,,/ЬG_f\KAkGa`*I0G,Rmmb6a[3 z ״hp{EX*6 VV#| L~'nӀQ(a갓[|{2>8 L6P);D0{8S|S"t9%z0G07$ k ^m6T ~5]8\TrYrv[TƘiϿ Eښ >"P~lE]|2č{ .ƟR#H}`k%A ,<~^@'I(6k57$Q`['}֊l8+4p0 G2Nӯ=U:qp&ac2BGO6 sCDd"/s :!O9-<?ʉvqc\ț?)CQK} #,-6 0m?D *@c5IHnHBaUij'̠|.y._IiEwnZ{<Ư B~!!e2G<ټ Y:F" I i[_`[ƕ4"x@/d# /r//,()kMQNRH ڇuf~t6o(ٌM%RTɼ3‰3l׿:)߱4`,/W5܂31$#yTf&t" sO+V S @`vt 
KxtAC1avs5`g*5<#[F}}V=J'q->0ФnߝuόFRVA(j W)N-%I~\9^y~m_d!%*Ryc>cPck%j$ 2' '/rH5bvRX%QQ gE~ᾌ&%MP;Ko7MXhc(3+U =bz;mUlIuK,|`ǩZxK&wyT6y*;M1-?c(2-oƗO2u1 miR72"X\C `[\ K0M{,\[,nOTt:N/t;<+XJO`a IW:ClӠwgI%/ucR).`]5dDљUN(r}\dUEWJ!ٞ3юu,2;,AvLLeǶݖDkg7~2wkfܞT?2t !@2>ő|gK@Tbmg 3wTȪ#р?{ xor6>F)ׅid PeI3}Dc:Xxi Ǹ,̡Z"8*![J)#`Ahh<55ɐ7jNh4DtcݖTX:o;z`JHS]}rd`%e)NF62zA9+к56 MkmRâg27%"MbZѝ1T!@}>SE`lAϚ){Y/ )RkEM;rݓDG~8 $z90]DjI1A<Uj&lT7:דWhEI'%ql+Vl1T4Ճi-ӄ#fl8-'ﶘU;aµ|ox+˼GR8L7\t+\l"IGITVFk0SC$t AMλ%ζ(5 ňIxʒ^l!YфLXR_k5O5f\^,`G"NX $Ta}6<1(H >oPꩪ=Cb7~$zp/nIi|m~Rqs}-C0ԅ&9K1C}+F}5]*e72qAxG+(~|w-toI6ZE} }2# %bcNkJ&BBPĨ!Umkd3Q`ZUސ nH*`, `h5#?:o5SZPyO[sgA)3x\Q;Ty$qof^;#8v]q@&XG;g)Ah4$=x7]n (xsx@!>wˏ]qJl4==> c^>Hj[к]KIDC/HUɗV*7j6IǤH-_qpw;ϧ{%[dg{42ܺ')mfSl(})N $][ZOyiߥʹfrKޯ 0۷v,**@H<ppoD泱 Q3cȠzW0RDbLLbuDT$pk|owߊ/bsçst/Xq5䫤Ǣȯn;Z"y{?2ireWe8dYrܺt(곀r#j:=ueB2k.{Dļ~S:bEv"aG-㿃*vm/( Ѿۄ w]p U/s?]ӃֻF:|/Զ"jhPi*95+\ VH3 /ڋLZZ_iF)dflmW3R`^Q~yx;!%na<{L2to ސҲ2&I}f`C+k& lz2juRO6f; 'c_&e6$9L{L#b3lBa cZV<@_Zcr&g(rSmmVp++gv(C9ke[踥[%Nt$Y8'8ǴhoWlp;17>8< ԯamAc]5g?!ߕo"b/bQH ΀f&U(4!V٤߂( 'nKMqeC ̇jCTHb=mR/|@ՅiNAfCMFh:hTyZ RiBE%)L&[,u!4 [N=07";4Zn]̯;&b$}͉|_ebWXΗ7,&4F"VE_ _?RE@_Zr A=S3m <0l͕-c>2J9d Z-ݑetUI1Mw>hk)*EfU8 !VE;7ebF|Õ9f]Hxw c~!6CYy"j_ƫt3C|aV`Je8=WBd2>κw dՉ>hd'2ӳY7~.O=8ekZi7ɹPwɑQ~|iC9ZWLuE`=xV*Wݑ(r&3|8ٝ9z±5`;rOmί||z ߳ G毵 -?|p\=1ONOФ~y9C-c73جJk9?{{Z+k6#i`R]jMz*4|~xI4Pq_ghu*;6) GĪ~nV78Zgv_~Oj#bI6- ْgU`X l@XERNcK턯0 99M| k JG.C%j2zocOQX#|R#Rf`qZF b h Ϣ8w:ަ{@]H9K/Yׅ|(?Ө޿D4Άo;u7"ؘ?8 ^B89-m>\{qUAUu%X44+5u.łclsTm{?{ S8.5oet=.$sѱeSqz2XlƨĦ7"oYZjh)CW=fi 4dg9쳣ѫ=/kޅYP&wzC0#]7!}yӌT=7aE."K=ŊS`#r^qbDk1.PWP1?l .}*SUGPA[䙟dUa3+C`Tj'|1Vw,FC^~te#N,8<'ϲS;GP 3:9o®(gTLkrz&} O%0=ц1HlQfd=͋r mh쇫 6*Rrgey.яbcwqF5+ 5/Ը'l|a A `LzP*n'iaI0*M|NM3fʋЉ[VI*-IFQOy 4zrXwmW" E-݌*}ʌHDӰH(Ǻvqw5*t!gN j6z8Y\Oh_B:5?M@ig Zr"Pj?Wy4 /e.nWxN|>Y&ȧVT_mB엄U:1d/7% r;]E([&d"S_\`O*fDkxws; nHCt VSNqsqf~?dcƅ JH,wc[ՠF=C ^JC-߰hxmEG8Ry> 'JJrA-]fBhpˆZ=)5}:KUV5h5 (Uܥq [ Psg9d184 xI}#?-ܘKML/QhWdN7 *+c WMzwKfiYX!W,]5# ~:/!`^!q.YAڑL"FjMoN[8 M=8&aS6 
QՈ{މHV^I#,mF'%ӽ?onZ[CF!?8Х-&˗Z},Á+׭-/ƺàk0Y=HvտHtu~U ONTb8JJ xFN%4|*0%8cWǣywoך`;85JjaP9{pOL` Yշa䉍'9*MvMƹ.47n 1ahvtCRi/c r^q+Ds9y1@Qv<Y`U6o]ܨcx'P>OȮUp/:!# 0bnq) G^違-rqG yj}ee,-d;q ɝcF wow9ڔ#6xz]aϙ-(ڠ5 Ry&7dmHm82LalEpk%&$@Zܙ>Egk$n ,g.>|rUΖK<ChsnKCe%B @yx݂>R1Hu SFf>tuv3 N3Y2ˏ#BCv O}yw/]Yy603Q ` Tv{7ȗ3ƘUi0YG <4*e[(JgKjM1YKݫˡ''X-#cD 9JAƌ d5.M-p čɗG|vza8?1qA6qQP/NiM <-'Y">6J&6F z}i4gȡM|"dz353P fM^ʟr[ 'Xǎ5bML\ fIJe8e=cbK\-gH8 ) ւo]~ҷ\ms9Gj-cկ;'g7pdcc &Čbkp_̨a);\ ?$Z#s4gTӢo} cf)3qQm,˯3!xBLn"'S%V)d/MuL +ZwXlLbs*6%C< LT @Z߂eɀʱ+ Zb8\MJn/œA,fS)Gьj/5@KSdw73ٴWY\ߔ;;r +*4RDEvSѐ7">*Dƒ)JskoEuPZF~U2w 2ـ tAY9rs׺͂lO}!L72zIBH$G$fBŧ_wl/탡K +|! >}z Wl3ƙuA (ߝ2a8dl=sкFZ"ʛ\IZI ߽cN .)U g"б!v5v3[F+lTV#`*WY(3@hF.@)""˾+Z-gm߫bTg(`V;ﯙ7(s[j0WdD|ōHl g6`ݥśչGݕ@=|}U1*U\W";d@yc2kI;hU魘f^Ԧ<hU * ۂ LgIeX8 =~ȿ"|uؼ*oGx̸ҌMnS< : MO 3Iylژh+nC33&kYd-~?,VWWB*aEˈk{O)^Xi/*sM}U%g~ҪBxҪ9CF8eaVp|BUZ/ݩg;omDj1ċn9Y{3}>NWsm!JtK?07w^s#B֚~2|wÊq! ( ?0|ŗ6WlQD\b7Z`UM *{LcP8gB-S'킦D1S"L-N/{MBϺ ,Y_<_*Q5՞~p=ZC "ҥ:id~%v1o&A *КHJ=w|-9h[DBZl<ܛM_1m>_JNާ bgGzNKYc뱭~:/ݎtT–yw\VMH;賏Ob>Pw`4UMh pXX?t.8EmfNO`ňB6T^vY dDA8lq?fby TVrnJ$ϴ_=M5VI U*b=*&vs3ExU(61RL)d(eWY Qۯ =QGEÉ% ![kGk snC/FHYN1m7$A ɜ\&^@bf/ȶ1Pdwp6w^k+["u1#sn|Tt!N|-*Z~ ޲PuəcWq9BƟwiQQ*RUiqQͼoկ܌^˪+A_$$YY8s~EۂKBal Għ}weYp0pgn\J~z3 Py]oښqآ^R+urԲ"J7ًνև+މ.P) {!^4qLp1Ķfegaш T, 71Eh2ѱ]H^MSu!)5fv􅤗#iqw0o<7*[L@|ۓ9e:@ K%~3Pϰ{gOLR|~SAN>A&?xggwoXZL?NUxX!;8tp4cg$/wJ-LV mQԺAr;Sz#ۡ.I[ȺOt!/}RGxeoo-KTbbl{8 ZJ喉#AXrQ\XamEzY:6A]5"-_PK=JGQ#J(zI}8?*[-((`, Mea!SNODx赵K10bؗB DBѦeKȉs^0ըlyG e`/v8~!=گ7Eϗ$H]c}_0ؕH 5qRi o3ӫ*=~Qt2 I5;';[ntIEgB'񯆒GSBKzIW[*h_~|6<|lajd0̘([f󗐅DaM4Jɀ =~Po׈+J?903b0!r̥=SR=l m;=/~9Z"m#(2x?p_d(DiBkb܋6=2D&KRv? 
TPAg,q fُd\L;Ί̧hd_;vSa lqsS8RN@F1| YUrOQ]ǖN#YMqwYmu/mw 8i%IZfk޽(Ǚu=t܏wk,}Axv0{fuK9|Í 'Haᓐ٧2 [y( kL̑FMK7Nãj$jj%*kӍ&F;'PǤiĹ>?.1F}3_:'6$w/ߟxm^X꺗bWt[lbhϫ{ٚ  j;OGϪbQ?`t/$9 [uW=+:؀{S GHoEcl#:+c&xrKOM.rGћj=`y+dg.%˲*fjڥ :`_9PO9D ~.gURLk[ӴUtcAMFzΓ.2sczuI>0 YZclubSandwich/data/dropoutPrevention.RData0000644000176200001440000004032413500611663020260 0ustar liggesusers7zXZi"6!X༺@])TW"nRʟJ zE#w9B̙n'y.;o(/~5(G)x o+ݬ2Ti3~/?+= QSnszĞ[ Dk@Y8U]RE7 TNNG'( FHՋg(0)STƙq~k^AacxwX|fMd$w+]hxf?Ǩ1V$(LN3p*p$Xq\? e|@’an|O!*E Vh;_9ʁ¤&Uj+3d&eHow۝qf4 k߄ߐH|)4[qza[q!whZ-Гq39 mi>~0@ܳJѪ鳷EYC\Ӎ-7B"D\pP)+4bsZ¤c\{yбB hTt Ak=K6?+P$&\SJ^D K 9}_lܐT6A)"ɱXOd7O/\}f>?gAM4tm9r.Jz9 <a_'G\Qhѭ ϭ~E=/ .\X<'i{ػuWq=I_R6YT9 -zA;g?KÍ,E};e T¦t;fK?(Zg(-^/q>bL~=3|)$e){'КR`}Tx_<^>`qu %J ‹Khc;{z,Y97!ݐ$A-eȸn3 2CG:5])eh5_6,k*aՖ}JMU&ׂY &ԻO:?2QJQ e}\4,6w8,uvIׂ(1n!"= }\yrڑ)1'[ u\Bq_ט4ubchQDbneCk.ƭ\C <ײjb~fX=(RL0҇7GD}1yY餱|k_do?.n)$.RS1mp|D(I;Yw{ȹR"gO_v_z:i=Y=20Z2AƼ@meHY982[vZ5S\;ث܏X5DY-8KB!*6JJF-Zؒ)' =Ⲇo 0RuJE]EY=iPI5}(˭Lޢ'PdaIBAi$h HJk sj>7\YAU V )7k1ݏ:9%HZ"OgFCޓL8YNI\sbvbC|چ6Tm5t Q Ň^uTQӯQ8Fb,19:z'Ț$,Nӭk0*8!R C:/+*CO2A?a #G#x~fZ P ]=+lÀFvzL ˷vZftjL] RB5s0d1"^uǺ%X`p">`ם=9et㨯PXX OG]*N׼^>CUvz3P@B\Da8\KU?1q]JsI2N.4o0s{o|KriY DwNGfNVirh/ɢдk-+wIa- e6rlHI5_|=c-*5DOw{>VҽҩtX{5k^`pPYĎI*_PE"n62F%akTh%/R[OLqo.a|_ s=tƪ:&ݿBRiKiac׷ȢM1ZqZ.~ܘ0Z\1sbwٸv,BtJGrg%9wT7A¶| S- ׍%+c[Ax}Zk"u} ;S$m{D];p<|/M¢#ͤGXW7J5ࡨh>䂑xLsнY,¼sU3JZ^/ lzbw߱Q;5:@,EŔ)@Ф|w (*Ɇ6W%SFlw9(d[}xeF8#QjNXQ6OgularKGGgI۷2C լw+Pu!TJJvzv_=,23)jgVYXeD)0ZsR=ΠYH5W!: iRanLǒlY'9PDəTwzY?p~@qm.s+Oj=K.ɔ!0gY[f€Ч|m@PYrvQ JO|Rjr^fr.ⲭgx d1'm "|M;~n+(턦- xjpⒶchB|sxF%e3r^ܖJgn-Fw3d[LҾT#̸4Z%ŝqf֛1>flӞ'w !nX,"(vVgiqhj-~$Cűjeskz>!Ip_K!w0~ue*KGP%;7c񡶠jZɷZ< Lﰿ`m%4~ >7ns_#QG)}0K(VuV m")pZbk$tX;3GXhj);pÂ܄{0܇^ᄎN[AR4*OTEcXzqdBFG/jKf]H,sh 㷋LFUfڧvi+-ƥg~׊>>sBPx |R M\6LWax6R3Y%&$i .%wC#!`h:>Et\rrVx{s}\@"ɰDf(s1NP~] '@oǞU$ڶ:o/ 4qZ$4_$CUbj<;\jYd Z$u9o._.&Nn=9@8h2R1}/pӭ$" V 7_~ߘ rX*xA)7_$i,noA6EQd!˔pOɧLKS#?\<| G9fN&c0S2fpH!50;<RL:KL.2=[^ 1XbNu=&cRiTgt F!%̍i$wfIu9UL;1b>RsYΟ}'M&2r[ڣS=qRQ<3m{AVbT:y҅{D4?2[rKvqj;hStgw1EAA 
vDNގY;zs>"褓*(Tք˾R>t="sW~; Ec[tvv}C.!@1^M9 }bެp!m;iQEx;zT5B6ӊHI&?N[g]нmWHV[+K P ";2;V|6J-"u(-ݴ^dA_ԩ>vKF)iwz&=mpbBMАAL֓4nM0wpTV`Si @|4G0U;͉{ytU݀EItl{"U~si- OpAT.BhFs}A5<ʙ#OʂQв&bMW׾pgq &mP2qyr:C" T7t^ )ѕ]W{$5xU(I%EufR8:J }1Q(J\NDѭj:D 1ce"n8!PoZ,F%Kf֐%YJ9h- s-XKD]šEa=/˲l45ꈢ'ajhպRœè ;ؘڣm_y[*#JK1J@ Rd'=rNae"U鴩R~>pggZI9ږѶv|z]Iai/3T:/~[R%sBek(E-7A?Ht!njeHݚYչ;>sJ63p'퍈=CoDh}rP~.ȧJ0 ٌq :Ҹrh#7{ErԬՁ5CasكW{_NCϡSV_ .s3$q5ZYmd*eP-lO{_DW6{u 36A}w 4*@3y$4 .G/0$0@0b0s1?(7'Cf'XwbR p)"O#>f#ZXb k߶awc/y( Vt[E@k_L`@`$pzW:9̃՘SE$xab C喕[}jOӠ y˘)#> k#͙Ly>odc=rKf9diϼ*G ?XA/b!FJJ"o]Q/s'&)?{gm؎]u':S>۠hjWOO*FU~_-q=M@^X[Lμւݧd:U] ޔ,[]*-_$j ј\ 2ƪoڴ!¾ZfyƐ7?ǥ9-k [^KAQefGV2>?&#RPb8" &S'aJD_W'H##=3A^p< @oJ˜Ù/@a~c,jɱN1ցZ<1@[bG7^Yz}@`\}e :3cvт"Ӧ4\Y'ymE>{>Ruc&AQ[w[6&aW3(A02Pu/VR#4D8)y(tds6|גpygJe˜/5 mȖ ݲa_*Lᾰ 8jG~H] 8pUNJP!2ƟBLܿ#VΟwַDY^m dsAvQ?趒 d{KLq4y)N4D"1vL,on2Ƿۻ'\:ZX%Jc]3.A \s_X|Hx;u?/5\PlӺt#U]1ftH2JZB}OLlOf0y$,`y4?a%հJaA7V{91)ɑ'KLH$O }  ΢"JڹdEbWd1 b )ik")1VV)^.z_iV{o5 !Q>(I3ՔBlr^%>y#pGe_PC8ƃ@ 捗1Dʶz3Bwi*좘F ;8Bj.+A4y,- Ĕw&ulw"Gh#R4݀v| M{0bK`D82 d,4@2kB~C/܍.`T̵,Qss֡ .ū ǽo7iM~/  < S_#3X: i%93* D,c] \5Z$vu/%h4}~T#2,CufLHK$g {O|^|J9f׼(P/6/y8ۤ>9խ~- =X>3zQ?֖}gMbv-l2p<}nWƟS_ʛT#%Nۍ=!2wi=mIC)NkVf@ W ~W֚iɒr/asΡJ5U^cnQUONEqU{Ĺ:Kٙ~Ғn]&8-VY~*$ i/`dN,/ U^DaEXHX/Z$› 78y<UV%kcP>ey8CJdVwqtE;#rm2ulŋصBAsD[l,I }9dE{5*"R[]쿷 )- Oq* ,FD0g5.z&ʊy|l.H`{/@3`Bӈ,;a4uP9DDp!I 4$4#3 T6~GWPM,rIU"~pM9|8m*PH!.0 ?toP(Ȳ2+2xEeу>F{R>񍐴%+hdfц2(yud&]0qis@zS~Yjė, Y-0Mq/I\- Pك__9D(}:}x\wuzoTTrB9Y(ByKdc~&6,A8qr8'14%4! 
?OH?vӋ؝P Ґ(~ko;p+ypGw|3 U ap:̲R2eƤD D k؉rm } $D.,yEәJ+Α}l6NNg|f,& ;PoCP{ӏPň?D_)kQ(H:ۘwt|PN/;gv8b֧.G:[^HyIX|T&>԰vqb"eqs!WvUBwϱjpWHu@d m \?lB E~_]1OF裛_d;Hļrv!V !he)׍1$qwIz0KL)ǻMB[|20Un&YlACing+~ir f u)1*[cܳ/AɑӪ<,B9x7.D4"9ǿ%Ls \o3%.s=;v|  ds'oA332{Gƺ k7;o( Q'4i{u} .}v"E-L)g5ê֊D鵈6v]y/6OrBT!=nԙ!~y="c 8T7@ ה5S" z^6Ze*dYWSє%M HY#ibaT ӐE|E6XSl:Z^ELT)e7=bºs`rH}BHHvkNgdӊN)vn4 3BJiś9yyצ~wՃxMV8很iОO3=>!ɲjc @ZS5 Ӭ2MjR42?]1.9JBأ =}innh"B8)E˞4E!T&4LRUpńB5|o= 0d7j R_BzJ_1hNHDF%7[聕)'M)K#m˵//Ӹ|jrV#+g{v_֟ޒn6Zh+ص}Y笢K#hSPN;"`mn͜8<ω- z~LKfhc XnhF0yKA$XD, ,B-P5CFp¥tg+JwkodoȞFzN<&մw ^sr|`{eU﫞oy~W#Z.6#_7pOA醿%=i_w 0q _V ΃V3;zk Ĭ%eWi%fuvu[$>|((T=3Yo8)hY@n}G$v~N VR*czIp-vTOXJ\&c_97[O^L nA S/&:8Fh8(` }M7JS?(`iQ~IZMш 1o٪¶scͣD?B&709w$>"*PBL/oCЬ{r`m DƗ૞ȤL# ~h{[=_xJ1C)k:pux O,׈ڛT U=彩b Phv!fpu;*~]=*z#5qܛRMqQ1dq#8I[rJslz! ?sz@XD{pd J:FB{mR86Z4so),U`ҜgqIss8>  zvwTNŽ>i=Y1>W2kw"\JrN @T;Q6m1DuOKQk}2ڨ#jK]%[S/BW_ap,k|M ۉ^9! QMx$c?z}1 dG2NFQ~I mـ`W5Rf[Y=$^`tdx~-׾9j_RLai6K0a-Omʴ zje?pERqR0- [%I՛0}l˙9@x!#wDcS_xCƄo>R4 $L.jڝ%V;w 2Q?g$0V{Y<)˘:d#WrCJV ~g,'ƆoOsJ4 ^~G{ PGYxO<1mhbm{]POS<pc*bqu"rXl,VI̫] %2`D"[~Ŵj)od'l,.HCz.[J0R yQ8 I̬>qQmf_tn ZutF,W8BwsCܗ_HDO4 q=N޷h;Od!JPIFU1(X?3IgaH둪GZ!"ckU }# ^ލac(cA) |T#/&}Zɞ8gdv( 2| 9̀wh ?&h[3k|7,Elob¶s 5"<_odBw|=P^Ŵ"`HpMc j'' t@YdNb R;es=3qh(_F, 25Q)nn\xtߝޡ9 E T^Qg<C?qDZ^imz~,,]9 ݰ%Ư[2=wtBހ6wtd(nKݧ>"ѳ ]rF" L{h 41O!rk f0z/R!iV|ƉI8bH;>8"JSqdx%E?@Lhۀ+KVi3(y3jbT&T a]|j̥81I!Vy|ZqY3HMh;܆zp!< z o|De*>HRݞMidS=%l5q2uߙ+NL; giu: +ǹrW#bX-~Gzb`5 >Ύ[HKx85{)V?NJngJ0Q= ~ R,H޷%I|F,K<3hp! DՆg@~X قS+lV*0IZԘ*'B簖1 *w|:T[XZXrkSg%Z,S5\gHN< f8ƬTP r+nd'X }Oh҅'hxzŽ!~@L ᦙkA8 |8ylK? 
L ]f6ל^*7(3M*7V0.gbgD;YD8X > {FZ:n=W?YMxo >wA7ծV?I7K|ͪ1;鑪[f愇̵; !{"ըDk Q!MVg +Ezwj)wz!=h"GZ]IexYk2RD$j%;߸pmQ/>nl'g*`;a8@J.Hy`O3_bEUH<bKcuM9MD[nVh"uc[~]CgHjJCƉTp%YgaHNq.{BG@"!<A cnK]kXI+$The2vK 6AdP'݂aI3Bʻh6Hc+iP[մu0O1=|ZO@\ |ru%4K]7-ΧrdRuũyxdm~S(U%e./[S=-0n>C73$٢wjϧ6u+: )o1wyclO ~0 YZclubSandwich/data/SATcoaching.RData0000644000176200001440000000302413500611663016631 0ustar liggesusers7zXZi"6!X])TW"nRʟy,eYlD ݋o/APɮyZ0Ƚd:#BMk|If:#ή+;w 7ꔔ%Z+&<g iJ3ҍXL&}f^²6&96R:kzFNB=:\1N?ױm$\KKТ+63V_N{?bk7p6*ˠ+iaSVl J#4N<󥄓99a `ߕ+ 4X$_m[UZLB"VODtAcf~Y/Rw|q$q7?}Q`>ʖ,1z®Z~lq.-C7,*0Oʔa|,Սft`mqo:aPhI>_d!SH?Q&'uB3Z!ʕBJF0|#4x Yw e8$^j?VXk(,l}Ѝ20hh:YS3ƫ[FCS4}CS9Inr:n;eY2zڃ^\,;piu*[/(l'eqlx|bz_-s T|,7mf*^YjdMcє"ڤ*] ϬjBP_Jw`@71,2Ԑ3QUvcRan╫>,Bv_+T/ #,/TXnR?K^wʚ `8[2RUMŌ9(ցs3ijP i\C\K_ gl: *w]#qYG(Clg~tPq㮞;[ֹpo!I떧fbJ1}O]\>SiF#.' ЮDCP.2V.焮N\Ѱ,at3?A:!66nuȎP3V~8v!->7kjpoڇj*>m:X@=*wx#K\ me^S)՗YCc4n1Fo>&qp?Gsn wꌬ&f\촾5 :Fw>0 YZclubSandwich/man/0000755000176200001440000000000013576303502013437 5ustar liggesusersclubSandwich/man/coef_test.Rd0000644000176200001440000000461213500611663015701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coef_test.R \name{coef_test} \alias{coef_test} \title{Test all or selected regression coefficients in a fitted model} \usage{ coef_test(obj, vcov, test = "Satterthwaite", coefs = "All", p_values = TRUE, ...) } \arguments{ \item{obj}{Fitted model for which to calculate t-tests.} \item{vcov}{Variance covariance matrix estimated using \code{vcovCR} or a character string specifying which small-sample adjustment should be used to calculate the variance-covariance.} \item{test}{Character vector specifying which small-sample corrections to calculate. \code{"z"} returns a z test (i.e., using a standard normal reference distribution). \code{"naive-t"} returns a t test with \code{m - 1} degrees of freedom. \code{"Satterthwaite"} returns a Satterthwaite correction. \code{"saddlepoint"} returns a saddlepoint correction. 
Default is \code{"Satterthwaite"}.} \item{coefs}{Character, integer, or logical vector specifying which coefficients should be tested. The default value \code{"All"} will test all estimated coefficients.} \item{p_values}{Logical indicating whether to report p-values. The defult value is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{vcovCR}}, which are only needed if \code{vcov} is a character string.} } \value{ A data frame containing estimated regression coefficients, standard errors, and test results. For the Satterthwaite approximation, degrees of freedom and a p-value are reported. For the saddlepoint approximation, the saddlepoint and a p-value are reported. } \description{ \code{coef_test} reports t-tests for each coefficient estimate in a fitted linear regression model, using a sandwich estimator for the standard errors and a small sample correction for the p-value. The small-sample correction is based on a Satterthwaite approximation or a saddlepoint approximation. } \examples{ data("Produc", package = "plm") lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) individual_index <- !grepl("state", names(coef(lm_individual))) coef_test(lm_individual, vcov = "CR2", cluster = Produc$state, coefs = individual_index) V_CR2 <- vcovCR(lm_individual, cluster = Produc$state, type = "CR2") coef_test(lm_individual, vcov = V_CR2, coefs = individual_index) } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.mlm.Rd0000644000176200001440000000443413500611663015716 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mlm.R \name{vcovCR.mlm} \alias{vcovCR.mlm} \title{Cluster-robust variance-covariance matrix for an mlm object.} \usage{ \method{vcovCR}{mlm}(obj, cluster, type, target, inverse_var, form = "sandwich", ...) 
} \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Optional expression or vector indicating which observations belong to the same cluster. If not specified, each row of the data will be treated as a separate cluster.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If not specified, the target is taken to be an identity matrix.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from an \code{mlm} object. 
} \examples{ iris_fit <- lm(cbind(Sepal.Length, Sepal.Width) ~ Species + Petal.Length + Petal.Width, data = iris) Vcluster <- vcovCR(iris_fit, type = "CR2") } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.lmerMod.Rd0000644000176200001440000000503113576303502016525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lmer.R \name{vcovCR.lmerMod} \alias{vcovCR.lmerMod} \title{Cluster-robust variance-covariance matrix for an lmerMod object.} \usage{ \method{vcovCR}{lmerMod}(obj, cluster, type, target, inverse_var, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Optional expression or vector indicating which observations belong to the same cluster. If not specified, will be set to \code{getGroups(obj)}.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If not specified, the target is taken to be the estimated variance-covariance structure of the \code{lmerMod} object.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. 
Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from \code{\link[lme4]{merMod}} object. } \examples{ library(lme4) sleep_fit <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) vcovCR(sleep_fit, type = "CR2") data(egsingle, package = "mlmRev") math_model <- lmer(math ~ year * size + female + black + hispanic + (1 | schoolid) + (1 | childid), data = egsingle) vcovCR(math_model, type = "CR2") } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.plm.Rd0000644000176200001440000000741013500611663015716 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plm.R \name{vcovCR.plm} \alias{vcovCR.plm} \title{Cluster-robust variance-covariance matrix for a plm object.} \usage{ \method{vcovCR}{plm}(obj, cluster, type, target, inverse_var, form = "sandwich", ignore_FE = FALSE, ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Optional character string, expression, or vector indicating which observations belong to the same cluster. For fixed-effect models that include individual effects or time effects (but not both), the cluster will be taken equal to the included fixed effects if not otherwise specified. Clustering on individuals can also be obtained by taking \code{cluster = "individual"} and clustering on time periods can be obtained with \code{cluster = "time"}. 
For random-effects models, the cluster will be taken equal to the included random effect identifier if not otherwise specified.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. By default, the target is taken to be an identity matrix for fixed effect models or the estimated compound-symmetric covariance matrix for random effects models.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{ignore_FE}{Optional logical controlling whether fixed effects are ignored when calculating small-sample adjustments in models where fixed effects are estimated through absorption.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from a \code{\link[plm]{plm}} object. 
} \examples{ library(plm) # fixed effects data("Produc", package = "plm") plm_FE <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year"), effect = "individual", model = "within") vcovCR(plm_FE, type="CR2") # random effects plm_RE <- update(plm_FE, model = "random") vcovCR(plm_RE, type = "CR2") # first differencing data(Fatalities, package = "AER") Fatalities <- within(Fatalities, { frate <- 10000 * fatal / pop drinkagec <- cut(drinkage, breaks = 18:22, include.lowest = TRUE, right = FALSE) drinkagec <- relevel(drinkagec, ref = 4) }) plm_FD <- plm(frate ~ beertax + drinkagec + miles + unemp + log(income), data = Fatalities, index = c("state", "year"), model = "fd") vcovHC(plm_FD, method="arellano", type = "sss", cluster = "group") vcovCR(plm_FD, type = "CR1S") vcovCR(plm_FD, type = "CR2") } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.lme.Rd0000644000176200001440000000502713500611663015705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lme.R \name{vcovCR.lme} \alias{vcovCR.lme} \title{Cluster-robust variance-covariance matrix for an lme object.} \usage{ \method{vcovCR}{lme}(obj, cluster, type, target, inverse_var, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Optional expression or vector indicating which observations belong to the same cluster. If not specified, will be set to \code{getGroups(obj)}.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. 
If not specified, the target is taken to be the estimated variance-covariance structure of the \code{lme} object.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from a \code{\link[nlme]{lme}} object. } \examples{ library(nlme) rat_weight <- lme(weight ~ Time * Diet, data=BodyWeight, ~ Time | Rat) vcovCR(rat_weight, type = "CR2") data(egsingle, package = "mlmRev") math_model <- lme(math ~ year * size + female + black + hispanic, random = list(~ year | schoolid, ~ 1 | childid), data = egsingle) vcovCR(math_model, type = "CR2") } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/conf_int.Rd0000644000176200001440000000435013500611663015524 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conf_int.R \name{conf_int} \alias{conf_int} \title{Calculate confidence intervals for all or selected regression coefficients in a fitted model} \usage{ conf_int(obj, vcov, level = 0.95, test = "Satterthwaite", coefs = "All", ...) 
} \arguments{ \item{obj}{Fitted model for which to calculate confidence intervals.} \item{vcov}{Variance covariance matrix estimated using \code{vcovCR} or a character string specifying which small-sample adjustment should be used to calculate the variance-covariance.} \item{level}{Desired coverage level for confidence intervals.} \item{test}{Character vector specifying which small-sample corrections to calculate. \code{"z"} returns a z test (i.e., using a standard normal reference distribution). \code{"naive-t"} returns a t test with \code{m - 1} degrees of freedom. \code{"Satterthwaite"} returns a Satterthwaite correction. \code{"saddlepoint"} returns a saddlepoint correction. Default is \code{"Satterthwaite"}.} \item{coefs}{Character, integer, or logical vector specifying which coefficients should be tested. The default value \code{"All"} will test all estimated coefficients.} \item{...}{Further arguments passed to \code{\link{vcovCR}}, which are only needed if \code{vcov} is a character string.} } \value{ A data frame containing estimated regression coefficients, standard errors, and confidence intervals. } \description{ \code{conf_int} reports confidence intervals for each coefficient estimate in a fitted linear regression model, using a sandwich estimator for the standard errors and a small sample correction for the critical values. The small-sample correction is based on a Satterthwaite approximation. 
} \examples{ data("Produc", package = "plm") lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) individual_index <- !grepl("state", names(coef(lm_individual))) conf_int(lm_individual, vcov = "CR2", cluster = Produc$state, coefs = individual_index) V_CR2 <- vcovCR(lm_individual, cluster = Produc$state, type = "CR2") conf_int(lm_individual, vcov = V_CR2, level = .99, coefs = individual_index) } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/SATcoaching.Rd0000644000176200001440000000255213574216757016072 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-documentation.R \docType{data} \name{SATcoaching} \alias{SATcoaching} \title{Randomized experiments on SAT coaching} \format{A data frame with 67 rows and 11 variables: \describe{ \item{study}{Study identifier} \item{year}{Year of publication} \item{test}{Character string indicating whether effect size corresponds to outcome on verbal (SATV) or math (SATM) test} \item{d}{Effect size estimate (Standardized mean difference)} \item{V}{Variance of effect size estimate} \item{nT}{Sample size in treatment condition} \item{nC}{Sample size in control condition} \item{study_type}{Character string indicating whether study design used a matched, non-equivalent, or randomized control group} \item{hrs}{Hours of coaching} \item{ETS}{Indicator variable for Educational Testing Service} \item{homework}{Indicator variable for homework} }} \usage{ SATcoaching } \description{ Effect sizes from studies on the effects of SAT coaching, reported in Kalaian and Raudenbush (1996) } \references{ Kalaian, H. A. & Raudenbush, S. W. (1996). A multivariate mixed linear model for meta-analysis. \emph{Psychological Methods, 1}(3), 227-235. 
doi:\href{https://doi.org/10.1037/1082-989X.1.3.227}{10.1037/1082-989X.1.3.227} } \keyword{datasets} clubSandwich/man/vcovCR.lm.Rd0000644000176200001440000000524613500611663015543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lm.R \name{vcovCR.lm} \alias{vcovCR.lm} \title{Cluster-robust variance-covariance matrix for an lm object.} \usage{ \method{vcovCR}{lm}(obj, cluster, type, target = NULL, inverse_var = NULL, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Expression or vector indicating which observations belong to the same cluster. Required for \code{lm} objects.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If a vector, the target matrix is assumed to be diagonal. If not specified, the target is taken to be an identity matrix.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. 
Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from an \code{\link{lm}} object. } \examples{ data("Produc", package = "plm") lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) individual_index <- !grepl("state", names(coef(lm_individual))) vcovCR(lm_individual, cluster = Produc$state, type = "CR2")[individual_index,individual_index] # compare to plm() plm_FE <- plm::plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year"), effect = "individual", model = "within") vcovCR(plm_FE, type="CR2") } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.gls.Rd0000644000176200001440000000464413500611663015721 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gls.R \name{vcovCR.gls} \alias{vcovCR.gls} \title{Cluster-robust variance-covariance matrix for a gls object.} \usage{ \method{vcovCR}{gls}(obj, cluster, type, target, inverse_var, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Optional expression or vector indicating which observations belong to the same cluster. 
If not specified, will be set to \code{getGroups(obj)}.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If not specified, the target is taken to be the estimated variance-covariance structure of the \code{gls} object.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from a \code{\link[nlme]{gls}} object. 
} \examples{ library(nlme) data(Ovary, package = "nlme") Ovary$time_int <- 1:nrow(Ovary) lm_AR1 <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary, correlation = corAR1(form = ~ time_int | Mare)) vcovCR(lm_AR1, type = "CR2") } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.rma.mv.Rd0000644000176200001440000000545713500611663016337 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rma-mv.R \name{vcovCR.rma.mv} \alias{vcovCR.rma.mv} \title{Cluster-robust variance-covariance matrix for an rma.mv object.} \usage{ \method{vcovCR}{rma.mv}(obj, cluster, type, target, inverse_var, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Optional expression or vector indicating which observations belong to the same cluster. If not specified, will be set to the factor in the random-effects structure with the fewest distinct levels. Caveat emptor: the function does not check that the random effects are nested.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If not specified, the target is taken to be the estimated variance-covariance structure of the \code{rma.mv} object.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. 
Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from a \code{\link[metafor]{rma.mv}} object. } \examples{ library(metafor) data(hierdat, package = "robumeta") mfor_fit <- rma.mv(effectsize ~ binge + followup + sreport + age, V = var, random = list(~ 1 | esid, ~ 1 | studyid), data = hierdat) mfor_fit mfor_CR2 <- vcovCR(mfor_fit, type = "CR2") mfor_CR2 coef_test(mfor_fit, vcov = mfor_CR2, test = c("Satterthwaite", "saddlepoint")) Wald_test(mfor_fit, constraints = c(2,4), vcov = mfor_CR2) Wald_test(mfor_fit, constraints = 2:5, vcov = mfor_CR2) } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.rma.uni.Rd0000644000176200001440000000511313500611663016475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rma-uni.R \name{vcovCR.rma.uni} \alias{vcovCR.rma.uni} \title{Cluster-robust variance-covariance matrix for a rma.uni object.} \usage{ \method{vcovCR}{rma.uni}(obj, cluster, type, target, inverse_var, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Expression or vector indicating which observations belong to the same cluster. 
Required for \code{rma.uni} objects.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If not specified, the target is taken to be diagonal with entries equal to the estimated marginal variance of the effect sizes.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from a \code{\link[metafor]{rma.uni}} object. 
} \examples{ library(metafor) data(corrdat, package = "robumeta") mfor_fit <- rma.uni(effectsize ~ males + college + binge, vi = var, data = corrdat, method = "FE") mfor_fit mfor_CR2 <- vcovCR(mfor_fit, type = "CR2", cluster = corrdat$studyid) mfor_CR2 coef_test(mfor_fit, vcov = mfor_CR2, test = c("Satterthwaite", "saddlepoint")) Wald_test(mfor_fit, constraints = 2:4, vcov = mfor_CR2) } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.robu.Rd0000644000176200001440000000534413500611663016101 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/robu.R \name{vcovCR.robu} \alias{vcovCR.robu} \title{Cluster-robust variance-covariance matrix for a robu object.} \usage{ \method{vcovCR}{robu}(obj, cluster, type, target, inverse_var, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Optional expression or vector indicating which observations belong to the same cluster. If not specified, will be set to the \code{studynum} used in fitting the \code{\link[robumeta]{robu}} object.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If not specified, the target is taken to be the inverse of the estimated weights used in fitting the \code{\link[robumeta]{robu}} object.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. 
Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from a \code{\link[robumeta]{robu}} object. } \examples{ library(robumeta) data(hierdat) robu_fit <- robu(effectsize ~ binge + followup + sreport + age, data = hierdat, studynum = studyid, var.eff.size = var, modelweights = "HIER") robu_fit robu_CR2 <- vcovCR(robu_fit, type = "CR2") robu_CR2 coef_test(robu_fit, vcov = robu_CR2, test = c("Satterthwaite", "saddlepoint")) Wald_test(robu_fit, constraints = c(2,4), vcov = robu_CR2) Wald_test(robu_fit, constraints = 2:5, vcov = robu_CR2) } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.glm.Rd0000644000176200001440000000467013500611663015712 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glm.R \name{vcovCR.glm} \alias{vcovCR.glm} \title{Cluster-robust variance-covariance matrix for a glm object.} \usage{ \method{vcovCR}{glm}(obj, cluster, type, target = NULL, inverse_var = NULL, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Expression or vector indicating which observations belong to the same cluster. Required for \code{glm} objects.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. 
See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If a vector, the target matrix is assumed to be diagonal. If not specified, the target is taken to be the estimated variance function.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. } \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from a \code{\link{glm}} object. 
} \examples{ data(dietox, package = "geepack") dietox$Cu <- as.factor(dietox$Cu) weight_fit <- glm(Weight ~ Cu * poly(Time, 3), data=dietox, family = "quasipoisson") V_CR <- vcovCR(weight_fit, cluster = dietox$Pig, type = "CR2") coef_test(weight_fit, vcov = V_CR, test = "Satterthwaite") } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/Wald_test.Rd0000644000176200001440000000272613500611663015660 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Wald_test.R \name{Wald_test} \alias{Wald_test} \title{Test parameter constraints in a fitted linear regression model} \usage{ Wald_test(obj, constraints, vcov, test = "HTZ", ...) } \arguments{ \item{obj}{Fitted model for which to calculate Wald tests.} \item{constraints}{List of one or more constraints to test. See details below.} \item{vcov}{Variance covariance matrix estimated using \code{vcovCR} or a character string specifying which small-sample adjustment should be used to calculate the variance-covariance.} \item{test}{Character vector specifying which small-sample correction(s) to calculate. The following corrections are available: \code{"chi-sq"}, \code{"Naive-F"}, \code{"HTA"}, \code{"HTB"}, \code{"HTZ"}, \code{"EDF"}, \code{"EDT"}. Default is \code{"HTZ"}.} \item{...}{Further arguments passed to \code{\link{vcovCR}}, which are only needed if \code{vcov} is a character string.} } \value{ A list of test results. } \description{ \code{Wald_test} reports Wald-type tests of linear contrasts from a fitted linear regression model, using a sandwich estimator for the variance-covariance matrix and a small sample correction for the p-value. Several different small-sample corrections are available. } \details{ Constraints can be specified as character vectors, integer vectors, logical vectors, or matrices. 
} \seealso{ \code{\link{vcovCR}} } clubSandwich/man/vcovCR.ivreg.Rd0000644000176200001440000000466613500611663016254 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ivreg.R \name{vcovCR.ivreg} \alias{vcovCR.ivreg} \title{Cluster-robust variance-covariance matrix for an ivreg object.} \usage{ \method{vcovCR}{ivreg}(obj, cluster, type, target = NULL, inverse_var = FALSE, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Expression or vector indicating which observations belong to the same cluster. Required for \code{ivreg} objects.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If a vector, the target matrix is assumed to be diagonal. If not specified, the target is taken to be an identity matrix.} \item{inverse_var}{Not used for \code{ivreg} objects.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. 
} \description{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates from an \code{\link[AER]{ivreg}} object. } \examples{ library(AER) data("CigarettesSW") Cigs <- within(CigarettesSW, { rprice <- price/cpi rincome <- income/population/cpi tdiff <- (taxs - tax)/cpi }) iv_fit <- ivreg(log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax/cpi), data = Cigs) vcovCR(iv_fit, cluster = Cigs$state, type = "CR2") coef_test(iv_fit, vcov = "CR2", cluster = Cigs$state) } \seealso{ \code{\link{vcovCR}} } clubSandwich/man/impute_covariance_matrix.Rd0000644000176200001440000000307313500611663021007 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rma-mv.R \name{impute_covariance_matrix} \alias{impute_covariance_matrix} \title{Impute a block-diagonal covariance matrix} \usage{ impute_covariance_matrix(vi, cluster, r, return_list = identical(as.factor(cluster), sort(as.factor(cluster)))) } \arguments{ \item{vi}{Vector of variances} \item{cluster}{Vector indicating which effects belong to the same cluster. Effects with the same value of `cluster` will be treated as correlated.} \item{r}{Vector or numeric value of assumed correlation(s) between effect size estimates from each study.} \item{return_list}{Optional logical indicating whether to return a list of matrices (with one entry per block) or the full variance-covariance matrix.} } \value{ If \code{cluster} is appropriately sorted, then a list of matrices, with one entry per cluster, will be returned by default. If \code{cluster} is out of order, then the full variance-covariance matrix will be returned by default. The output structure can be controlled with the optional \code{return_list} argument. } \description{ \code{impute_covariance_matrix} calculates a block-diagonal covariance matrix, given the marginal variances, the block structure, and an assumed correlation. 
} \examples{ library(metafor) data(SATcoaching) V_list <- impute_covariance_matrix(vi = SATcoaching$V, cluster = SATcoaching$study, r = 0.66) MVFE <- rma.mv(d ~ 0 + test, V = V_list, data = SATcoaching) coef_test(MVFE, vcov = "CR2", cluster = SATcoaching$study) } clubSandwich/man/dropoutPrevention.Rd0000644000176200001440000000541613500611663017477 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-documentation.R \docType{data} \name{dropoutPrevention} \alias{dropoutPrevention} \title{Dropout prevention/intervention program effects} \format{A data frame with 385 rows and 18 variables: \describe{ \item{LOR1}{log-odds ratio measuring the intervention effect} \item{varLOR}{estimated sampling variance of the log-odds ratio} \item{studyID}{unique identifier for each study} \item{studySample}{unique identifier for each sample within a study} \item{study_design}{study design (randomized, matched, or non-randomized and unmatched)} \item{outcome}{outcome measure for the intervention effect is estimated (school dropout, school enrollment, graduation, graduation or GED receipt)} \item{evaluator_independence}{degree of evaluator independence (independent, indirect but influential, involved in planning but not delivery, involved in delivery)} \item{implementation_quality}{level of implementation quality (clear problems, possible problems, no apparent problems)} \item{program_site}{Program delivery site (community, mixed, school classroom, school but outside of classroom)} \item{attrition}{Overall attrition (proportion)} \item{group_equivalence}{pretest group-equivalence log-odds ratio} \item{adjusted}{adjusted or unadjusted data used to calculate intervention effect} \item{male_pct}{proportion of the sample that is male} \item{white_pct}{proportion of the sample that is white} \item{average_age}{average age of the sample} \item{duration}{program duration (in weeks)} \item{service_hrs}{program contact hours per week} 
\item{big_study}{indicator for the 32 studies with 3 or more effect sizes} }} \source{ Wilson, S. J., Lipsey, M. W., Tanner-Smith, E., Huang, C. H., & Steinka-Fry, K. T. (2011). Dropout prevention and intervention programs: Effects on school completion and dropout Among school-aged children and youth: A systematic review. Campbell Systematic Reviews, 7(8). } \usage{ dropoutPrevention } \description{ A dataset containing estimated effect sizes, variances, and covariates from a meta-analysis of dropout prevention/intervention program effects, conducted by Wilson et al. (2011). Missing observations were imputed. } \references{ Wilson, S. J., Lipsey, M. W., Tanner-Smith, E., Huang, C. H., & Steinka-Fry, K. T. (2011). Dropout prevention and intervention programs: Effects on school completion and dropout Among school-aged children and youth: A systematic review. Campbell Systematic Reviews, 7(8). Tipton, E., & Pustejovsky, J. E. (2015). Small-sample adjustments for tests of moderators and model fit using robust variance estimation in meta-regression. } \keyword{datasets} clubSandwich/man/vcovCR.Rd0000644000176200001440000001522113500611663015126 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clubSandwich.R \name{vcovCR} \alias{vcovCR} \alias{vcovCR.default} \title{Cluster-robust variance-covariance matrix} \usage{ vcovCR(obj, cluster, type, target, inverse_var, form, ...) \method{vcovCR}{default}(obj, cluster, type, target = NULL, inverse_var = FALSE, form = "sandwich", ...) } \arguments{ \item{obj}{Fitted model for which to calculate the variance-covariance matrix} \item{cluster}{Expression or vector indicating which observations belong to the same cluster. 
For some classes, the cluster will be detected automatically if not specified.} \item{type}{Character string specifying which small-sample adjustment should be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of \code{\link{vcovCR}} for further information.} \item{target}{Optional matrix or vector describing the working variance-covariance model used to calculate the \code{CR2} and \code{CR4} adjustment matrices. If a vector, the target matrix is assumed to be diagonal. If not specified, \code{vcovCR} will attempt to infer a value.} \item{inverse_var}{Optional logical indicating whether the weights used in fitting the model are inverse-variance. If not specified, \code{vcovCR} will attempt to infer a value.} \item{form}{Controls the form of the returned matrix. The default \code{"sandwich"} will return the sandwich variance-covariance matrix. Alternately, setting \code{form = "meat"} will return only the meat of the sandwich and setting \code{form = B}, where \code{B} is a matrix of appropriate dimension, will return the sandwich variance-covariance matrix calculated using \code{B} as the bread.} \item{...}{Additional arguments available for some classes of objects.} } \value{ An object of class \code{c("vcovCR","clubSandwich")}, which consists of a matrix of the estimated variance of and covariances between the regression coefficient estimates. 
The matrix has several attributes: \describe{ \item{type}{indicates which small-sample adjustment was used} \item{cluster}{contains the factor vector that defines independent clusters} \item{bread}{contains the bread matrix} \item{v_scale}{constant used in scaling the sandwich estimator} \item{est_mats}{contains a list of estimating matrices used to calculate the sandwich estimator} \item{adjustments}{contains a list of adjustment matrices used to calculate the sandwich estimator} \item{target}{contains the working variance-covariance model used to calculate the adjustment matrices. This is needed for calculating small-sample corrections for Wald tests.} } } \description{ This is a generic function, with specific methods defined for \code{\link[stats]{lm}}, \code{\link[plm]{plm}}, \code{\link[stats]{glm}}, \code{\link[nlme]{gls}}, \code{\link[nlme]{lme}}, \code{\link[robumeta]{robu}}, \code{\link[metafor]{rma.uni}}, and \code{\link[metafor]{rma.mv}} objects. \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates. } \details{ \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix of a set of regression coefficient estimates. Several different small sample corrections are available, which run parallel with the "HC" corrections for heteroskedasticity-consistent variance estimators, as implemented in \code{\link[sandwich]{vcovHC}}. The "CR2" adjustment is recommended (Pustejovsky & Tipton, 2017; Imbens & Kolesar, 2016). See Pustejovsky and Tipton (2017) and Cameron and Miller (2015) for further technical details. 
Available options include: \describe{ \item{"CR0"}{is the original form of the sandwich estimator (Liang & Zeger, 1986), which does not make any small-sample correction.} \item{"CR1"}{multiplies CR0 by \code{m / (m - 1)}, where \code{m} is the number of clusters.} \item{"CR1p"}{multiplies CR0 by \code{m / (m - p)}, where \code{m} is the number of clusters and \code{p} is the number of covariates.} \item{"CR1S"}{multiplies CR0 by \code{(m (N-1)) / [(m - 1)(N - p)]}, where \code{m} is the number of clusters, \code{N} is the total number of observations, and \code{p} is the number of covariates. Some Stata commands use this correction by default.} \item{"CR2"}{is the "bias-reduced linearization" adjustment proposed by Bell and McCaffrey (2002) and further developed in Pustejovsky and Tipton (2017). The adjustment is chosen so that the variance-covariance estimator is exactly unbiased under a user-specified working model.} \item{"CR3"}{approximates the leave-one-cluster-out jackknife variance estimator (Bell & McCaffrey, 2002).} } } \examples{ # simulate design with cluster-dependence m <- 8 cluster <- factor(rep(LETTERS[1:m], 3 + rpois(m, 5))) n <- length(cluster) X <- matrix(rnorm(3 * n), n, 3) nu <- rnorm(m)[cluster] e <- rnorm(n) y <- X \%*\% c(.4, .3, -.3) + nu + e dat <- data.frame(y, X, cluster, row = 1:n) # fit linear model lm_fit <- lm(y ~ X1 + X2 + X3, data = dat) vcov(lm_fit) # cluster-robust variance estimator with CR2 small-sample correction vcovCR(lm_fit, cluster = dat$cluster, type = "CR2") # compare small-sample adjustments CR_types <- paste0("CR",c("0","1","1S","2","3")) sapply(CR_types, function(type) sqrt(diag(vcovCR(lm_fit, cluster = dat$cluster, type = type)))) } \references{ Bell, R. M., & McCaffrey, D. F. (2002). Bias reduction in standard errors for linear regression with multi-stage samples. Survey Methodology, 28(2), 169-181. Cameron, A. C., & Miller, D. L. (2015). A Practitioner's Guide to Cluster-Robust Inference. 
\emph{Journal of Human Resources, 50}(2), 317-372. \doi{10.3368/jhr.50.2.317} Imbens, G. W., & Kolesar, M. (2016). Robust standard errors in small samples: Some practical advice. \emph{Review of Economics and Statistics, 98}(4), 701-712. \doi{10.1162/rest_a_00552} Liang, K.-Y., & Zeger, S. L. (1986). Longitudinal data analysis using generalized linear models. \emph{Biometrika, 73}(1), 13-22. \doi{10.1093/biomet/73.1.13} Pustejovsky, J. E. & Tipton, E. (2017). Small sample methods for cluster-robust variance estimation and hypothesis testing in fixed effects models. \emph{Journal of Business and Economic Statistics}. In Press. \doi{10.1080/07350015.2016.1247004} } \seealso{ \code{\link{vcovCR.lm}}, \code{\link{vcovCR.plm}}, \code{\link{vcovCR.glm}}, \code{\link{vcovCR.gls}}, \code{\link{vcovCR.lme}}, \code{\link{vcovCR.robu}}, \code{\link{vcovCR.rma.uni}}, \code{\link{vcovCR.rma.mv}} } clubSandwich/man/MortalityRates.Rd0000644000176200001440000000307313574216757016731 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-documentation.R \docType{data} \name{MortalityRates} \alias{MortalityRates} \title{State-level annual mortality rates by cause among 18-20 year-olds} \format{A data frame with 5508 rows and 12 variables: \describe{ \item{year}{Year of observation} \item{state}{identifier for state} \item{count}{Number of deaths} \item{pop}{Population size} \item{legal}{Proportion of 18-20 year-old population that is legally allowed to drink} \item{beertaxa}{Beer taxation rate} \item{beerpercap}{Beer consumption per capita} \item{winepercap}{Wine consumption per capita} \item{spiritpercap}{Spirits consumption per capita} \item{totpercap}{Total alcohol consumption per capita} \item{mrate}{Mortality rate per 10,000} \item{cause}{Cause of death} }} \source{ \href{http://masteringmetrics.com/wp-content/uploads/2015/01/deaths.dta}{Mastering 'Metrics data archive} } \usage{ MortalityRates } \description{ A dataset containing 
state-level annual mortality rates for select causes of death, as well as data related to the minimum legal drinking age and alcohol consumption. } \references{ Angrist, J. D., and Pischke, J. S. (2014). _Mastering'metrics: the path from cause to effect_. Princeton University Press, 2014. Carpenter, C., & Dobkin, C. (2011). The minimum legal drinking age and public health. _Journal of Economic Perspectives, 25_(2), 133-156. doi:[10.1257/jep.25.2.133](https://doi.org/10.1257/jep.25.2.133) } \keyword{datasets} clubSandwich/man/AchievementAwardsRCT.Rd0000644000176200001440000000407213574216757017711 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-documentation.R \docType{data} \name{AchievementAwardsRCT} \alias{AchievementAwardsRCT} \title{Achievement Awards Demonstration program} \format{A data frame with 16526 rows and 21 variables: \describe{ \item{school_id}{Fictitious school identification number} \item{school_type}{Factor identifying the school type (Arab religious, Jewish religious, Jewish secular)} \item{pair}{Number of treatment pair. 
Note that 7 is a triple.} \item{treated}{Indicator for whether school was in treatment group} \item{year}{Cohort year} \item{student_id}{Fictitious student identification number} \item{sex}{Factor identifying student sex} \item{siblings}{Number of siblings} \item{immigrant}{Indicator for immigrant status} \item{father_ed}{Father's level of education} \item{mother_ed}{Mother's level of education} \item{Bagrut_status}{Indicator for Bagrut attainment} \item{attempted}{Number of Bagrut units attempted} \item{awarded}{Number of Bagrut units awarded} \item{achv_math}{Indicator for satisfaction of math requirement} \item{achv_english}{Indicator for satisfaction of English requirement} \item{achv_hebrew}{Indicator for satisfaction of Hebrew requirement} \item{lagscore}{Lagged Bagrut score} \item{qrtl}{Quartile within distribution of lagscore, calculated by cohort and sex} \item{half}{Lower or upper half within distribution of lagscore, calculated by cohort and sex} }} \source{ \href{https://economics.mit.edu/faculty/angrist/data1/data/angrist}{Angrist Data Archive} } \usage{ AchievementAwardsRCT } \description{ Data from a randomized trial of the Achievement Awards Demonstration program, reported in Angrist & Lavy (2009). } \references{ Angrist, J. D., & Lavy, V. (2009). The effects of high stakes high school achievement awards : Evidence from a randomized trial. \emph{American Economic Review, 99}(4), 1384-1414. 
doi:\href{https://doi.org/10.1257/aer.99.4.1384}{10.1257/aer.99.4.1384} } \keyword{datasets} clubSandwich/DESCRIPTION0000644000176200001440000000375113576443015014404 0ustar liggesusersPackage: clubSandwich Title: Cluster-Robust (Sandwich) Variance Estimators with Small-Sample Corrections Version: 0.4.0 Authors@R: person("James", "Pustejovsky", email = "jepusto@gmail.com", role = c("aut", "cre"), comment = c(ORCID = "0000-0003-0591-9465")) Description: Provides several cluster-robust variance estimators (i.e., sandwich estimators) for ordinary and weighted least squares linear regression models, including the bias-reduced linearization estimator introduced by Bell and McCaffrey (2002) and developed further by Pustejovsky and Tipton (2017) . The package includes functions for estimating the variance- covariance matrix and for testing single- and multiple- contrast hypotheses based on Wald test statistics. Tests of single regression coefficients use Satterthwaite or saddle-point corrections. Tests of multiple- contrast hypotheses use an approximation to Hotelling's T-squared distribution. Methods are provided for a variety of fitted models, including lm() and mlm objects, glm(), ivreg() (from package 'AER'), plm() (from package 'plm'), gls() and lme() (from 'nlme'), lmer() (from `lme4`), robu() (from 'robumeta'), and rma.uni() and rma.mv() (from 'metafor'). 
URL: https://github.com/jepusto/clubSandwich BugReports: https://github.com/jepusto/clubSandwich/issues Depends: R (>= 3.0.0) License: GPL-3 VignetteBuilder: knitr LazyData: true Imports: stats, sandwich Suggests: Formula, knitr, carData, geepack, metafor, robumeta, nlme, mlmRev, AER, plm (>= 1.6-4), Matrix, lme4, zoo, testthat, rmarkdown RoxygenNote: 6.1.1 Encoding: UTF-8 Language: en-US NeedsCompilation: no Packaged: 2019-12-18 13:31:19 UTC; jep2963 Author: James Pustejovsky [aut, cre] () Maintainer: James Pustejovsky Repository: CRAN Date/Publication: 2019-12-18 15:30:21 UTC clubSandwich/build/0000755000176200001440000000000013576425047013774 5ustar liggesusersclubSandwich/build/vignette.rds0000644000176200001440000000046013576425047016333 0ustar liggesusersuP]K0M:@PC@/2+ $7jRV䞓s!4CC w $'07VzX UM$'<^ ]^N]w%2tՇ+7ь0Li=TK$%bZiU[+[7`&q XX( ܌BOVYf[7Q\ZN >\ $ۙ22Qg6It$5^X3\(=^2_X_y]ri n- ,մ2oTfV{Jf(*+ZjgY+duRd'[SRXoB~2UEl|:Z=K-ڟǫM?خM06JakZvkmϟZ,u‡x^?I\c/+bn+zUm i:7eJ:B5g LdG>#A!N8v)(9~QGe8j+u4P!}-!RYPAhp\^D*R?C<10yI-+-X4}(SOuhP!~ѐ}'_-[5'Lc l[15E/ bAڇPRYKJZL\KI,CK/R~x"fb')Vt-)읚VMNXbfnN)V6jq 0 @ӹ 1,YJ$Z; ^ÐZ@N{: @ U2J!@>ba{jJبY]w_8%ZG%d*a?-e3CU?!IOp**!PO̎[ a]+=Zl>%BoqfxX(-5fKy-6+hZ QA9ݷzR‡xMc9ag~yAr`PITo KjjY4R}[-4[7Q K5m+hF=kQ5ia(Z)c^Tx*XIyzy}Mq #ȏ"R ExW*;L+;VM-jeM-I1ː/w޵eY4C<ݦGθ8 2*ÐKcurqK1"67]vS*7 ^ok)nҨ1zENR:!GoM-NbʶU NبP)ȵ#FQЛ| fȣW)ȩ֓) `r:"I p\w$,2Y.Kvb+L|C1N~? 
<뚔*9(cG.$JAfʔT|bOk/ٺǪ1kY`a#1>,cU4]~I';o+_Wl+garݴTؠjgC;MNpQjXw R,?ǒtoꥃC *bN/R8oZES۠5r7w$->0rgʝuW >+ BH%&sw#u9<}>%2|Ae2-S&'w;D9p&OnO}vS*V䎺c:nKbY[7TᴜwMeCʒl{u4.+Vz{5"pxҺN(6j6xx OUЎ>w drt S>mv&;["HRC>.kwcf'aUwe[n\˚m{)-"e prtOv)V'`+ N;K>>weOi\Axءpd6p=:aF 6GgͤQvdSNkΜ8^mgqp=$CEȋû|Ynd驅bTvyI[ ykb qL&N; 3dj^q,15+"x 9zwQhHҬuĬ`VD-Zb =y+l] j E SxIbLes@P|2<P;b}YL3\֪RFc6I:L'뮕8 9:ޘk(TETqƇx:] H Yg#0SyT'w7곈$dx#ZϊԏOirtZ:(?7Y(4)s[-\nُ@>xmK&!Gːj 1q&᷐5 C<1pQx~7"v,!*6&aWLcţQ6m8<܅P}==E{AQ">3e'x02$MjZՉ*Ż,mx߫.ݞa]#G1Mo^PeD"IDrx`e۪8ꦶQ &cMM&,@:C>s#+M-:0!Qs콧NXJ?\I4t:!uψOgU-h"5<"m=-fx3f+YpubTs5A/]|چ.CfކgKOeL;Վ G _Z;?Μ}qHP; V6ٺOP o*$NAϴ-XE"; yۊ $>k"]5IQfy 6g-# ԎCaOI x#xM&vmȷc>Ra޽D#|ybS 94;q뛣[0I0Cg)}ޛ}$=>#E3p=1W8>YI7hEVc-SdF]/$ߦ'>'f<|oHTDw!wƴԍ]O/qk%l0b3m<< H]/0@S%oO;yrg e5)f g:bG$d_f!gcq!g)r{T{W t>=CcQw/BZi3M D`5t)]װ\!KߑSݨ*JUt>hO@>KM_v1 uݛ3+ 5J< B 4 Yö_ Kz8 y2 8 9QqO~5k;ҝf/@:H ݪj+G!//BBz _wN/dU%]5 r7~2Ҳ1NA^2}r7 >0R]Գǐz?=}g?Du- "8YUOA\˃K|X!uː/G6Í}yՆx*K)FہV[zpw\vH?E{i¦u,q+<%wELEe΢On&开&LCNVG1s)6s(rTI}HVőtD>ij~`=o-pZ KmH!ZeXjqKdYet3A,`a8!5kOmnĤӍB|hد&h, G/:$\3wg45Q!̇x:m4 dhEh|eWѤDICj@~\:* au8 Gb q`I<$Oy IY!f;uO2N>wBE)܅}ʁ1}r_6y&<9[mcqx:](4"dMɿL!^M. 
K szC<(y譕tJ4B~UlV*>16,VL<[^|uQqӇx:m?0MQO/ [>}uucVl77#1Nߝ<ӿiNt0Mj/B݇`ކ,uc`{d=a>giA`֛݊(Zn=Ӑ06)քE*rǛ֤Uf<ƫ#nE\6F@^%gFv]R-(jŨ\",@0eAPq,=8=l)2R+…;0㲌u26Nh #P"01,꞉+/<]2S B + I]a(wl*;n ~"{YOTkեW??̚Yj͌ /0,}S:ѮDBĹ0%OknSKaVSSL9*NS5BEyFO`uʠDH&z.;]<6ApY"]qKODzj7;kEf*~#AIM&wRu)f=\ 4 }Wv]y Ӯce=flѥPJ Q#WM?9ns'/nTؼL9i, FfK[S< G3=Ζ AeA}D'YON`M;Q /Նϡ[99P]iቛOa<06F 5r5%;1co$-rD5m69J;hu$F%F \ s]Zg'Hlp#9zoɴ%ML .'-2z) ½6p=* O\˧Z:4 r;iHShtNk^$JՔo,)fi}G8-naSVmٴcל2vrwWss9sK>E/ j̏{=lѵ\gcil!OTɒѪ7Tf A~9|sEwNa)i&Dxb?uvBe^UtW-_Ejg\'"ܤjm+Dy"SKr&nM RY~B>+jqg3K)ۨkҏmèXzMВc__驩ۙ[fz&L߭//G%[Uh[Ip_%cm-XDxI8/[-q,(Y<۲5҆ zu|ߓWRmbaG AWjJQK.u\ւQ;%=!ss~Wy VE3-G?d:]:!Q˖R-7;Ͼ[[DMVՊlVʗZEmyݢDɗ1I:t7nh`<]{)m}ޭ}:Q!Mo:ԚMӿF:C~9M_;/jm5!¯!K7\ϫۚKܔVtӮNP2'KEqX2@C~bA_rz\x(QX@^LeV67q+7)lDC8yPN#dFyL$mϑJadi8zc=c}Ur8|UC|Q'b#'C7KDz!:_Wea_i?=9&k=q >ڇ>|6?S+íotqO[pòsw=KO3\4MͮS v,QMѴ{'\9-7Uny~nl8wT[½R!G#ͨSR֔Q`r&F2@.уdLML/* &9w۳uOS l85Υsaggoaz>8yb{ `љo=/vOVx)ѡQ':uś%aϫcG؀d_q5}_WV.ŝ.oΣ'Q0K,s4/[~qqH߹ա,NKiݹnlq&ApBd<>b w-n {K!">7iuJYZ.s{ bDwH'PF77};QjQ)e)Gsb!Gn89ƸiNx.CmffL}sӷ49ackBUx瑾F F¾#:"cTbJ1ĠRlfIXGfB.Wƅ/TU&;ۘSBbgkh$n«?Wf?Vj=R*X?u!_F5]]]BGGőUIJLJx$WQC]Iˍ0 5 g/+UAz'<9Λ+.Dm?41 ah?|6v8 IYu`r;VYX9`rszS&I4)|aE ,B?eIFg K+fnxT3OR <9FqE/@6)UNq$Xp Tw"pr Bx&B\[r>RU%H >g# ؔ 8,ƀӐY\@0"[o1995gUIuM8X(^,CBI]/KHe b!eT%(N u0CZ nWw G]x8!R?PTUXFp'?mRCc}5PZ+o&kuM镐jڒ1u\m5*TJϱ6'u^fm5iS6T0Ϫw{&ק?vwQ㷸OVZ}a^<LskMWj?hL+%_GN9+kxhrV!VJ^+;/ZU4t9׻Ҷ‰@K 85wB|`K) ڭ=F,;=ShCʵ⫝ڞhTC\u8= le^ia'K}o vw'-JhMS雼U'N%n)7*+w){{BS`3Ї~bHY,Ԕ;Su65d^MP_zA/Vķ.d,C.,^-OC& xXozk_X_y]=piVo er\RL5/ecڱN{#w6dުɹ٩BX̪QT*W7O~V\d'[SRXo}{^i_{c}2R+7L}W|r,h@a4w!^ziWWk_B'OGjt$A^%DDM0|ȂY9~)&,@.1tGڞ RI y5ry{Mt՘|Q̫ ໽>Q\StP + yC`9ڽ*M9߄[ ~,/4,>clubSandwich/tests/0000755000176200001440000000000013500611664014024 5ustar liggesusersclubSandwich/tests/testthat/0000755000176200001440000000000013576443015015672 5ustar liggesusersclubSandwich/tests/testthat/test_plm-unbalanced-fixed-effects.R0000644000176200001440000003150113500611664024442 0ustar liggesuserscontext("plm objects - unbalanced fixed effects") 
set.seed(20190513) library(plm, quietly=TRUE) #------------------------------------- # Produc example #------------------------------------- data("Produc", package = "plm") Produc$gsp[sample(nrow(Produc), size = 75)] <- NA Produc$cluster <- sample(LETTERS[1:10], size = nrow(Produc), replace=TRUE) n <- nrow(Produc) n_obs <- sum(!is.na(Produc$gsp)) plm_individual <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year"), effect = "individual", model = "within") lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) individual_names <- names(coef(plm_individual)) individual_index <- names(coef(lm_individual)) %in% individual_names target <- as.vector(1 / plm_individual$model$"log(emp)") test_that("individual effects agree with lm", { expect_equal(coef(plm_individual), coef(lm_individual)[individual_index]) expect_equal(vcovCR(plm_individual, type="CR0")[individual_names,individual_names], vcovCR(lm_individual, cluster = Produc$state, type = "CR0")[individual_index,individual_index]) expect_equal(vcovCR(plm_individual, type="CR1")[individual_names,individual_names], vcovCR(lm_individual, cluster = Produc$state, type = "CR1")[individual_index,individual_index]) expect_equal(vcovCR(plm_individual, type="CR2")[individual_names,individual_names], vcovCR(lm_individual, cluster = Produc$state, type = "CR2")[individual_index,individual_index]) expect_equal(vcovCR(plm_individual, type="CR2", inverse_var=FALSE)[individual_names,individual_names], vcovCR(lm_individual, cluster = Produc$state, type = "CR2")[individual_index,individual_index]) }) plm_time <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year"), effect = "time", model = "within") lm_time <- lm(log(gsp) ~ 0 + factor(year) + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) time_names <- names(coef(plm_time)) time_index <- names(coef(lm_time)) %in% time_names test_that("time effects 
agree with lm", { expect_equal(coef(plm_time), coef(lm_time)[time_index]) expect_equal(vcovCR(plm_time, type="CR0")[time_names,time_names], vcovCR(lm_time, cluster = Produc$year, type = "CR0")[time_index,time_index]) expect_equal(vcovCR(plm_time, type="CR1")[time_names,time_names], vcovCR(lm_time, cluster = Produc$year, type = "CR1")[time_index,time_index]) expect_equal(vcovCR(plm_time, type="CR2")[time_names,time_names], vcovCR(lm_time, cluster = Produc$year, type = "CR2")[time_index,time_index]) expect_equal(vcovCR(plm_time, type="CR2", inverse_var=FALSE)[time_names,time_names], vcovCR(lm_time, cluster = Produc$year, type = "CR2")[time_index,time_index]) }) plm_twoways <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year"), effect = "twoways", model = "within") lm_twoways <- lm(log(gsp) ~ 0 + state + factor(year) + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) twoway_names <- names(coef(plm_twoways)) twoway_index <- names(coef(lm_twoways)) %in% twoway_names test_that("two-way effects agree with lm", { expect_equal(coef(plm_twoways), coef(lm_twoways)[twoway_index]) # clustering on individual expect_equal(vcovCR(plm_twoways, cluster = "individual", type="CR0")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$state, type = "CR0")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "individual", type="CR1")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$state, type = "CR1")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "individual", type="CR2")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$state, type = "CR2")[twoway_index,twoway_index], tolerance = 10^-5) expect_equal(vcovCR(plm_twoways, cluster = "individual", type="CR2", inverse_var=FALSE)[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$state, type = "CR2")[twoway_index,twoway_index], tolerance = 10^-5) # clustering on time 
expect_equal(vcovCR(plm_twoways, cluster = "time", type="CR0")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$year, type = "CR0")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "time", type="CR1")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$year, type = "CR1")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "time", type="CR2")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$year, type = "CR2")[twoway_index,twoway_index], tolerance = 10^-5) expect_equal(vcovCR(plm_twoways, cluster = "time", type="CR2", inverse_var=FALSE)[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$year, type = "CR2")[twoway_index,twoway_index], tolerance = 10^-5) # clustering on a randomly generated factor expect_equal(vcovCR(plm_twoways, cluster = Produc$cluster, type="CR0")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$cluster, type = "CR0")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = Produc$cluster, type="CR1")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$cluster, type = "CR1")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = Produc$cluster, type="CR2")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$cluster, type = "CR2")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = Produc$cluster, type="CR2", inverse_var=FALSE)[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$cluster, type = "CR2")[twoway_index,twoway_index]) }) test_that("bread works", { y <- plm_individual$model$"log(gsp)" expect_true(check_bread(plm_individual, cluster = findCluster.plm(plm_individual), y = y)) sigma_sq_ind <- with(plm_individual, sum(residuals^2) / df.residual) expect_equal(vcov(plm_individual), bread(plm_individual) * sigma_sq_ind / v_scale(plm_individual)) expect_true(check_bread(plm_time, cluster = findCluster.plm(plm_time), y = y)) sigma_sq_time <- 
with(plm_time, sum(residuals^2) / df.residual) expect_equal(vcov(plm_time), bread(plm_time) * sigma_sq_time / v_scale(plm_time)) expect_true(check_bread(plm_twoways, cluster = findCluster.plm(plm_twoways, "state"), y = y)) expect_true(check_bread(plm_twoways, cluster = findCluster.plm(plm_twoways, "year"), y = y)) sigma_sq_two <- with(plm_twoways, sum(residuals^2) / df.residual) expect_equal(vcov(plm_twoways), bread(plm_twoways) * sigma_sq_two / v_scale(plm_twoways)) }) test_that("CR0 and CR1S agree with arellano vcov", { expect_equal(vcovHC(plm_individual, method="arellano", type = "HC0", cluster = "group"), as.matrix(vcovCR(plm_individual, type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_individual, method="arellano", type = "sss", cluster = "group"), as.matrix(vcovCR(plm_individual, type = "CR1S")), check.attributes = FALSE) expect_equal(vcovHC(plm_time, method="arellano", type = "HC0", cluster = "time"), as.matrix(vcovCR(plm_time, type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_time, method="arellano", type = "sss", cluster = "time"), as.matrix(vcovCR(plm_time, type = "CR1S")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "HC0", cluster = "group"), as.matrix(vcovCR(plm_twoways, cluster = "individual", type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "sss", cluster = "group"), as.matrix(vcovCR(plm_twoways, cluster = "individual", type = "CR1S")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "HC0", cluster = "time"), as.matrix(vcovCR(plm_twoways, cluster = "time", type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "sss", cluster = "time"), as.matrix(vcovCR(plm_twoways, cluster = "time", type = "CR1S")), check.attributes = FALSE) }) test_that("vcovCR options work for CR2", { CR2_iv <- vcovCR(plm_individual, type = "CR2") 
expect_identical(vcovCR(plm_individual, cluster = Produc$state, type = "CR2"), CR2_iv) expect_identical(vcovCR(plm_individual, type = "CR2", inverse_var = TRUE), CR2_iv) expect_identical(vcovCR(plm_individual, type = "CR2", target = rep(1, n_obs), inverse_var = TRUE), CR2_iv) CR2_not <- vcovCR(plm_individual, type = "CR2", inverse_var = FALSE) expect_equivalent(CR2_not, CR2_iv) expect_identical(vcovCR(plm_individual, cluster = Produc$state, type = "CR2", inverse_var = FALSE), CR2_not) expect_identical(vcovCR(plm_individual, type = "CR2", target = rep(1, n_obs)), CR2_not) expect_identical(vcovCR(plm_individual, type = "CR2", target = rep(1, n_obs), inverse_var = FALSE), CR2_not) expect_false(identical(vcovCR(plm_individual, type = "CR2", target = target), CR2_not)) }) test_that("vcovCR options work for CR4", { CR4_iv <- vcovCR(plm_individual, type = "CR4") expect_identical(vcovCR(plm_individual, cluster = Produc$state, type = "CR4"), CR4_iv) expect_identical(vcovCR(plm_individual, type = "CR4", inverse_var = TRUE), CR4_iv) expect_identical(vcovCR(plm_individual, type = "CR4", target = rep(1, n_obs), inverse_var = TRUE), CR4_iv) CR4_not <- vcovCR(plm_individual, type = "CR4", inverse_var = FALSE) expect_equivalent(CR4_not, CR4_iv) expect_identical(vcovCR(plm_individual, cluster = Produc$state, type = "CR4", inverse_var = FALSE), CR4_not) expect_identical(vcovCR(plm_individual, type = "CR4", target = rep(1, n_obs)), CR4_not) expect_identical(vcovCR(plm_individual, type = "CR4", target = rep(1, n_obs), inverse_var = FALSE), CR4_not) expect_false(identical(vcovCR(plm_individual, type = "CR4", target = target), CR4_not)) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(plm_individual, vcov = "CR2")) expect_true(check_CR(plm_individual, vcov = "CR4")) expect_true(check_CR(plm_individual, vcov = "CR2", inverse_var = FALSE)) expect_true(check_CR(plm_individual, vcov = "CR4", inverse_var = FALSE)) expect_true(check_CR(plm_time, vcov = "CR2")) 
expect_true(check_CR(plm_time, vcov = "CR4")) expect_true(check_CR(plm_time, vcov = "CR2", inverse_var = FALSE)) expect_true(check_CR(plm_time, vcov = "CR4", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "individual")) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "individual")) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "individual", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "individual", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "time")) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "time")) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "time", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "time", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = Produc$cluster)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = Produc$cluster)) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = Produc$cluster, inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = Produc$cluster, inverse_var = FALSE)) }) test_that("vcovCR is equivalent to vcovHC when clusters are all of size 1", { library(sandwich, quietly=TRUE) CR_types <- paste0("CR",c(0,2)) HC_types <- paste0("HC",c(0,2)) CR_individual <- lapply(CR_types, function(t) as.matrix(vcovCR(plm_individual, cluster = 1:n, type = t))) HC_individual <- lapply(HC_types, function(t) vcovHC(lm_individual, type = t)[individual_index,individual_index]) expect_equal(CR_individual, HC_individual) CR_time <- lapply(CR_types, function(t) as.matrix(vcovCR(plm_time, cluster = 1:n, type = t))) HC_time <- lapply(HC_types, function(t) vcovHC(lm_time, type = t)[time_index,time_index]) expect_equal(CR_time, HC_time) CR_twoways <- lapply(CR_types, function(t) as.matrix(vcovCR(plm_twoways, cluster = 1:n, type = t))) HC_twoways <- lapply(HC_types, function(t) vcovHC(lm_twoways, type = 
t)[twoway_index,twoway_index]) expect_equal(CR_twoways, HC_twoways) }) clubSandwich/tests/testthat/test_plm-random-effects.R0000644000176200001440000001430513500611664022534 0ustar liggesuserscontext("plm objects - random effects") set.seed(20190513) library(nlme, quietly=TRUE) library(plm, quietly=TRUE) data("Grunfeld", package = "plm") data("Produc", package = "plm") # grun_re <- plm(inv ~ value + capital, data = Grunfeld, model="random") # Grunfeld$cluster <- sample(LETTERS[1:10], size = nrow(Grunfeld), replace=TRUE) # Grunfeld_scramble <- Grunfeld[sample(nrow(Grunfeld)),] CR_types <- paste0("CR",0:4) plm_individual <- plm(inv ~ value + capital, data = Grunfeld, model="random") obj <- plm_individual test_that("individual effects agree with gls", { icc <- with(plm_individual$ercomp, sigma2[["id"]] / (sigma2[["id"]] + sigma2[["idios"]])) gls_individual <- gls(inv ~ value + capital, data = Grunfeld, correlation = corCompSymm(value = icc, form = ~ 1 | firm, fixed=TRUE)) expect_equal(model_matrix(plm_individual), model_matrix(gls_individual)) expect_identical(nobs(plm_individual), nobs(gls_individual)) V_ratio <- Map("/", targetVariance(plm_individual, cluster = Grunfeld$firm), targetVariance(gls_individual, cluster = Grunfeld$firm)) expect_equal(lapply(V_ratio, min), lapply(V_ratio, max)) expect_equivalent(residuals_CS(plm_individual), residuals_CS(gls_individual)) CR_plm <- lapply(CR_types, function(x) vcovCR(plm_individual, type = x)) CR_gls <- lapply(CR_types, function(x) vcovCR(gls_individual, type = x)) expect_equivalent(CR_plm, CR_gls) test_plm <- lapply(CR_types, function(x) coef_test(plm_individual, vcov = x, test = "All", p_values = FALSE)[,-3]) test_gls <- lapply(CR_types, function(x) coef_test(gls_individual, vcov = x, test = "All", p_values = FALSE)[,-3]) expect_equivalent(test_plm, test_gls) }) plm_time <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year"), effect = "time", model = "random") test_that("time 
effects agree with gls", { icc <- with(plm_time$ercomp, sigma2[[2]] / (sigma2[[2]] + sigma2[[1]])) gls_time <- gls(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, correlation = corCompSymm(value = icc, form = ~ 1 | year, fixed = TRUE)) expect_equal(model_matrix(plm_time), model_matrix(gls_time)) expect_identical(nobs(plm_time), nobs(gls_time)) expect_equivalent(residuals_CS(plm_time), residuals_CS(gls_time)) CR_plm <- lapply(CR_types, function(x) vcovCR(plm_time, type = x)) CR_gls <- lapply(CR_types, function(x) vcovCR(gls_time, type = x)) expect_equivalent(CR_plm, CR_gls) test_plm <- lapply(CR_types, function(x) coef_test(plm_time, vcov = x, test = "All", p_values = FALSE)[,-3]) test_gls <- lapply(CR_types, function(x) coef_test(gls_time, vcov = x, test = "All", p_values = FALSE)[,-3]) expect_equivalent(test_plm, test_gls) }) plm_twoways <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year"), effect = "twoways", model = "random") test_that("two-way effects throws error", { expect_error(vcovCR(plm_twoways, type = "CR2")) }) test_that("bread works", { expect_true(check_bread(plm_individual, cluster = findCluster.plm(plm_individual), y = plm_individual$model$inv)) expect_true(check_bread(plm_time, cluster = findCluster.plm(plm_time), y = plm_time$model$"log(gsp)")) }) test_that("CR0 and CR1S agree with arellano vcov", { expect_equal(vcovHC(plm_individual, method="arellano", type = "HC0", cluster = "group"), as.matrix(vcovCR(plm_individual, type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_individual, method="arellano", type = "sss", cluster = "group"), as.matrix(vcovCR(plm_individual, type = "CR1S")), check.attributes = FALSE) expect_equal(vcovHC(plm_time, method="arellano", type = "HC0", cluster = "time"), as.matrix(vcovCR(plm_time, type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_time, method="arellano", type = "sss", cluster = "time"), as.matrix(vcovCR(plm_time, type = 
"CR1S")), check.attributes = FALSE) }) test_that("vcovCR options work for CR2", { CR2_iv <- vcovCR(plm_individual, type = "CR2") expect_identical(vcovCR(plm_individual, cluster = Grunfeld$firm, type = "CR2"), CR2_iv) expect_identical(vcovCR(plm_individual, type = "CR2", inverse_var = TRUE), CR2_iv) tgt <- targetVariance(plm_individual, cluster = Grunfeld$firm) expect_equivalent(vcovCR(plm_individual, type = "CR2", target = tgt, inverse_var = TRUE), CR2_iv) CR2_not <- vcovCR(plm_individual, type = "CR2", inverse_var = FALSE) expect_equivalent(CR2_not, CR2_iv) expect_identical(vcovCR(plm_individual, cluster = Grunfeld$firm, type = "CR2", inverse_var = FALSE), CR2_not) expect_identical(vcovCR(plm_individual, type = "CR2", target = tgt), CR2_not) expect_identical(vcovCR(plm_individual, type = "CR2", target = tgt, inverse_var = FALSE), CR2_not) }) test_that("vcovCR options work for CR4", { CR4_iv <- vcovCR(plm_individual, type = "CR4") expect_identical(vcovCR(plm_individual, cluster = Grunfeld$firm, type = "CR4"), CR4_iv) expect_identical(vcovCR(plm_individual, type = "CR4", inverse_var = TRUE), CR4_iv) tgt <- targetVariance(plm_individual, cluster = Grunfeld$firm) expect_equivalent(vcovCR(plm_individual, type = "CR4", target = tgt, inverse_var = TRUE), CR4_iv) CR4_not <- vcovCR(plm_individual, type = "CR4", inverse_var = FALSE) expect_equivalent(CR4_not, CR4_iv) expect_identical(vcovCR(plm_individual, cluster = Grunfeld$firm, type = "CR4", inverse_var = FALSE), CR4_not) expect_identical(vcovCR(plm_individual, type = "CR4", target = tgt), CR4_not) expect_identical(vcovCR(plm_individual, type = "CR4", target = tgt, inverse_var = FALSE), CR4_not) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(plm_individual, vcov = "CR2")) expect_true(check_CR(plm_individual, vcov = "CR4")) expect_true(check_CR(plm_time, vcov = "CR2")) expect_true(check_CR(plm_time, vcov = "CR4")) }) 
clubSandwich/tests/testthat/test_robu.R0000644000176200001440000003203613500611664020021 0ustar liggesuserscontext("robu objects") set.seed(20190513) library(robumeta, quietly=TRUE) data(corrdat) corr_large <- robu(effectsize ~ males + college + binge, data = corrdat, modelweights = "CORR", studynum = studyid, var.eff.size = var, small = FALSE) test_that("CR0 z-tests agree with robumeta for correlated effects", { p <- length(coef_CS(corr_large)) N <- corr_large$N robu_CR0 <- vcovCR(corr_large, type = "CR0") ztests <- coef_test(corr_large, vcov = robu_CR0 * N / (N - p), test = "z") expect_equivalent(corr_large$VR.r, as.matrix(robu_CR0)) expect_equivalent(corr_large$reg_table$SE, ztests$SE) expect_equal(with(corr_large$reg_table, 2 * pnorm(abs(b.r / SE),lower.tail=FALSE)), ztests$p_z) }) corr_small <- robu(effectsize ~ males + college + binge, data = corrdat, modelweights = "CORR", studynum = studyid, var.eff.size = var) test_that("CR2 t-tests agree with robumeta for correlated effects", { robu_CR2 <- vcovCR(corr_small, type = "CR2") expect_true(check_CR(corr_small, vcov = robu_CR2)) # expect_true(check_CR(corr_small, vcov = "CR4")) CR2_ttests <- coef_test(corr_small, vcov = robu_CR2, test = "Satterthwaite") expect_equivalent(corr_small$VR.r, as.matrix(robu_CR2)) expect_equal(corr_small$dfs, CR2_ttests$df) expect_equal(corr_small$reg_table$prob, CR2_ttests$p_Satt) }) data(hierdat) hier_large <- robu(effectsize ~ binge + followup + sreport + age, data = hierdat, studynum = studyid, var.eff.size = var, modelweights = "HIER", small = FALSE) test_that("CR0 z-tests agree with robumeta for hierarchical effects", { p <- length(coef_CS(hier_large)) N <- hier_large$N robu_CR0 <- vcovCR(hier_large, type = "CR0") ztests <- coef_test(hier_large, vcov = robu_CR0 * N / (N - p), test = "z") expect_equivalent(hier_large$VR.r, as.matrix(robu_CR0)) expect_equivalent(hier_large$reg_table$SE, ztests$SE) expect_equal(with(hier_large$reg_table, 2 * pnorm(abs(b.r / SE),lower.tail=FALSE)), 
ztests$p_z) }) hier_small <- robu(effectsize ~ binge + followup + sreport + age, data = hierdat, studynum = studyid, var.eff.size = var, modelweights = "HIER") test_that("CR2 t-tests agree with robumeta for hierarchical effects", { robu_CR2 <- vcovCR(hier_small, type = "CR2") expect_true(check_CR(hier_small, vcov = robu_CR2)) # expect_true(check_CR(hier_small, vcov = "CR4")) CR2_ttests <- coef_test(hier_small, vcov = robu_CR2, test = "Satterthwaite") expect_equivalent(hier_small$VR.r, as.matrix(robu_CR2)) expect_equal(hier_small$dfs, CR2_ttests$df) expect_equal(hier_small$reg_table$prob, CR2_ttests$p_Satt) }) hierdat$user_wt <- 1 + rpois(nrow(hierdat), lambda = 3) user_large <- robu(effectsize ~ binge + followup + sreport + age, data = hierdat, studynum = studyid, var.eff.size = var, userweights = user_wt, small = FALSE) test_that("CR0 z-tests agree with robumeta for user weighting", { p <- length(coef_CS(user_large)) N <- user_large$N robu_CR0 <- vcovCR(user_large, type = "CR0") ztests <- coef_test(user_large, vcov = robu_CR0 * N / (N - p), test = "z") expect_equivalent(user_large$VR.r, as.matrix(robu_CR0)) expect_equivalent(user_large$reg_table$SE, ztests$SE) expect_equal(with(user_large$reg_table, 2 * pnorm(abs(b.r / SE),lower.tail=FALSE)), ztests$p_z) }) user_small <- robu(effectsize ~ binge + followup + sreport + age, data = hierdat, studynum = studyid, var.eff.size = var, userweights = user_wt) test_that("CR2 t-tests agree with robumeta for user weighting", { user_lm <- lm(effectsize ~ binge + followup + sreport + age, data = hierdat, weights = user_wt) expect_equivalent(coef_CS(user_lm), coef(user_lm)) robu_CR2 <- vcovCR(user_small, type = "CR2") expect_true(check_CR(user_small, vcov = robu_CR2)) # expect_true(check_CR(user_small, vcov = "CR4")) expect_equivalent(user_small$VR.r, as.matrix(robu_CR2)) target <- user_small$data.full$avg.var.eff.size lm_CR2 <- vcovCR(user_lm, cluster = hierdat$studyid, type = "CR2", target = target) expect_equivalent(robu_CR2, 
lm_CR2) CR2_ttests <- coef_test(user_small, vcov = robu_CR2, test = "Satterthwaite", p_values = FALSE) # expect_equal(user_small$dfs, CR2_ttests$df) # expect_equal(user_small$reg_table$prob, CR2_ttests$p_Satt) lm_CR2_ttests <- coef_test(user_lm, vcov = "CR2", cluster = hierdat$studyid, target = user_small$data.full$avg.var.eff.size, test = "Satterthwaite", p_values = FALSE) expect_equivalent(CR2_ttests, lm_CR2_ttests) }) test_that("bread works", { vcov_corr_large <- with(corr_large, chol2inv(chol(crossprod(Xreg, data.full$r.weights * Xreg)))) expect_equal(vcov_corr_large, bread(corr_large) / v_scale(corr_large)) vcov_corr_small <- with(corr_small, chol2inv(chol(crossprod(Xreg, data.full$r.weights * Xreg)))) expect_equal(vcov_corr_small, bread(corr_small) / v_scale(corr_small)) vcov_hier_large <- with(hier_large, chol2inv(chol(crossprod(Xreg, data.full$r.weights * Xreg)))) expect_equal(vcov_hier_large, bread(hier_large) / v_scale(hier_large)) vcov_hier_small <- with(hier_small, chol2inv(chol(crossprod(Xreg, data.full$r.weights * Xreg)))) expect_equal(vcov_hier_small, bread(hier_small) / v_scale(hier_small)) vcov_user_large <- with(user_large, chol2inv(chol(crossprod(Xreg, data.full$userweights * Xreg)))) expect_equal(vcov_user_large, bread(user_large) / v_scale(user_large)) vcov_user_small <- with(user_small, chol2inv(chol(crossprod(Xreg, data.full$userweights * Xreg)))) expect_equal(vcov_user_small, bread(user_small) / v_scale(user_small)) }) data(dropoutPrevention) test_that("dropoutPrevention tests replicate Tipton & Pustejovsky (2015) - full sample", { skip_on_cran() m3_hier <- robu(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + evaluator_independence + male_pct + white_pct + average_age + implementation_quality + program_site + duration + service_hrs, data = dropoutPrevention, studynum = studyID, var.eff.size = varLOR, modelweights = "HIER") m3_hier_CR2 <- vcovCR(m3_hier, cluster = dropoutPrevention$studyID, type = "CR2") 
expect_true(check_CR(m3_hier, vcov = m3_hier_CR2)) # expect_true(check_CR(m3_hier, vcov = "CR4")) CR2_ttests <- coef_test(m3_hier, vcov = m3_hier_CR2, test = "Satterthwaite") expect_equivalent(m3_hier$VR.r, as.matrix(m3_hier_CR2)) expect_equal(m3_hier$dfs, CR2_ttests$df) expect_equal(m3_hier$reg_table$prob, CR2_ttests$p_Satt) contrast_list <- list("Study design" = 2:3, "Outcome measure" = 7:9, "Evaluator independence" = 10:12, "Implmentation quality" = 16:17, "Program format" = 18:20) dropout_tests <- Wald_test(m3_hier, constraints = contrast_list, vcov = m3_hier_CR2, test = c("Naive-F","HTZ")) Fstat_club <- sapply(dropout_tests, function(x) x$F) attr(Fstat_club, "dimnames") <- NULL Fstat_paper <- matrix(c(0.23, 0.22, 0.91, 0.84, 3.11, 2.78, 14.15, 13.78, 3.85, 3.65), nrow = 2) expect_equivalent(Fstat_paper, round(Fstat_club, 2)) df_club <- sapply(dropout_tests, function(x) x$df[2]) df_paper <- c(42.9, 21.5, 16.8, 36.9, 37.5) attr(df_club, "names") <- NULL expect_equivalent(df_paper, round(df_club, 1)) }) test_that("dropoutPrevention tests replicate Tipton & Pustejovsky (2015) - reduced sample", { skip_on_cran() dp_subset <- subset(dropoutPrevention, big_study==TRUE) m3_hier <- robu(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + evaluator_independence + male_pct + white_pct + average_age + implementation_quality + program_site + duration + service_hrs, data = dp_subset, studynum = studyID, var.eff.size = varLOR, modelweights = "HIER") m3_hier_CR2 <- vcovCR(m3_hier, cluster = dp_subset$studyID, type = "CR2") expect_true(check_CR(m3_hier, vcov = m3_hier_CR2)) # expect_true(check_CR(m3_hier, vcov = "CR4")) CR2_ttests <- coef_test(m3_hier, vcov = m3_hier_CR2, test = "Satterthwaite") expect_equivalent(m3_hier$VR.r, as.matrix(m3_hier_CR2)) expect_equal(m3_hier$dfs, CR2_ttests$df) expect_equal(m3_hier$reg_table$prob, CR2_ttests$p_Satt) contrast_list <- list("Study design" = 2:3, "Outcome measure" = 7:9, "Evaluator independence" = 10:11, 
"Implmentation quality" = 15:16, "Program format" = 17:19) dropout_tests <- Wald_test(m3_hier, constraints = contrast_list, vcov = "CR2", test = c("Naive-F","HTZ")) Fstat_club <- sapply(dropout_tests, function(x) x$F) Fstat_paper <- matrix(c(3.19, 2.93, 1.05, 0.84, 0.32, 0.26, 4.02, 3.69, 1.19, 0.98), nrow = 2) attr(Fstat_club, "dimnames") <- NULL expect_equivalent(Fstat_paper, round(Fstat_club, 2)) df_club <- sapply(dropout_tests, function(x) x$df[2]) df_paper <- c(11.0, 7.7, 4.6, 11.0, 9.1) attr(df_club, "names") <- NULL expect_equivalent(df_paper, round(df_club, 1)) }) CR_types <- paste0("CR",0:4) test_that("order doesn't matter", { dat_scramble <- corrdat[sample(nrow(corrdat)),] corr_scramble <- robu(effectsize ~ males + college + binge, data = dat_scramble, modelweights = "CORR", studynum = studyid, var.eff.size = var) CR_fit <- lapply(CR_types, function(x) vcovCR(corr_small, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(corr_scramble, type = x)) expect_equivalent(CR_fit, CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(corr_small, vcov = x, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(corr_scramble, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef_CS(corr_small)), 2, simplify = FALSE) Wald_fit <- Wald_test(corr_small, constraints = constraints, vcov = "CR2", test = "All") Wald_scramble <- Wald_test(corr_scramble, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_fit, Wald_scramble) }) test_that("clubSandwich works with dropped observations", { dat_miss <- hierdat dat_miss$binge[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 10))] <- NA dat_miss$followup[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 20))] <- NA hier_drop <- robu(effectsize ~ binge + followup + sreport + age, data = dat_miss, studynum = studyid, var.eff.size = var, modelweights = "HIER") 
dat_complete <- subset(dat_miss, !is.na(binge) & !is.na(followup)) hier_complete <- robu(effectsize ~ binge + followup + sreport + age, data = dat_complete, studynum = studyid, var.eff.size = var, modelweights = "HIER") CR_drop <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(hier_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(hier_drop, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(hier_complete, vcov = x, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("vcovCR options work for CR2", { dp_subset <- subset(dropoutPrevention, big_study==TRUE) m3_hier <- robu(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + evaluator_independence + male_pct + white_pct + average_age + implementation_quality + program_site + duration + service_hrs, data = dp_subset, studynum = studyID, var.eff.size = varLOR, modelweights = "HIER") iv <- mean(m3_hier$data.full$r.weights) / m3_hier$data.full$r.weights CR2_iv <- vcovCR(m3_hier, type = "CR2") expect_identical(vcovCR(m3_hier, type = "CR2", inverse_var = TRUE), CR2_iv) expect_equal(vcovCR(m3_hier, type = "CR2", target = iv, inverse_var = TRUE), CR2_iv) CR2_not <- vcovCR(m3_hier, type = "CR2", inverse_var = FALSE) attr(CR2_iv, "inverse_var") <- FALSE attr(CR2_iv, "target") <- attr(CR2_not, "target") expect_equal(CR2_not, CR2_iv) expect_identical(vcovCR(m3_hier, type = "CR2", target = iv), CR2_not) expect_identical(vcovCR(m3_hier, type = "CR2", target = iv, inverse_var = FALSE), CR2_not) expect_false(identical(vcovCR(m3_hier, type = "CR2", target = m3_hier$data.full$var.eff.size), CR2_not)) }) clubSandwich/tests/testthat/test_impute_covariance_matrix.R0000644000176200001440000000506113500611664024131 0ustar liggesuserscontext("impute_covariance_matrix") set.seed(20190513) 
test_that("impute_covariance_matrix returns correct correlations.", { K <- 10 N <- sum(1:K) dat <- data.frame(study = rep(LETTERS[1:K], 1:K), yi = rnorm(N), vi = rchisq(N, df = 2), stringsAsFactors = FALSE) r <- 0.7 V_single_r <- impute_covariance_matrix(vi = dat$vi, cluster = dat$study, r = r) r_list <- rbeta(K, 2, 2) V_multiple_r <- impute_covariance_matrix(vi = dat$vi, cluster = dat$study, r = r_list) check_correlation <- function(M, r) if (nrow(M) > 1) max(abs(cov2cor(M)[lower.tri(M)] - r)) else 0 check_singles <- sapply(V_single_r, check_correlation, r = r) expect_true(all(check_singles < 10^-14)) check_multiples <- Map(check_correlation, M = V_multiple_r, r = r_list) expect_true(all(check_multiples < 10^-14)) dat_scramble <- dat[sample(nrow(dat)),] V_mat <- impute_covariance_matrix(vi = dat_scramble$vi, cluster = dat_scramble$study, r = r) expect_equal(dat_scramble$vi, diag(V_mat)) V_resorted <- V_mat[order(dat_scramble$study), order(dat_scramble$study)] dat_unscramble <- dat_scramble[order(dat_scramble$study),] V_unscramble <- impute_covariance_matrix(vi = dat_unscramble$vi, cluster = dat_unscramble$study, r = r) expect_equal(V_resorted, metafor::bldiag(V_unscramble)) }) test_that("impute_covariance_matrix works with unobserved factors.", { K <- 10 N <- sum(1:K) dat <- data.frame(study = rep(LETTERS[1:K], 1:K), yi = rnorm(N), vi = rchisq(N, df = 2)) levels(dat$study) <- LETTERS[1:(K + 3)] r <- 0.7 V_single_r <- impute_covariance_matrix(vi = dat$vi, cluster = dat$study, r = r) r_list <- rbeta(K, 2, 2) V_multiple_r <- impute_covariance_matrix(vi = dat$vi, cluster = dat$study, r = r_list) check_correlation <- function(M, r) if (nrow(M) > 1) max(abs(cov2cor(M)[lower.tri(M)] - r)) else 0 check_singles <- sapply(V_single_r, check_correlation, r = r) expect_true(all(check_singles < 10^-14)) dat_scramble <- dat[sample(nrow(dat)),] V_mat <- impute_covariance_matrix(vi = dat_scramble$vi, cluster = dat_scramble$study, r = r) expect_equal(dat_scramble$vi, diag(V_mat)) 
V_resorted <- V_mat[order(dat_scramble$study), order(dat_scramble$study)] dat_unscramble <- dat_scramble[order(dat_scramble$study),] V_unscramble <- impute_covariance_matrix(vi = dat_unscramble$vi, cluster = dat_unscramble$study, r = r) expect_equal(V_resorted, metafor::bldiag(V_unscramble)) }) clubSandwich/tests/testthat/test_plm-first-differences.R0000644000176200001440000001115313500611664023237 0ustar liggesuserscontext("plm objects - first differences model") set.seed(20190513) library(plm, quietly=TRUE) data(Fatalities, package = "AER") Fatalities <- within(Fatalities, { frate <- 10000 * fatal / pop drinkagec <- cut(drinkage, breaks = 18:22, include.lowest = TRUE, right = FALSE) drinkagec <- relevel(drinkagec, ref = 4) }) plm_FD <- plm(frate ~ beertax + drinkagec + miles + unemp + log(income), data = Fatalities, index = c("state", "year"), model = "fd") n_obs <- nobs(plm_FD) target <- with(Fatalities, 1 / pop[year != levels(year)[1]]) test_that("bread works", { y <- na.omit(diff(plm_FD$model$frate)) cluster <- findCluster.plm(plm_FD) expect_true(check_bread(plm_FD, cluster = cluster, y = y)) sigma_sq <- with(plm_FD, sum(residuals^2) / df.residual) expect_equal(vcov(plm_FD), bread(plm_FD) * sigma_sq / v_scale(plm_FD)) }) test_that("CR0 and CR1S agree with arellano vcov", { expect_equal(vcovHC(plm_FD, method="arellano", type = "HC0", cluster = "group"), as.matrix(vcovCR(plm_FD, type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_FD, method="arellano", type = "sss", cluster = "group"), as.matrix(vcovCR(plm_FD, type = "CR1S")), check.attributes = FALSE) X <- model_matrix(plm_FD) e <- residuals(plm_FD) index <- attr(model.frame(plm_FD), "index") cluster <- index[[2]] cluster <- cluster[index[[2]] != levels(index[[2]])[1]] estmats <- sapply(split.data.frame(e * X, cluster, drop = TRUE), colSums) meat <- tcrossprod(estmats) bread <- chol2inv(chol(crossprod(X))) vcov_time <- bread %*% meat %*% bread attr(vcov_time, "dimnames") <- attr(meat, "dimnames") 
expect_equal(vcov_time, as.matrix(vcovCR(plm_FD, cluster = "time", type = "CR0"))) baloney <- tcrossprod(estmats[,-6]) vcov_baloney <- bread %*% baloney %*% bread attr(vcov_baloney, "dimnames") <- attr(baloney, "dimnames") expect_equal(vcov_baloney, vcovHC(plm_FD, method="arellano", type = "HC0", cluster = "time"), check.attributes = FALSE) }) test_that("vcovCR options work for CR2", { CR2_iv <- vcovCR(plm_FD, type = "CR2") expect_identical(vcovCR(plm_FD, cluster = Fatalities$state, type = "CR2"), CR2_iv) expect_identical(vcovCR(plm_FD, type = "CR2", inverse_var = TRUE), CR2_iv) expect_identical(vcovCR(plm_FD, type = "CR2", target = rep(1, n_obs), inverse_var = TRUE), CR2_iv) CR2_not <- vcovCR(plm_FD, type = "CR2", inverse_var = FALSE) expect_equivalent(CR2_not, CR2_iv) expect_identical(vcovCR(plm_FD, cluster = Fatalities$state, type = "CR2", inverse_var = FALSE), CR2_not) expect_identical(vcovCR(plm_FD, type = "CR2", target = rep(1, n_obs)), CR2_not) expect_identical(vcovCR(plm_FD, type = "CR2", target = rep(1, n_obs), inverse_var = FALSE), CR2_not) expect_false(identical(vcovCR(plm_FD, type = "CR2", target = target), CR2_not)) }) test_that("vcovCR options work for CR4", { skip_on_cran() CR4_iv <- vcovCR(plm_FD, type = "CR4") expect_identical(vcovCR(plm_FD, cluster = Fatalities$state, type = "CR4"), CR4_iv) expect_identical(vcovCR(plm_FD, type = "CR4", inverse_var = TRUE), CR4_iv) expect_identical(vcovCR(plm_FD, type = "CR4", target = rep(1, n_obs), inverse_var = TRUE), CR4_iv) CR4_not <- vcovCR(plm_FD, type = "CR4", inverse_var = FALSE) expect_equivalent(CR4_not, CR4_iv, tolerance = 10^-6) expect_identical(vcovCR(plm_FD, cluster = Fatalities$state, type = "CR4", inverse_var = FALSE), CR4_not) expect_identical(vcovCR(plm_FD, type = "CR4", target = rep(1, n_obs)), CR4_not) expect_identical(vcovCR(plm_FD, type = "CR4", target = rep(1, n_obs), inverse_var = FALSE), CR4_not) expect_false(identical(vcovCR(plm_FD, type = "CR4", target = target), CR4_not)) }) 
test_that("CR2 is target-unbiased", { expect_true(check_CR(plm_FD, vcov = "CR2", tol = 10^-7)) expect_true(check_CR(plm_FD, vcov = "CR2", inverse_var = FALSE)) expect_true(check_CR(plm_FD, cluster = "time", vcov = "CR2", tol = 10^-7)) expect_true(check_CR(plm_FD, cluster = "time", vcov = "CR2", inverse_var = FALSE)) }) test_that("vcovCR is equivalent to vcovHC when clusters are all of size 1", { CR_types <- paste0("CR",c(0,2)) HC_types <- paste0("HC",c(0,2)) CR_individual <- lapply(CR_types, function(t) as.matrix(vcovCR(plm_FD, cluster = 1:nrow(Fatalities), type = t))) HC_individual <- lapply(HC_types, function(t) vcovHC(plm_FD, method = "white1", type = t)) expect_equal(CR_individual, HC_individual, check.attributes = FALSE) }) clubSandwich/tests/testthat/test_lm.R0000644000176200001440000003005513500611664017461 0ustar liggesuserscontext("lm objects") set.seed(20190513) m <- 8 cluster <- factor(rep(LETTERS[1:m], 3 + rpois(m, 5))) n <- length(cluster) X <- matrix(rnorm(3 * n), n, 3) nu <- rnorm(m)[cluster] e <- rnorm(n) w <- rgamma(n, shape = 3, scale = 3) y <- X %*% c(.4, .3, -.3) + nu + e dat <- data.frame(y, X, cluster, w, row = 1:n) lm_fit <- lm(y ~ X1 + X2 + X3, data = dat) WLS_fit <- lm(y ~ X1 + X2 + X3, data = dat, weights = w) CR_types <- paste0("CR",0:4) # obj <- WLS_fit # y <- dat$y # type <- "CR2" # vcov <- vcovCR(obj, cluster = cluster, type = type) # target = NULL # inverse_var = FALSE test_that("bread works", { expect_true(check_bread(lm_fit, cluster = dat$cluster, y = dat$y)) lm_vcov <- bread(lm_fit) * summary(lm_fit)$sigma^2 / v_scale(lm_fit) expect_equal(vcov(lm_fit), lm_vcov) expect_true(check_bread(WLS_fit, cluster = dat$cluster, y = dat$y)) wls_vcov <- bread(WLS_fit) * summary(WLS_fit)$sigma^2 / v_scale(WLS_fit) expect_equal(vcov(WLS_fit), wls_vcov) }) test_that("vcovCR options don't matter for CR0", { expect_error(vcovCR(lm_fit, type = "CR0")) CR0 <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR0") expect_output(print(CR0)) attr(CR0, 
"target") <- NULL attr(CR0, "inverse_var") <- NULL CR0_A <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR0", target = 1 / dat$w) attr(CR0_A, "target") <- NULL attr(CR0_A, "inverse_var") <- NULL expect_identical(CR0_A, CR0) CR0_B <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR0", target = 1 / dat$w, inverse_var = FALSE) attr(CR0_B, "target") <- NULL attr(CR0_B, "inverse_var") <- NULL expect_identical(CR0_A, CR0) CR0_C <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR0", target = 1 / dat$w, inverse_var = TRUE) attr(CR0_C, "target") <- NULL attr(CR0_C, "inverse_var") <- NULL expect_identical(CR0_C, CR0) wCR0 <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR0") attr(wCR0, "target") <- NULL attr(wCR0, "inverse_var") <- NULL wCR0_A <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR0", target = 1 / dat$w) attr(wCR0_A, "target") <- NULL attr(wCR0_A, "inverse_var") <- NULL expect_identical(wCR0_A, wCR0) wCR0_B <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR0", target = 1 / dat$w, inverse_var = FALSE) attr(wCR0_B, "target") <- NULL attr(wCR0_B, "inverse_var") <- NULL expect_identical(wCR0_B, wCR0) wCR0_C <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR0", target = 1 / dat$w, inverse_var = TRUE) attr(wCR0_C, "target") <- NULL attr(wCR0_C, "inverse_var") <- NULL expect_identical(wCR0_C, wCR0) }) test_that("vcovCR options work for CR2", { CR2_iv <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR2") expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR2", inverse_var = TRUE), CR2_iv) expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR2", target = rep(1, n), inverse_var = TRUE), CR2_iv) attr(CR2_iv, "inverse_var") <- FALSE CR2_not <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR2", inverse_var = FALSE) expect_equal(CR2_not, CR2_iv) expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR2", target = rep(1, n)), CR2_not) expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR2", target = rep(1, n), inverse_var 
= FALSE), CR2_not) expect_false(identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR2", target = 1 / dat$w), CR2_not)) wCR2_id <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR2") expect_identical(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR2", inverse_var = FALSE), wCR2_id) expect_identical(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR2", target = rep(1, n)), wCR2_id) expect_identical(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR2", target = rep(1, n), inverse_var = FALSE), wCR2_id) wCR2_iv <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR2", inverse_var = TRUE) wCR2_target <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR2", target = 1 / dat$w, inverse_var = TRUE) expect_false(identical(wCR2_target, wCR2_id)) expect_equal(matrix(wCR2_target, dim(wCR2_target)), matrix(wCR2_iv, dim(wCR2_iv))) expect_equal(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR2", target = 1 / dat$w, inverse_var = TRUE), wCR2_target) }) test_that("vcovCR options work for CR4", { CR4_iv <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR4") expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR4", inverse_var = TRUE), CR4_iv) expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR4", target = rep(1, n), inverse_var = TRUE), CR4_iv) attr(CR4_iv, "inverse_var") <- FALSE CR4_not <- vcovCR(lm_fit, cluster = dat$cluster, type = "CR4", inverse_var = FALSE) expect_equal(CR4_not, CR4_iv) expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR4", target = rep(1, n)), CR4_not) expect_identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR4", target = rep(1, n), inverse_var = FALSE), CR4_not) expect_false(identical(vcovCR(lm_fit, cluster = dat$cluster, type = "CR4", target = 1 / dat$w), CR4_not)) wCR4_id <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR4") expect_identical(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR4", inverse_var = FALSE), wCR4_id) expect_identical(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR4", target = 
rep(1, n)), wCR4_id) expect_identical(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR4", target = rep(1, n), inverse_var = FALSE), wCR4_id) wCR4_iv <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR4", inverse_var = TRUE) wCR4_target <- vcovCR(WLS_fit, cluster = dat$cluster, type = "CR4", target = 1 / dat$w, inverse_var = TRUE) expect_false(identical(wCR4_target, wCR4_id)) expect_identical(matrix(wCR4_target, dim(wCR4_target)), matrix(wCR4_iv, dim(wCR4_iv))) expect_equal(vcovCR(WLS_fit, cluster = dat$cluster, type = "CR4", target = 1 / dat$w, inverse_var = TRUE), wCR4_target) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(lm_fit, vcov = "CR2", cluster = dat$cluster)) expect_true(check_CR(WLS_fit, vcov = "CR2", cluster = dat$cluster)) expect_true(check_CR(lm_fit, vcov = "CR4", cluster = dat$cluster)) expect_true(check_CR(WLS_fit, vcov = "CR4", cluster = dat$cluster)) }) test_that("vcovCR is equivalent to vcovHC when clusters are all of size 1", { library(sandwich, quietly=TRUE) CR0 <- vcovCR(lm_fit, cluster = dat$row, type = "CR0") expect_equal(vcovHC(lm_fit, type = "HC0"), as.matrix(CR0)) CR1 <- vcovCR(lm_fit, cluster = dat$row, type = "CR1S") expect_equal(vcovHC(lm_fit, type = "HC1"), as.matrix(CR1)) CR2 <- vcovCR(lm_fit, cluster = dat$row, type = "CR2") expect_equal(vcovHC(lm_fit, type = "HC2"), as.matrix(CR2)) CR3 <- vcovCR(lm_fit, cluster = dat$row, type = "CR3") expect_equal(vcovHC(lm_fit, type = "HC3"), as.matrix(CR3)) }) test_that("CR2 is equivalent to Welch t-test for DiD design", { m0 <- 4 m1 <- 9 m <- m0 + m1 cluster <- factor(rep(LETTERS[1:m], each = 2)) n <- length(cluster) time <- rep(c(1,2), m) trt_clusters <- c(rep(0,m0), rep(1,m1)) trt <- (time - 1) * rep(trt_clusters, each = 2) nu <- rnorm(m)[cluster] e <- rnorm(n) y <- 0.4 * trt + nu + e dat <- data.frame(y, time, trt, cluster) lm_DID <- lm(y ~ cluster + factor(time) + trt, data = dat) t_Satt <- coef_test(lm_DID, vcov = "CR2", cluster = dat$cluster)["trt",] y_diff <- 
apply(matrix(y, nrow = 2), 2, diff) t_Welch <- t.test(y_diff ~ trt_clusters) expect_equal(with(t_Welch, estimate[[2]] - estimate[[1]]), t_Satt$beta) expect_equal(as.numeric(-t_Welch$statistic), with(t_Satt, beta / SE)) expect_is(all.equal(as.numeric(t_Welch$parameter), t_Satt$df), "character") df <- m^2 * (m0 - 1) * (m1 - 1) / (m0^2 * (m0 - 1) + m1^2 * (m1 - 1)) expect_equal(t_Satt$df, df) }) test_that("Order doesn't matter.",{ dat_scramble <- dat[sample(n),] WLS_scramble <- update(WLS_fit, data = dat_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(WLS_fit, cluster = dat$cluster, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(WLS_scramble, cluster = dat_scramble$cluster, type = x)) expect_equivalent(CR_fit, CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(WLS_fit, vcov = x, cluster = dat$cluster, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(WLS_scramble, vcov = x, cluster = dat_scramble$cluster, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef(lm_fit)), 2, simplify = FALSE) Wald_fit <- Wald_test(WLS_fit, constraints = constraints, vcov = "CR2", cluster = dat$cluster, test = "All") Wald_scramble <- Wald_test(WLS_scramble, constraints = constraints, vcov = "CR2", cluster = dat_scramble$cluster, test = "All") expect_equal(Wald_fit, Wald_scramble) }) test_that("clubSandwich works with dropped observations", { dat_miss <- dat miss_indicator <- sample.int(n, size = round(n / 10)) dat_miss$X1[miss_indicator] <- NA dat_miss$cluster[miss_indicator] <- NA lm_dropped <- lm(y ~ X1 + X2 + X3, data = dat_miss) dat_complete <- subset(dat_miss, !is.na(X1)) lm_complete <- lm(y ~ X1 + X2 + X3, data = dat_complete) CR_drop <- lapply(CR_types, function(x) vcovCR(lm_dropped, cluster = dat_miss$cluster, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(lm_complete, cluster = dat_complete$cluster, type = x)) 
expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(lm_dropped, vcov = x, cluster = dat_miss$cluster, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(lm_complete, vcov = x, cluster = dat_complete$cluster, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("clubSandwich requires no missing values on the clustering variable", { dat_miss <- dat miss_indicator <- sample.int(n, size = round(n / 10)) dat_miss$cluster[miss_indicator] <- NA lm_dropped <- lm(y ~ X1 + X2 + X3, data = dat_miss) expect_error(vcovCR(lm_dropped, cluster = dat_miss$cluster, type = "CR0"), "Clustering variable cannot have missing values.") expect_error(coef_test(lm_dropped, vcov = "CR0", cluster = dat_miss$cluster, test = "All"), "Clustering variable cannot have missing values.") }) test_that("clubSandwich works with aliased predictors", { data(npk, package = "datasets") npk_alias <- lm(yield ~ block + N*P*K, npk) npk_drop <- lm(yield ~ block + N + P + K + N:P + N:K + P:K, npk) CR_alias <- lapply(CR_types[-4], function(x) vcovCR(npk_alias, cluster = npk$block, type = x)) CR_drop <- lapply(CR_types[-4], function(x) vcovCR(npk_drop, cluster = npk$block, type = x)) expect_identical(CR_alias, CR_drop) test_drop <- lapply(CR_types[-4], function(x) coef_test(npk_alias, vcov = x, cluster = npk$block, test = c("z","naive-t","Satterthwaite"), p_values = FALSE)) test_complete <- lapply(CR_types[-4], function(x) coef_test(npk_drop, vcov = x, cluster = npk$block, test = c("z","naive-t","Satterthwaite"), p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("weight scale doesn't matter", { lm_fit_w <- lm(y ~ X1 + X2 + X3, data = dat, weights = rep(4, nrow(dat))) unweighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit, cluster = cluster, type = x)) weighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit_w, cluster = cluster, type = x)) 
expect_equal(lapply(unweighted_fit, as.matrix), lapply(weighted_fit, as.matrix), tol = 5 * 10^-7) target <- 1 + rpois(nrow(dat), lambda = 8) unweighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit, cluster = cluster, type = x, target = target)) weighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit_w, cluster = cluster, type = x, target = target * 15)) expect_equal(lapply(unweighted_fit, as.matrix), lapply(weighted_fit, as.matrix), tol = 5 * 10^-7) }) clubSandwich/tests/testthat/test_ignore_absorption.R0000644000176200001440000000527213500611664022577 0ustar liggesuserscontext("ignoring absorbed fixed effects") set.seed(20190513) library(plm) data(MortalityRates) MV_Mortality <- subset(MortalityRates, cause=="Motor Vehicle" & state %in% 1:8) table(MV_Mortality$state) MV_Mortality$state_fac <- factor(MV_Mortality$state) # MV_Mortality$pop <- with(MV_Mortality, 1 + rbinom(nlevels(state_fac), size = 4, prob = 0.5)[state_fac]) summary(MV_Mortality$pop) MV_Mortality$pop_scale <- with(MV_Mortality, pop / mean(pop)) summary(MV_Mortality$pop_scale) # model specification specification <- mrate ~ 0 + legal + beertaxa + beerpercap + winepercap + factor(state) #----------------------- # unweighted #----------------------- ols_LSDV <- lm(specification, data = MV_Mortality) ols_within <- plm(update(specification, . ~ . - 0 - factor(state)), data = MV_Mortality, effect = "individual", index = c("state","year")) test_that("Unweighted lsdv and within estimators are equivalent", { lsdv <- coef_test(ols_LSDV, vcov = "CR2", cluster = MV_Mortality$state, coefs = 1:4, p_values = FALSE) wthn <- coef_test(ols_within, vcov = "CR2", p_values = FALSE) expect_equal(lsdv, wthn) }) #----------------------- # iv-weights #----------------------- wls_LSDV <- lm(specification, weights = pop_scale, data = MV_Mortality) MV_Mortality_full <- model.frame(lm(specification, weights = pop_scale, data = MV_Mortality)) U_mat <- model.matrix(update(specification, . ~ . 
- factor(state)), data = MV_Mortality_full) T_mat <- model.matrix(~ factor(state), data = MV_Mortality_full) w <- MV_Mortality_full$"(weights)" state <- MV_Mortality_full$"factor(state)" U_absorb <- residuals(stats:::lm.wfit(x = T_mat, y = U_mat, w = w))[,-31] Y_absorb <- residuals(stats:::lm.wfit(x = T_mat, y = MV_Mortality_full$mrate, w = w)) wls_within <- lm(Y_absorb ~ 0 + U_absorb, weights = w) test_that("Inverse-variance weighted lsdv and within estimators are equivalent.", { lsdv <- coef_test(wls_LSDV, vcov = "CR2", cluster = MV_Mortality$state, inverse_var = TRUE, p_values = FALSE)[1:4,] wthn <- coef_test(wls_within, vcov = "CR2", cluster = state, inverse_var = TRUE, p_values = FALSE)[1:4,] lsdv / wthn expect_equal(lsdv, wthn, check.attributes = FALSE, tolerance = 10^-3) }) #----------------------- # p-weights #----------------------- test_that("Probability-weighted lsdv and within estimators are not necessarily equivalent.", { lsdv <- coef_test(wls_LSDV, vcov = "CR2", cluster = MV_Mortality$state, inverse_var = FALSE, coefs = 1:4, p_values = FALSE) wthn <- coef_test(wls_within, vcov = "CR2", cluster = state, inverse_var = FALSE, p_values = FALSE) lsdv / wthn expect_equal(lsdv, wthn, check.attributes = FALSE, tolerance = 10^-2) }) clubSandwich/tests/testthat/test_rma-mv.R0000644000176200001440000002647313500611664020261 0ustar liggesuserscontext("rma.mv objects") set.seed(20190513) library(robumeta, quietly=TRUE) suppressMessages(library(metafor, quietly=TRUE)) data(corrdat) corr_robu <- robu(effectsize ~ males + college + binge, data = corrdat, modelweights = "CORR", studynum = studyid, var.eff.size = var) corrdat$wt <- corr_robu$data.full$r.weights corr_meta <- rma.mv(effectsize ~ males + college + binge, data = corrdat, V = var, W = wt, method = "FE") test_that("CR2 t-tests agree with robumeta for correlated effects", { robu_CR2 <- vcovCR(corr_meta, cluster = corrdat$studyid, target = 1 / corrdat$wt, type = "CR2") expect_true(check_CR(corr_meta, vcov = 
robu_CR2)) # expect_true(check_CR(corr_meta, vcov = "CR4", cluster = corrdat$studyid)) expect_equivalent(as.matrix(robu_CR2), corr_robu$VR.r) expect_that(all.equal(as.matrix(vcovCR(corr_meta, cluster = corrdat$studyid, inverse_var = TRUE, type = "CR2")), corr_robu$VR.r), is_a("character")) CR2_ttests <- coef_test(corr_meta, vcov = robu_CR2, test = "Satterthwaite") expect_equal(corr_robu$dfs, CR2_ttests$df) expect_equal(corr_robu$reg_table$prob, CR2_ttests$p_Satt) }) data(hierdat) hier_meta <- rma.mv(effectsize ~ binge + followup + sreport + age, data = hierdat, random = list(~ 1 | esid, ~ 1 | studyid), V = var, method = "REML") hier_robu <- robu(effectsize ~ binge + followup + sreport + age, data = hierdat, studynum = studyid, var.eff.size = var, modelweights = "HIER") test_that("CR2 t-tests do not exactly agree with robumeta for hierarchical weighting", { robu_CR2_iv <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid) robu_CR2_not <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = hier_robu$data.full$avg.var.eff.size) expect_true(check_CR(hier_meta, vcov = robu_CR2_iv)) # expect_true(check_CR(hier_meta, vcov = "CR4")) expect_true(check_CR(hier_meta, vcov = robu_CR2_not)) # expect_true(check_CR(hier_meta, vcov = "CR4", # target = hier_robu$data.full$avg.var.eff.size)) expect_that(all.equal(hier_robu$VR.r, as.matrix(robu_CR2_iv), check.attributes=FALSE), is_a("character")) expect_that(all.equal(hier_robu$VR.r, as.matrix(robu_CR2_not), check.attributes=FALSE), is_a("character")) CR2_ttests <- coef_test(hier_meta, vcov = robu_CR2_not, test = "Satterthwaite") expect_that(all.equal(hier_robu$dfs, CR2_ttests$df), is_a("character")) expect_that(all.equal(hier_robu$reg_table$prob, CR2_ttests$p_Satt), is_a("character")) }) CR_types <- paste0("CR",0:4) dat_long <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) levels(dat_long$group) <- c("exp", "con") dat_long$group <- relevel(dat_long$group, ref="con") dat_long$esid <- 
factor(1:nrow(dat_long)) dat_long <- escalc(measure="PLO", xi=out1, mi=out2, data=dat_long) rma_G <- rma.mv(yi, vi, mods = ~ group, random = ~ group | study, struct="CS", data=dat_long) rma_S <- rma.mv(yi, vi, mods = ~ group, random = list(~ 1 | esid, ~ 1 | study), data=dat_long) test_that("withS and withG model specifications agree.", { CR_G <- lapply(CR_types, function(x) vcovCR(rma_G, type = x)) CR_S <- lapply(CR_types, function(x) vcovCR(rma_S, type = x)) expect_equivalent(CR_G, CR_S) tests_G <- lapply(CR_types, function(x) coef_test(rma_G, vcov = x, test = "All", p_values = FALSE)) tests_S <- lapply(CR_types, function(x) coef_test(rma_S, vcov = x, test = "All", p_values = FALSE)) expect_equal(tests_G, tests_S, tolerance = 10^-6) }) test_that("bread works", { expect_true(check_bread(corr_meta, cluster = corrdat$studyid, y = corrdat$effectsize)) X <- model_matrix(corr_meta) W <- corr_meta$W V <- corr_meta$vi vcov_corr <- bread(corr_meta) %*% t(X) %*% W %*% (V * W) %*% X %*% bread(corr_meta) / nobs(corr_meta)^2 attr(vcov_corr, "dimnames") <- attr(vcov(corr_meta), "dimnames") expect_equal(vcov(corr_meta), vcov_corr) expect_true(check_bread(hier_meta, cluster = hierdat$studyid, y = hierdat$effectsize)) expect_equal(vcov(hier_meta), bread(hier_meta) / nobs(hier_meta)) expect_true(check_bread(rma_G, cluster = dat_long$study, y = dat_long$yi)) expect_equal(vcov(rma_G), bread(rma_G) / nobs(rma_G)) expect_true(check_bread(rma_S, cluster = dat_long$study, y = dat_long$yi)) expect_equal(vcov(rma_S), bread(rma_S) / nobs(rma_S)) }) test_that("order doesn't matter", { dat_scramble <- hierdat[sample(nrow(hierdat)),] hier_scramble <- rma.mv(effectsize ~ binge + followup + sreport + age, random = list(~ 1 | esid, ~ 1 | studyid), data = dat_scramble, V = var, method = "REML") CR_fit <- lapply(CR_types, function(x) vcovCR(hier_meta, type = x, cluster = hierdat$studyid)) CR_scramble <- lapply(CR_types, function(x) vcovCR(hier_scramble, type = x, cluster = dat_scramble$studyid)) 
expect_equivalent(CR_fit, CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(hier_meta, vcov = x, cluster = hierdat$studyid, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(hier_scramble, vcov = x, cluster = dat_scramble$studyid, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef(hier_scramble)), 2, simplify = FALSE) Wald_fit <- Wald_test(hier_meta, constraints = constraints, vcov = "CR2", cluster = hierdat$studyid, test = "All") Wald_scramble <- Wald_test(hier_scramble, constraints = constraints, vcov = "CR2", cluster = dat_scramble$studyid, test = "All") expect_equal(Wald_fit, Wald_scramble) }) test_that("clubSandwich works with dropped covariates", { dat_miss <- hierdat dat_miss$binge[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 10))] <- NA dat_miss$followup[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 20))] <- NA expect_warning(hier_drop <- rma.mv(effectsize ~ binge + followup + sreport + age, random = list(~ 1 | esid, ~ 1 | studyid), data = dat_miss, V = var, method = "REML")) hier_complete <- rma.mv(effectsize ~ binge + followup + sreport + age, random = list(~ 1 | esid, ~ 1 | studyid), subset = !is.na(binge) & !is.na(followup), data = dat_miss, V = var, method = "REML") expect_error(vcovCR(hier_complete, type = "CR0", cluster = dat_miss$studyid)) CR_drop_A <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x)) CR_drop_B <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x, cluster = dat_miss$studyid)) CR_complete <- lapply(CR_types, function(x) vcovCR(hier_complete, type = x)) expect_equal(CR_drop_A, CR_complete) expect_equal(CR_drop_B, CR_complete) test_drop_A <- lapply(CR_types, function(x) coef_test(hier_drop, vcov = x, test = "All", p_values = FALSE)) test_drop_B <- lapply(CR_types, function(x) coef_test(hier_drop, vcov = x, cluster = dat_miss$studyid, test = "All", p_values = FALSE)) test_complete 
<- lapply(CR_types, function(x) coef_test(hier_complete, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_drop_A, test_complete, tolerance = 10^-6) expect_equal(test_drop_B, test_complete, tolerance = 10^-6) }) test_that("clubSandwich works with missing diagonal variances", { dat_miss <- hierdat dat_miss$var[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 10))] <- NA expect_warning(hier_drop <- rma.mv(effectsize ~ binge + followup + sreport + age, random = list(~ 1 | esid, ~ 1 | studyid), data = dat_miss, V = var, method = "REML")) hier_complete <- rma.mv(effectsize ~ binge + followup + sreport + age, random = list(~ 1 | esid, ~ 1 | studyid), subset = !is.na(var), data = dat_miss, V = var, method = "REML") expect_error(vcovCR(hier_complete, type = "CR0", cluster = dat_miss$studyid)) CR_drop_A <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x)) CR_drop_B <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x, cluster = dat_miss$studyid)) CR_complete <- lapply(CR_types, function(x) vcovCR(hier_complete, type = x)) expect_equal(CR_drop_A, CR_complete) expect_equal(CR_drop_B, CR_complete) }) test_that("clubSandwich works with missing vcov matrix", { skip_if(packageVersion("metafor") < 2.1) dat_miss <- corrdat dat_miss$var[sample.int(nrow(corrdat), size = round(nrow(corrdat) / 10))] <- NA V_missing <- impute_covariance_matrix(dat_miss$var, cluster = dat_miss$studyid, r = 0.8) expect_warning(corr_drop <- rma.mv(effectsize ~ males + college + binge, random = ~ 1 | studyid, V = V_missing, data = dat_miss)) corr_complete <- rma.mv(effectsize ~ males + college + binge, random = ~ 1 | studyid, subset = !is.na(var), data = dat_miss, V = V_missing) expect_error(vcovCR(corr_complete, type = "CR0", cluster = dat_miss$studyid)) CR_drop <- lapply(CR_types, function(x) vcovCR(corr_drop, cluster = dat_miss$studyid, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(corr_complete, type = x)) expect_equal(CR_drop, CR_complete) # V_complete <- 
impute_covariance_matrix(corrdat$var, cluster = corrdat$studyid, r = 0.8) # W_missing <- lapply(V_complete, function(x) chol2inv(chol(x))) # # corr_drop <- rma.mv(effectsize ~ males + college + binge, # random = ~ 1 | studyid, # V = V_complete, W = bldiag(W_missing), # data = dat_miss) # # corr_complete <- rma.mv(effectsize ~ males + college + binge, # random = ~ 1 | studyid, # V = V_complete, W = bldiag(W_missing), # data = dat_miss, subset = !is.na(var)) # # expect_error(vcovCR(corr_complete, type = "CR0", cluster = dat_miss$studyid)) # # CR_drop <- lapply(CR_types, function(x) vcovCR(corr_drop, type = x)) # CR_complete <- lapply(CR_types, function(x) vcovCR(corr_complete, type = x)) # expect_equal(CR_drop, CR_complete) }) test_that("vcovCR options work for CR2", { RE_var <- targetVariance(hier_meta, cluster = factor(hierdat$studyid)) CR2_iv <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid) expect_identical(vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, inverse_var = TRUE), CR2_iv) CR2_not <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, inverse_var = FALSE) expect_equal(CR2_not, CR2_iv) expect_equivalent(vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = RE_var), CR2_not) expect_equivalent(vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = RE_var, inverse_var = FALSE), CR2_not) expect_false(identical(vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = hierdat$var), CR2_not)) }) clubSandwich/tests/testthat/test_plm-ID-variables.R0000644000176200001440000001373113500611664022103 0ustar liggesuserscontext("plm objects - ID variables") set.seed(20190513) library(plm, quietly=TRUE) data("Produc", package = "plm") Produc <- Produc[sample(nrow(Produc)),] Produc$cluster <- sample(LETTERS[1:10], size = nrow(Produc), replace=TRUE) plm_individual <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = "state", effect = "individual", model = "within") 
lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) individual_names <- names(coef(plm_individual)) individual_index <- names(coef(lm_individual)) %in% individual_names lm_CR0 <- vcovCR(lm_individual, cluster = Produc$state, type = "CR0")[individual_index,individual_index] lm_CR1 <- vcovCR(lm_individual, cluster = Produc$state, type = "CR1")[individual_index,individual_index] lm_CR2 <- vcovCR(lm_individual, cluster = Produc$state, type = "CR2")[individual_index,individual_index] plm_CR0 <- vcovCR(plm_individual, type="CR0")[individual_names,individual_names] test_that("individual effects agree with lm under automatic clustering", { plm_CR1 <- vcovCR(plm_individual, type="CR1")[individual_names,individual_names] plm_CR2 <- vcovCR(plm_individual, type="CR2")[individual_names,individual_names] expect_equal(plm_CR0, lm_CR0) expect_equal(plm_CR1, lm_CR1) expect_equal(plm_CR2, lm_CR2) }) test_that("individual effects agree with lm under explicit clustering", { plm_CR1 <- vcovCR(plm_individual, cluster = Produc$state, type="CR1")[individual_names,individual_names] plm_CR2 <- vcovCR(plm_individual, cluster = Produc$state, type="CR2")[individual_names,individual_names] expect_equal(plm_CR1, lm_CR1) expect_equal(plm_CR2, lm_CR2) }) test_that("individual effects agree with lm under random clustering", { lm_CR1 <- vcovCR(lm_individual, cluster = Produc$cluster, type = "CR1")[individual_index,individual_index] lm_CR2 <- vcovCR(lm_individual, cluster = Produc$cluster, type = "CR2")[individual_index,individual_index] plm_CR1 <- vcovCR(plm_individual, cluster = Produc$cluster, type="CR1")[individual_names,individual_names] plm_CR2 <- vcovCR(plm_individual, cluster = Produc$cluster, type="CR2")[individual_names,individual_names] expect_equal(plm_CR1, lm_CR1) expect_equal(plm_CR2, lm_CR2) }) test_that("CR0 and CR1S agree with arellano vcov", { expect_equal(vcovHC(plm_individual, method="arellano", type = "HC0", cluster = "group"), 
as.matrix(plm_CR0), check.attributes = FALSE) expect_equal(vcovHC(plm_individual, method="arellano", type = "sss", cluster = "group"), as.matrix(vcovCR(plm_individual, type = "CR1S")), check.attributes = FALSE) }) test_that("plm works for Yuki Takahashi's reprex.",{ N <- 100 id <- rep(1:N, 2) gid <- rep(1:(N/2), 4) Trt <- rep(c(0,1), each = N) a <- rep(rnorm(N, mean=0, sd=0.005), 2) gp <- rep(rnorm(N / 2, mean=0, sd=0.0005), 4) u <- rnorm(N * 2, mean=0, sd=0.05) Ylatent <- -0.05 * Trt + gp + a + u Data <- data.frame( Y = ifelse(Ylatent > 0, 1, 0), id, gid, Trt ) fe_fit <- plm(formula = Y ~ Trt, data = Data, model = "within", index = "id", effect = "individual", singular.ok = FALSE) implicit <- vcovCR(fe_fit, type = "CR2") explicit <- vcovCR(fe_fit, cluster=Data$id, type = "CR2") expect_equal(implicit, explicit) expect_s3_class(vcovCR(fe_fit, cluster=Data$gid, type = "CR2"), "vcovCR") }) test_that("Clustering works for various ways of specifying unit and time indices in plm.", { data("Grunfeld", package = "plm") Grunfeld$cluster <- sample(LETTERS[1:10], size = nrow(Grunfeld), replace=TRUE) rearrange <- mapply(function(s,b) seq(s, nrow(Grunfeld), b), 1:10, 11:20) rearrange <- unique(unlist(rearrange)) rearrange <- c(rearrange, setdiff(1:200, rearrange)) Grunfeld_scramble <- Grunfeld[rearrange,] Grunfeld_pdata <- pdata.frame(Grunfeld_scramble, index = c("firm","year")) plm_pdata <- plm(inv ~ value + capital, data = Grunfeld_pdata, model="within") plm_numeric <- plm(inv ~ value + capital, data = Grunfeld, index = 10, model="within") plm_noindex <- plm(inv ~ value + capital, data = Grunfeld_scramble, model="within") plm_oneindex <- plm(inv ~ value + capital, data = Grunfeld_scramble, index = "firm", model="within") plm_twoindex <- plm(inv ~ value + capital, data = Grunfeld_scramble, index = c("firm","year"), model="within") CR_types <- paste0("CR",0:3) # auto clustering vcovCRs <- function(model, types) lapply(types, function(x) vcovCR(model, type = x)) CR_pdata <- 
vcovCRs(plm_pdata, CR_types) expect_equivalent(CR_pdata, vcovCRs(plm_numeric, CR_types)) expect_equivalent(CR_pdata, vcovCRs(plm_noindex, CR_types)) expect_equivalent(CR_pdata, vcovCRs(plm_oneindex, CR_types)) expect_equivalent(CR_pdata, vcovCRs(plm_twoindex, CR_types)) # manual clustering on firm vcovCRs <- function(model, types, cluster) lapply(types, function(x) vcovCR(model, type = x, cluster = cluster)) expect_equivalent(CR_pdata, vcovCRs(plm_numeric, CR_types, cluster = Grunfeld$firm)) expect_equivalent(CR_pdata, vcovCRs(plm_noindex, CR_types, cluster = Grunfeld_scramble$firm)) expect_equivalent(CR_pdata, vcovCRs(plm_oneindex, CR_types, cluster = Grunfeld_scramble$firm)) expect_equivalent(CR_pdata, vcovCRs(plm_twoindex, CR_types, cluster = Grunfeld_scramble$firm)) # manual clustering on arbitrary id CR_pdata <- vcovCRs(plm_pdata, CR_types, cluster = Grunfeld_pdata$cluster) expect_equivalent(CR_pdata, vcovCRs(plm_numeric, CR_types, cluster = Grunfeld$cluster)) expect_equivalent(CR_pdata, vcovCRs(plm_noindex, CR_types, cluster = Grunfeld_scramble$cluster)) expect_equivalent(CR_pdata, vcovCRs(plm_oneindex, CR_types, cluster = Grunfeld_scramble$cluster)) expect_equivalent(CR_pdata, vcovCRs(plm_twoindex, CR_types, cluster = Grunfeld_scramble$cluster)) }) clubSandwich/tests/testthat/test_intercept_formulas.R0000644000176200001440000000433413500611664022757 0ustar liggesuserscontext("population mean estimation") set.seed(20190513) m <- 14 icc <- 0.2 mu <- 5 size <- 2 nj <- 1 + rnbinom(m, size = size, mu = mu) group <- factor(rep(LETTERS[1:m], nj)) N <- sum(nj) Y <- rnorm(m, sd = sqrt(icc))[group] + rnorm(N, sd = sqrt(1 - icc)) y_bar <- tapply(Y, group, mean) lm_fit <- lm(Y ~ 1) test_that("CR0 and df agree with formulas", { CR0 <- coef_test(lm_fit, vcov = "CR0", cluster = group, test = "Satterthwaite") VCR0_f <- sum(nj^2 * (y_bar - mean(Y))^2) / sum(nj)^2 df0_f <- (N^2 - sum(nj^2))^2 / (N^2 * sum(nj^2) - 2 * N * sum(nj^3) + sum(nj^2)^2) 
expect_equal(as.numeric(CR0$SE), sqrt(VCR0_f)) expect_equal(CR0$df, df0_f) }) test_that("CR1 and df agree with formulas", { CR1 <- coef_test(lm_fit, vcov = "CR1", cluster = group, test = "Satterthwaite") VCR1_f <- (m / (m - 1)) * sum(nj^2 * (y_bar - mean(Y))^2) / sum(nj)^2 df1_f <- (N^2 - sum(nj^2))^2 / (N^2 * sum(nj^2) - 2 * N * sum(nj^3) + sum(nj^2)^2) expect_equal(as.numeric(CR1$SE), sqrt(VCR1_f)) expect_equal(CR1$df, df1_f) }) test_that("CR2 and df agree with formulas", { CR2 <- coef_test(lm_fit, vcov = "CR2", cluster = group, test = "Satterthwaite") VCR2_f <- sum(nj^2 * (y_bar - mean(Y))^2 / (1 - nj / N)) / sum(nj)^2 df2_f <- N^2 / (N^2 * sum(nj^2 / (N - nj)^2) - 2 * N * sum(nj^3 / (N - nj)^2) + sum(nj^2 / (N - nj))^2) expect_equal(as.numeric(CR2$SE), sqrt(VCR2_f)) expect_equal(CR2$df, df2_f) }) test_that("CR3 agrees with formula", { CR3 <- coef_test(lm_fit, vcov = "CR3", cluster = group, test = "Satterthwaite") VCR3_f <- sum(nj^2 * (y_bar - mean(Y))^2 / (1 - nj / N)^2) / sum(nj)^2 # df2_f <- N^2 / (N^2 * sum(nj^2 / (N - nj)^2) - 2 * N * sum(nj^3 / (N - nj)^2) + sum(nj^2 / (N - nj))^2) expect_equal(as.numeric(CR3$SE), sqrt(VCR3_f)) # expect_equal(CR2$df, df2_f) }) test_that("CR4 and df agree with formulas", { CR4 <- coef_test(lm_fit, vcov = "CR4", cluster = group, test = "Satterthwaite") VCR4_f <- sum(nj^2 * (y_bar - mean(Y))^2 / (1 - nj / N)) / sum(nj)^2 df4_f <- N^2 / (N^2 * sum(nj^2 / (N - nj)^2) - 2 * N * sum(nj^3 / (N - nj)^2) + sum(nj^2 / (N - nj))^2) expect_equal(as.numeric(CR4$SE), sqrt(VCR4_f)) expect_equal(CR4$df, df4_f) }) clubSandwich/tests/testthat/test_gls.R0000644000176200001440000001641213500611664017637 0ustar liggesuserscontext("gls objects") set.seed(20190513) library(nlme, quietly=TRUE, warn.conflicts=FALSE) data(Ovary, package = "nlme") Ovary$time_int <- 1:nrow(Ovary) lm_hom <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary) lm_power <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary, weights = varPower()) 
lm_AR1 <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary, correlation = corAR1(form = ~ time_int | Mare)) lm_AR1_power <- update(lm_AR1, weights = varPower()) test_that("bread works", { expect_true(check_bread(lm_hom, cluster = Ovary$Mare, y = Ovary$follicles)) expect_true(check_bread(lm_power, cluster = Ovary$Mare, y = Ovary$follicles)) expect_true(check_bread(lm_AR1, cluster = Ovary$Mare, y = Ovary$follicles)) expect_true(check_bread(lm_AR1_power, cluster = Ovary$Mare, y = Ovary$follicles)) expect_equal(vcov(lm_hom), lm_hom$sigma^2 * bread(lm_hom) / v_scale(lm_hom)) expect_equal(vcov(lm_power), lm_power$sigma^2 * bread(lm_power) / v_scale(lm_power)) expect_equal(vcov(lm_AR1), lm_AR1$sigma^2 * bread(lm_AR1) / v_scale(lm_AR1)) expect_equal(vcov(lm_AR1_power), lm_AR1_power$sigma^2 * bread(lm_AR1_power) / v_scale(lm_AR1_power)) }) test_that("vcovCR options work for CR2", { CR2_AR1 <- vcovCR(lm_AR1, type = "CR2") expect_identical(vcovCR(lm_AR1, cluster = Ovary$Mare, type = "CR2"), CR2_AR1) expect_identical(vcovCR(lm_AR1, type = "CR2", inverse_var = TRUE), CR2_AR1) expect_false(identical(vcovCR(lm_AR1, type = "CR2", inverse_var = FALSE), CR2_AR1)) target <- targetVariance(lm_AR1) expect_equal(vcovCR(lm_AR1, type = "CR2", target = target, inverse_var = TRUE), CR2_AR1) attr(CR2_AR1, "inverse_var") <- FALSE expect_equal(vcovCR(lm_AR1, type = "CR2", target = target, inverse_var = FALSE), CR2_AR1) CR2_power <- vcovCR(lm_AR1_power, type = "CR2") expect_identical(vcovCR(lm_AR1_power, cluster = Ovary$Mare, type = "CR2"), CR2_power) expect_identical(vcovCR(lm_AR1_power, type = "CR2", inverse_var = TRUE), CR2_power) expect_false(identical(vcovCR(lm_AR1_power, type = "CR2", inverse_var = FALSE), CR2_power)) target <- targetVariance(lm_AR1_power, cluster = Ovary$Mare) expect_equal(vcovCR(lm_AR1_power, type = "CR2", target = target, inverse_var = TRUE), CR2_power) attr(CR2_power, "inverse_var") <- FALSE expect_equal(vcovCR(lm_AR1_power, type = "CR2", target = target, 
inverse_var = FALSE), CR2_power) }) test_that("vcovCR options work for CR4", { CR4_AR1 <- vcovCR(lm_AR1, type = "CR4") expect_identical(vcovCR(lm_AR1, cluster = Ovary$Mare, type = "CR4"), CR4_AR1) expect_identical(vcovCR(lm_AR1, type = "CR4", inverse_var = TRUE), CR4_AR1) expect_false(identical(vcovCR(lm_AR1, type = "CR4", inverse_var = FALSE), CR4_AR1)) target <- targetVariance(lm_AR1) expect_equal(vcovCR(lm_AR1, type = "CR4", target = target, inverse_var = TRUE), CR4_AR1) attr(CR4_AR1, "inverse_var") <- FALSE expect_equal(vcovCR(lm_AR1, type = "CR4", target = target, inverse_var = FALSE), CR4_AR1) CR4_power <- vcovCR(lm_AR1_power, type = "CR4") expect_identical(vcovCR(lm_AR1_power, cluster = Ovary$Mare, type = "CR4"), CR4_power) expect_identical(vcovCR(lm_AR1_power, type = "CR4", inverse_var = TRUE), CR4_power) expect_false(identical(vcovCR(lm_AR1_power, type = "CR4", inverse_var = FALSE), CR4_power)) target <- targetVariance(lm_AR1_power) expect_equal(vcovCR(lm_AR1_power, type = "CR4", target = target, inverse_var = TRUE), CR4_power) attr(CR4_power, "inverse_var") <- FALSE expect_equal(vcovCR(lm_AR1_power, type = "CR4", target = target, inverse_var = FALSE), CR4_power) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(lm_AR1, vcov = "CR2")) expect_true(check_CR(lm_AR1_power, vcov = "CR2")) expect_true(check_CR(lm_AR1, vcov = "CR4")) expect_true(check_CR(lm_AR1_power, vcov = "CR4")) }) test_that("getData works.", { re_order <- sample(nrow(Ovary)) egg_scramble <- Ovary[re_order,] gls_scramble <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = egg_scramble) scramble_dat <- getData(gls_scramble) expect_identical(egg_scramble, scramble_dat) }) CR_types <- paste0("CR",0:4) test_that("Order doesn't matter.", { re_order <- sample(nrow(Ovary)) dat_scramble <- Ovary[re_order,] lm_scramble <- update(lm_AR1_power, data = dat_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(lm_AR1_power, type = x)) CR_scramble <- lapply(CR_types, 
function(x) vcovCR(lm_scramble, type = x)) expect_equal(lapply(CR_fit, as.matrix), lapply(CR_scramble, as.matrix), tol = 5 * 10^-5) test_fit <- lapply(CR_types, function(x) coef_test(lm_AR1_power, vcov = x, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(lm_scramble, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 5 * 10^-5) constraints <- combn(length(coef(lm_AR1_power)), 2, simplify = FALSE) Wald_fit <- Wald_test(lm_AR1_power, constraints = constraints, vcov = "CR2", test = "All") Wald_scramble <- Wald_test(lm_scramble, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_fit, Wald_scramble, tol = 5 * 10^-5) }) test_that("clubSandwich works with dropped observations", { dat_miss <- Ovary dat_miss$follicles[sample.int(nrow(Ovary), size = round(nrow(Ovary) / 10))] <- NA lm_dropped <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = dat_miss, correlation = corAR1(form = ~ 1 | Mare), na.action = na.omit) lm_complete <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = dat_miss, subset = !is.na(follicles), correlation = corAR1(form = ~ 1 | Mare)) CR_drop <- lapply(CR_types, function(x) vcovCR(lm_dropped, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(lm_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(lm_dropped, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(lm_complete, vcov = x, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("Possible to cluster at higher level than random effects", { # create higher level pair_id <- rep(1:nlevels(Ovary$Mare), each = 3, length.out = nlevels(Ovary$Mare))[Ovary$Mare] re_order <- sample(nrow(Ovary)) dat_scramble <- Ovary[re_order,] pair_scramble <- pair_id[re_order] # cluster at higher level expect_is(vcovCR(lm_hom, type = "CR2", cluster = pair_id), 
"vcovCR") expect_is(vcovCR(lm_power, type = "CR2", cluster = pair_id), "vcovCR") expect_is(vcovCR(lm_AR1, type = "CR2", cluster = pair_id), "vcovCR") V <- vcovCR(lm_AR1_power, type = "CR2", cluster = pair_id) expect_is(V, "vcovCR") expect_error(vcovCR(lm_AR1, type = "CR2", cluster = pair_scramble)) expect_error(vcovCR(lm_AR1_power, type = "CR2", cluster = pair_scramble)) # check that result does not depend on sort-order V_scramble <- vcovCR(update(lm_AR1_power, data = dat_scramble), type = "CR2", cluster = pair_scramble) expect_equal(diag(V), diag(V_scramble), tol = 10^-6) }) clubSandwich/tests/testthat/test_lme_3level.R0000644000176200001440000002546213500611664021106 0ustar liggesuserscontext("3-level lme objects") set.seed(20190513) suppressMessages(library(lme4, quietly=TRUE)) library(nlme, quietly=TRUE, warn.conflicts=FALSE) library(mlmRev, quietly=TRUE, warn.conflicts=FALSE) school_subset <- levels(egsingle$schoolid) school_subset <- sample(school_subset, size = 15) egsingle <- droplevels(subset(egsingle, schoolid %in% school_subset)) obj_A1 <- lme(math ~ year * size + female + black + hispanic, random = list(~ year | schoolid, ~ 1 | childid), data = egsingle) obj_A2 <- update(obj_A1, weights = varIdent(form = ~ 1 | female)) obj_A3 <- update(obj_A1, correlation = corExp(form = ~ year)) obj_A4 <- update(obj_A2, correlation = corExp(form = ~ year)) objects <- list(A1 = obj_A1, A2 = obj_A2, A3 = obj_A3, A4 = obj_A4) CR2_mats <- lapply(objects, vcovCR, type = "CR2") test_that("bread works", { bread_checks <- lapply(objects, check_bread, cluster = egsingle$schoolid, y = egsingle$math) expect_true(all(unlist(bread_checks))) obj_vcovs <- lapply(objects, vcov) obj_bread <- lapply(objects, function(obj) obj$sigma^2 * sandwich::bread(obj) / v_scale(obj)) expect_equal(obj_vcovs, obj_bread) }) test_that("vcovCR options work for CR2", { skip_on_cran() expect_identical(vcovCR(obj_A1, cluster = egsingle$schoolid, type = "CR2"), CR2_mats[["A1"]]) expect_equal(vcovCR(obj_A1, 
type = "CR2", inverse_var = TRUE), CR2_mats[["A1"]]) expect_false(identical(vcovCR(obj_A1, type = "CR2", inverse_var = FALSE), CR2_mats[["A1"]])) target <- targetVariance(obj_A1) expect_equal(vcovCR(obj_A1, type = "CR2", target = target, inverse_var = TRUE), CR2_mats[["A1"]]) attr(CR2_mats[["A1"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A1, type = "CR2", target = target, inverse_var = FALSE), CR2_mats[["A1"]]) expect_identical(vcovCR(obj_A2, cluster = egsingle$schoolid, type = "CR2"), CR2_mats[["A2"]]) expect_equal(vcovCR(obj_A2, type = "CR2", inverse_var = TRUE), CR2_mats[["A2"]]) expect_false(identical(vcovCR(obj_A2, type = "CR2", inverse_var = FALSE), CR2_mats[["A2"]])) target <- targetVariance(obj_A2) expect_equal(vcovCR(obj_A2, type = "CR2", target = target, inverse_var = TRUE), CR2_mats[["A2"]]) attr(CR2_mats[["A2"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A2, type = "CR2", target = target, inverse_var = FALSE), CR2_mats[["A2"]]) expect_identical(vcovCR(obj_A3, cluster = egsingle$schoolid, type = "CR2"), CR2_mats[["A3"]]) expect_equal(vcovCR(obj_A3, type = "CR2", inverse_var = TRUE), CR2_mats[["A3"]]) expect_false(identical(vcovCR(obj_A3, type = "CR2", inverse_var = FALSE), CR2_mats[["A3"]])) target <- targetVariance(obj_A3) expect_equal(vcovCR(obj_A3, type = "CR2", target = target, inverse_var = TRUE), CR2_mats[["A3"]]) attr(CR2_mats[["A3"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A3, type = "CR2", target = target, inverse_var = FALSE), CR2_mats[["A3"]]) expect_identical(vcovCR(obj_A4, cluster = egsingle$schoolid, type = "CR2"), CR2_mats[["A4"]]) expect_equal(vcovCR(obj_A4, type = "CR2", inverse_var = TRUE), CR2_mats[["A4"]]) expect_false(identical(vcovCR(obj_A4, type = "CR2", inverse_var = FALSE), CR2_mats[["A4"]])) target <- targetVariance(obj_A4) expect_equal(vcovCR(obj_A4, type = "CR2", target = target, inverse_var = TRUE), CR2_mats[["A4"]]) attr(CR2_mats[["A4"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A4, type = 
"CR2", target = target, inverse_var = FALSE), CR2_mats[["A4"]]) }) test_that("vcovCR options work for CR4", { skip_on_cran() skip("Not worrying about CR4 for now.") CR4_mats <- lapply(objects, vcovCR, type = "CR4") expect_identical(vcovCR(obj_A1, cluster = egsingle$schoolid, type = "CR4"), CR4_mats[["A1"]]) expect_identical(vcovCR(obj_A1, type = "CR4", inverse_var = TRUE), CR4_mats[["A1"]]) expect_false(identical(vcovCR(obj_A1, type = "CR4", inverse_var = FALSE), CR4_mats[["A1"]])) target <- targetVariance(obj_A1) expect_equal(vcovCR(obj_A1, type = "CR4", target = target, inverse_var = TRUE), CR4_mats[["A1"]]) attr(CR4_mats[["A1"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A1, type = "CR4", target = target, inverse_var = FALSE), CR4_mats[["A1"]]) expect_identical(vcovCR(obj_A2, cluster = egsingle$schoolid, type = "CR4"), CR4_mats[["A2"]]) expect_identical(vcovCR(obj_A2, type = "CR4", inverse_var = TRUE), CR4_mats[["A2"]]) expect_false(identical(vcovCR(obj_A2, type = "CR4", inverse_var = FALSE), CR4_mats[["A2"]])) target <- targetVariance(obj_A2) expect_equal(vcovCR(obj_A2, type = "CR4", target = target, inverse_var = TRUE), CR4_mats[["A2"]]) attr(CR4_mats[["A2"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A2, type = "CR4", target = target, inverse_var = FALSE), CR4_mats[["A2"]]) expect_identical(vcovCR(obj_A3, cluster = egsingle$schoolid, type = "CR4"), CR4_mats[["A3"]]) expect_identical(vcovCR(obj_A3, type = "CR4", inverse_var = TRUE), CR4_mats[["A3"]]) expect_false(identical(vcovCR(obj_A3, type = "CR4", inverse_var = FALSE), CR4_mats[["A3"]])) target <- targetVariance(obj_A3) expect_equal(vcovCR(obj_A3, type = "CR4", target = target, inverse_var = TRUE), CR4_mats[["A3"]]) attr(CR4_mats[["A3"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A3, type = "CR4", target = target, inverse_var = FALSE), CR4_mats[["A3"]]) expect_identical(vcovCR(obj_A4, cluster = egsingle$schoolid, type = "CR4"), CR4_mats[["A4"]]) expect_identical(vcovCR(obj_A4, type = 
"CR4", inverse_var = TRUE), CR4_mats[["A4"]]) expect_false(identical(vcovCR(obj_A4, type = "CR4", inverse_var = FALSE), CR4_mats[["A4"]])) target <- targetVariance(obj_A4) expect_equal(vcovCR(obj_A4, type = "CR4", target = target, inverse_var = TRUE), CR4_mats[["A4"]]) attr(CR4_mats[["A4"]], "inverse_var") <- FALSE expect_equal(vcovCR(obj_A4, type = "CR4", target = target, inverse_var = FALSE), CR4_mats[["A4"]]) }) test_that("CR2 is target-unbiased", { skip_on_cran() CR2_checks <- mapply(check_CR, obj = objects, vcov = CR2_mats) expect_true(all(CR2_checks)) # CR4_checks <- mapply(check_CR, obj = objects, vcov = CR4_mats) # expect_true(all(CR4_checks)) }) CR_types <- paste0("CR",0:3) test_that("Order doesn't matter.", { skip_on_cran() re_order <- sample(nrow(egsingle)) dat_scramble <- egsingle[re_order,] obj <- obj_A4 obj_scramble <- update(obj, data = dat_scramble) # expect_equal(vcov(obj), vcov(obj_scramble)) # expect_equal(v_scale(obj), v_scale(obj_scramble)) # expect_equal(obj$sigma, obj_scramble$sigma) # expect_equal(bread(obj), bread(obj_scramble), tol = 10^-5) # expect_equal(coef_CS(obj), coef_CS(obj_scramble)) # expect_equal(residuals_CS(obj)[re_order], residuals_CS(obj_scramble), check.attributes=FALSE) # expect_equal(model_matrix(obj)[re_order,], model_matrix(obj_scramble), check.attributes=FALSE) # expect_equal(model.matrix(obj$modelStruct$reStruc, getData(obj))[re_order,], # model.matrix(obj_scramble$modelStruct$reStruct, getData(obj_scramble)), check.attributes=FALSE) # # V_list <- targetVariance(obj) # V_mat <- as.matrix(bdiag(V_list)) # attr(V_mat, "dimnames") <- NULL # V_resorted <- matrix_list(V_mat[re_order,re_order], dat_scramble$schoolid, "both") # names(V_resorted) <- levels(dat_scramble$schoolid) # expect_equal(targetVariance(obj_scramble), V_resorted, tol = 10^-6) # # W_list <- weightMatrix(obj) # W_mat <- as.matrix(bdiag(W_list)) # attr(W_mat, "dimnames") <- NULL # W_resorted <- matrix_list(W_mat[re_order,re_order], dat_scramble$schoolid, 
"both") # names(W_resorted) <- levels(dat_scramble$schoolid) # expect_equal(weightMatrix(obj_scramble), W_resorted, tol = 10^-6) # # X_list <- matrix_list(model_matrix(obj), egsingle$schoolid, "row") # XWX <- Reduce("+", Map(function(w, x) t(x) %*% w %*% x, w = W_list, x = X_list)) # W_scramble <- weightMatrix(obj_scramble) # X_scramble <- matrix_list(model_matrix(obj_scramble), dat_scramble$schoolid, "row") # XWX_scramble <- Reduce("+", Map(function(w, x) t(x) %*% w %*% x, w = W_scramble, x = X_scramble)) # expect_equal(XWX, XWX_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(obj, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(obj_scramble, type = x)) expect_equal(lapply(CR_fit, as.matrix), lapply(CR_scramble, as.matrix), tol = 5 * 10^-5) test_fit <- lapply(CR_fit, function(x) coef_test(obj, vcov = x, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_scramble, function(x) coef_test(obj_scramble, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tol = 10^-6) constraints <- combn(length(coef(obj)), 2, simplify = FALSE)[10:16] Wald_fit <- Wald_test(obj, constraints = constraints, vcov = "CR2", test = "All") Wald_scramble <- Wald_test(obj_scramble, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_fit, Wald_scramble, tol = 5 * 10^-5) }) test_that("clubSandwich works with dropped observations", { skip_on_cran() dat_miss <- egsingle dat_miss$math[sample.int(nrow(egsingle), size = round(nrow(egsingle) / 10))] <- NA obj_dropped <- update(obj_A4, data = dat_miss, na.action = na.omit) obj_complete <- update(obj_A4, data = dat_miss, subset = !is.na(math)) obj <- obj_dropped cluster <- nlme::getGroups(obj, level = 1) target <- NULL inverse_var <- is.null(target) type <- "CR2" form <- "sandwich" CR_drop <- lapply(CR_types, function(x) vcovCR(obj_dropped, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(obj_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- 
lapply(CR_drop, function(x) coef_test(obj_dropped, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_complete, function(x) coef_test(obj_complete, vcov = x, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("Possible to cluster at higher level than random effects", { skip_on_cran() # fit two-level model obj_2level <- lme(math ~ year * size + female + black + hispanic, random = ~ year | childid, data = egsingle) # cluster at level 3 V <- vcovCR(obj_2level, type = "CR2", cluster = egsingle$schoolid) expect_is(V, "vcovCR") # create 4th level n_districts <- nlevels(egsingle$schoolid) / 3 districtid <- rep(1:n_districts, each = 3)[egsingle$schoolid] # cluster at level 4 expect_is(vcovCR(obj_2level, type = "CR2", cluster = districtid), "vcovCR") expect_is(vcovCR(obj_A1, type = "CR2", cluster = districtid), "vcovCR") expect_is(vcovCR(obj_A2, type = "CR2", cluster = districtid), "vcovCR") expect_is(vcovCR(obj_A3, type = "CR2", cluster = districtid), "vcovCR") expect_is(vcovCR(obj_A4, type = "CR2", cluster = districtid), "vcovCR") }) clubSandwich/tests/testthat/test_rma-uni.R0000644000176200001440000001767513500611664020436 0ustar liggesuserscontext("rma.uni objects") set.seed(20190513) library(robumeta, quietly=TRUE) suppressMessages(library(metafor, quietly=TRUE)) data(corrdat) corr_robu <- robu(effectsize ~ males + college + binge, data = corrdat, modelweights = "CORR", studynum = studyid, var.eff.size = var) corrdat$wt <- corr_robu$data.full$r.weights corr_meta <- rma(effectsize ~ males + college + binge, data = corrdat, weights = wt, vi = var, method = "FE") test_that("CR2 t-tests agree with robumeta for correlated effects", { robu_CR2 <- vcovCR(corr_meta, cluster = corrdat$studyid, target = 1 / corrdat$wt, type = "CR2") expect_true(check_CR(corr_meta, vcov = robu_CR2)) # expect_true(check_CR(corr_meta, vcov = "CR4", cluster = corrdat$studyid)) expect_equivalent(as.matrix(robu_CR2), corr_robu$VR.r) 
expect_equivalent(as.matrix(vcovCR(corr_meta, cluster = corrdat$studyid, inverse_var = TRUE, type = "CR2")), corr_robu$VR.r) CR2_ttests <- coef_test(corr_meta, vcov = robu_CR2, test = "Satterthwaite") expect_equal(corr_robu$dfs, CR2_ttests$df) expect_equal(corr_robu$reg_table$prob, CR2_ttests$p_Satt) }) data(hierdat) hier_meta <- rma(effectsize ~ binge + followup + sreport + age, data = hierdat, vi = var, method = "REML") hierdat$wt <- with(hier_meta, 1 / (vi + tau2)) hier_robu <- robu(effectsize ~ binge + followup + sreport + age, data = hierdat, studynum = studyid, var.eff.size = var, userweights = wt) test_that("CR2 t-tests agree with robumeta for user weighting", { skip("Skip until robumeta discrepancies resolved.") robu_CR2_iv <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid) robu_CR2_not <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = hier_robu$data.full$avg.var.eff.size) expect_true(check_CR(hier_meta, vcov = robu_CR2_iv)) # expect_true(check_CR(hier_meta, vcov = "CR4", cluster = hierdat$studyid)) expect_true(check_CR(hier_meta, vcov = robu_CR2_not)) # expect_true(check_CR(hier_meta, vcov = "CR4", cluster = hierdat$studyid, # target = hier_robu$data.full$avg.var.eff.size)) expect_that(all.equal(hier_robu$VR.r, as.matrix(robu_CR2_iv)), is_a("character")) expect_equivalent(hier_robu$VR.r, as.matrix(robu_CR2_not)) CR2_ttests <- coef_test(hier_meta, vcov = robu_CR2_not, test = "Satterthwaite") expect_equal(hier_robu$dfs, CR2_ttests$df) expect_equal(hier_robu$reg_table$prob, CR2_ttests$p_Satt) }) test_that("bread works", { expect_true(check_bread(corr_meta, cluster = corrdat$studyid, y = corrdat$effectsize)) X <- model_matrix(corr_meta) W <- corr_meta$weights V <- corr_meta$vi vcov_corr <- crossprod((sqrt(V) * W * X) %*% bread(corr_meta) / nobs(corr_meta)) attr(vcov_corr, "dimnames") <- attr(vcov(corr_meta), "dimnames") expect_equal(vcov(corr_meta), vcov_corr) expect_true(check_bread(hier_meta, cluster = hierdat$studyid, y = 
hierdat$effectsize)) expect_equal(vcov(hier_meta), bread(hier_meta) / nobs(hier_meta)) }) CR_types <- paste0("CR",0:4) test_that("order doesn't matter", { dat_scramble <- hierdat[sample(nrow(hierdat)),] hier_scramble <- rma(effectsize ~ binge + followup + sreport + age, data = dat_scramble, vi = var, method = "REML") CR_fit <- lapply(CR_types, function(x) vcovCR(hier_meta, type = x, cluster = hierdat$studyid)) CR_scramble <- lapply(CR_types, function(x) vcovCR(hier_scramble, type = x, cluster = dat_scramble$studyid)) expect_equivalent(CR_fit, CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(hier_meta, vcov = x, cluster = hierdat$studyid, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(hier_scramble, vcov = x, cluster = dat_scramble$studyid, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef(hier_scramble)), 2, simplify = FALSE) Wald_fit <- Wald_test(hier_meta, constraints = constraints, vcov = "CR2", cluster = hierdat$studyid, test = "All") Wald_scramble <- Wald_test(hier_scramble, constraints = constraints, vcov = "CR2", cluster = dat_scramble$studyid, test = "All") expect_equal(Wald_fit, Wald_scramble) }) test_that("clubSandwich works with dropped covariates", { dat_miss <- hierdat dat_miss$binge[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 10))] <- NA dat_miss$followup[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 20))] <- NA expect_warning(hier_drop <- rma(effectsize ~ binge + followup + sreport + age, data = dat_miss, vi = var, method = "REML")) subset_ind <- with(dat_miss, !is.na(binge) & !is.na(followup)) hier_complete <- rma(effectsize ~ binge + followup + sreport + age, subset = !is.na(binge) & !is.na(followup), data = dat_miss, vi = var, method = "REML") expect_error(vcovCR(hier_complete, type = "CR0", cluster = dat_miss$studyid)) CR_drop_A <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x, cluster = 
dat_miss$studyid)) CR_drop_B <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x, cluster = hierdat$studyid)) CR_complete <- lapply(CR_types, function(x) vcovCR(hier_complete, type = x, cluster = dat_miss$studyid[subset_ind])) expect_equal(CR_drop_A, CR_complete) expect_equal(CR_drop_B, CR_complete) test_drop_A <- lapply(CR_types, function(x) coef_test(hier_drop, vcov = x, cluster = dat_miss$studyid, test = "All", p_values = FALSE)) test_drop_B <- lapply(CR_types, function(x) coef_test(hier_drop, vcov = x, cluster = hierdat$studyid, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(hier_complete, vcov = x, cluster = dat_miss$studyid[subset_ind], test = "All", p_values = FALSE)) expect_equal(test_drop_A, test_complete, tolerance = 10^-6) expect_equal(test_drop_B, test_complete, tolerance = 10^-6) }) test_that("clubSandwich works with missing variances", { dat_miss <- hierdat dat_miss$var[sample.int(nrow(hierdat), size = round(nrow(hierdat) / 10))] <- NA expect_warning(hier_drop <- rma(effectsize ~ binge + followup + sreport + age, data = dat_miss, vi = var, method = "REML")) subset_ind <- with(dat_miss, !is.na(var)) hier_complete <- rma(effectsize ~ binge + followup + sreport + age, subset = !is.na(var), data = dat_miss, vi = var, method = "REML") expect_error(vcovCR(hier_complete, type = "CR0", cluster = dat_miss$studyid)) CR_drop_A <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x, cluster = dat_miss$studyid)) CR_drop_B <- lapply(CR_types, function(x) vcovCR(hier_drop, type = x, cluster = hierdat$studyid)) CR_complete <- lapply(CR_types, function(x) vcovCR(hier_complete, type = x, cluster = dat_miss$studyid[subset_ind])) expect_equal(CR_drop_A, CR_complete) expect_equal(CR_drop_B, CR_complete) }) test_that("vcovCR options work for CR2", { RE_var <- hier_meta$tau2 + hierdat$var CR2_iv <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid) expect_identical(vcovCR(hier_meta, type = "CR2", cluster = 
hierdat$studyid, inverse_var = TRUE), CR2_iv) CR2_not <- vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, inverse_var = FALSE) attr(CR2_iv, "inverse_var") <- FALSE attr(CR2_iv, "target") <- attr(CR2_not, "target") expect_equal(CR2_not, CR2_iv) expect_identical(vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = RE_var), CR2_not) expect_identical(vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = RE_var, inverse_var = FALSE), CR2_not) expect_false(identical(vcovCR(hier_meta, type = "CR2", cluster = hierdat$studyid, target = hierdat$var), CR2_not)) }) clubSandwich/tests/testthat/test_lme_2level.R0000644000176200001440000002337413500611664021105 0ustar liggesuserscontext("2-level lme objects") set.seed(20190513) suppressMessages(library(lme4, quietly=TRUE)) library(nlme, quietly=TRUE, warn.conflicts=FALSE) library(mlmRev, quietly=TRUE, warn.conflicts=FALSE) obj_A <- lme(weight ~ Time * Diet, data=BodyWeight, ~ Time | Rat) obj_A2 <- update(obj_A, weights = varPower()) obj_A3 <- update(obj_A, correlation = corExp(form = ~ Time)) obj_A4 <- update(obj_A2, correlation = corExp(form = ~ Time)) obj_B <- lme(distance ~ age, random = ~ age, data = Orthodont) test_that("bread works", { expect_true(check_bread(obj_A, cluster = BodyWeight$Rat, y = BodyWeight$weight)) expect_true(check_bread(obj_A2, cluster = BodyWeight$Rat, y = BodyWeight$weight, tol = 5 * 10^-5)) expect_true(check_bread(obj_A3, cluster = BodyWeight$Rat, y = BodyWeight$weight)) expect_true(check_bread(obj_A4, cluster = BodyWeight$Rat, y = BodyWeight$weight)) expect_true(check_bread(obj_B, cluster = Orthodont$Subject, y = Orthodont$distance)) expect_equal(vcov(obj_A), obj_A$sigma^2 * bread(obj_A) / v_scale(obj_A)) expect_equal(vcov(obj_A2), obj_A2$sigma^2 * bread(obj_A2) / v_scale(obj_A2)) expect_equal(vcov(obj_A3), obj_A3$sigma^2 * bread(obj_A3) / v_scale(obj_A3)) expect_equal(vcov(obj_A4), obj_A4$sigma^2 * bread(obj_A4) / v_scale(obj_A4)) expect_equal(vcov(obj_B), 
obj_B$sigma^2 * bread(obj_B) / v_scale(obj_B)) }) test_that("vcovCR options work for CR2", { CR2_A <- vcovCR(obj_A, type = "CR2") expect_identical(vcovCR(obj_A, cluster = BodyWeight$Rat, type = "CR2"), CR2_A) expect_identical(vcovCR(obj_A, type = "CR2", inverse_var = TRUE), CR2_A) expect_false(identical(vcovCR(obj_A, type = "CR2", inverse_var = FALSE), CR2_A)) target <- targetVariance(obj_A) expect_equal(vcovCR(obj_A, type = "CR2", target = target, inverse_var = TRUE), CR2_A) attr(CR2_A, "inverse_var") <- FALSE expect_equal(vcovCR(obj_A, type = "CR2", target = target, inverse_var = FALSE), CR2_A) CR2_A2 <- vcovCR(obj_A2, type = "CR2") expect_identical(vcovCR(obj_A2, cluster = BodyWeight$Rat, type = "CR2"), CR2_A2) expect_identical(vcovCR(obj_A2, type = "CR2", inverse_var = TRUE), CR2_A2) expect_false(identical(vcovCR(obj_A2, type = "CR2", inverse_var = FALSE), CR2_A2)) target <- targetVariance(obj_A2) expect_equal(vcovCR(obj_A2, type = "CR2", target = target, inverse_var = TRUE), CR2_A2) attr(CR2_A2, "inverse_var") <- FALSE expect_equal(vcovCR(obj_A2, type = "CR2", target = target, inverse_var = FALSE), CR2_A2) CR2_A3 <- vcovCR(obj_A3, type = "CR2") expect_identical(vcovCR(obj_A3, cluster = BodyWeight$Rat, type = "CR2"), CR2_A3) expect_identical(vcovCR(obj_A3, type = "CR2", inverse_var = TRUE), CR2_A3) expect_false(identical(vcovCR(obj_A3, type = "CR2", inverse_var = FALSE), CR2_A3)) target <- targetVariance(obj_A3) expect_equal(vcovCR(obj_A3, type = "CR2", target = target, inverse_var = TRUE), CR2_A3) attr(CR2_A3, "inverse_var") <- FALSE expect_equal(vcovCR(obj_A3, type = "CR2", target = target, inverse_var = FALSE), CR2_A3) CR2_B <- vcovCR(obj_B, type = "CR2") expect_identical(vcovCR(obj_B, cluster = Orthodont$Subject, type = "CR2"), CR2_B) expect_identical(vcovCR(obj_B, type = "CR2", inverse_var = TRUE), CR2_B) expect_false(identical(vcovCR(obj_B, type = "CR2", inverse_var = FALSE), CR2_B)) target <- targetVariance(obj_B) expect_equal(vcovCR(obj_B, type = "CR2", 
target = target, inverse_var = TRUE), CR2_B) attr(CR2_B, "inverse_var") <- FALSE expect_equal(vcovCR(obj_B, type = "CR2", target = target, inverse_var = FALSE), CR2_B) }) test_that("vcovCR options work for CR4", { CR4_A <- vcovCR(obj_A, type = "CR4") expect_identical(vcovCR(obj_A, cluster = BodyWeight$Rat, type = "CR4"), CR4_A) expect_identical(vcovCR(obj_A, type = "CR4", inverse_var = TRUE), CR4_A) expect_false(identical(vcovCR(obj_A, type = "CR4", inverse_var = FALSE), CR4_A)) target <- targetVariance(obj_A) expect_equal(vcovCR(obj_A, type = "CR4", target = target, inverse_var = TRUE), CR4_A) attr(CR4_A, "inverse_var") <- FALSE expect_equal(vcovCR(obj_A, type = "CR4", target = target, inverse_var = FALSE), CR4_A) CR4_B <- vcovCR(obj_B, type = "CR4") expect_identical(vcovCR(obj_B, cluster = Orthodont$Subject, type = "CR4"), CR4_B) expect_identical(vcovCR(obj_B, type = "CR4", inverse_var = TRUE), CR4_B) expect_false(identical(vcovCR(obj_B, type = "CR4", inverse_var = FALSE), CR4_B)) target <- targetVariance(obj_B) expect_equal(vcovCR(obj_B, type = "CR4", target = target, inverse_var = TRUE), CR4_B) attr(CR4_B, "inverse_var") <- FALSE expect_equal(vcovCR(obj_B, type = "CR4", target = target, inverse_var = FALSE), CR4_B) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(obj_A, vcov = "CR2")) expect_true(check_CR(obj_B, vcov = "CR2")) expect_true(check_CR(obj_A, vcov = "CR4")) expect_true(check_CR(obj_B, vcov = "CR4")) }) CR_types <- paste0("CR",0:4) test_that("Order doesn't matter.", { re_order <- sample(nrow(BodyWeight)) dat_scramble <- BodyWeight[re_order,] obj_scramble <- update(obj_A, data = dat_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(obj_A, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(obj_scramble, type = x)) expect_equivalent(CR_fit, CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(obj_A, vcov = x, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) 
coef_test(obj_scramble, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef(obj_A)), 2, simplify = FALSE) Wald_fit <- Wald_test(obj_A, constraints = constraints, vcov = "CR2", test = "All") Wald_scramble <- Wald_test(obj_scramble, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_fit, Wald_scramble) }) test_that("clubSandwich works with dropped observations", { dat_miss <- BodyWeight dat_miss$weight[sample.int(nrow(BodyWeight), size = round(nrow(BodyWeight) / 10))] <- NA obj_dropped <- update(obj_A, data = dat_miss, na.action = na.omit) obj_complete <- update(obj_A, data = dat_miss, subset = !is.na(weight)) CR_drop <- lapply(CR_types, function(x) vcovCR(obj_dropped, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(obj_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(obj_dropped, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(obj_complete, vcov = x, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("lme agrees with gls", { lme_fit <- lme(weight ~ Time * Diet, data=BodyWeight, ~ 1 | Rat) gls_fit <- gls(weight ~ Time * Diet, data=BodyWeight, correlation = corCompSymm(form = ~ 1 | Rat)) CR_lme <- lapply(CR_types, function(x) vcovCR(lme_fit, type = x)) CR_gls <- lapply(CR_types, function(x) vcovCR(gls_fit, type = x)) # max_ratio <- mapply(function(a, b) max(abs(a / b - 1)), CR_lme, CR_gls) # expect_true(all(max_ratio < 10^-4)) expect_equivalent(CR_lme, CR_gls, tolerance = 10^-6) test_lme <- lapply(CR_types, function(x) coef_test(lme_fit, vcov = x, test = "All", p_values = FALSE)) test_gls <- lapply(CR_types, function(x) coef_test(gls_fit, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_lme, test_gls, tolerance = 10^-5) constraints <- c(combn(length(coef(lme_fit)), 2, simplify = FALSE), 
combn(length(coef(lme_fit)), 3, simplify = FALSE)) Wald_lme <- Wald_test(lme_fit, constraints = constraints, vcov = "CR2", test = "All") Wald_gls <- Wald_test(gls_fit, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_lme, Wald_gls) }) test_that("Emply levels are dropped in model_matrix", { data(AchievementAwardsRCT) AA_RCT_females <- subset(AchievementAwardsRCT, sex=="Girl" & year != "1999") AA_RCT_females <- within(AA_RCT_females, { sibs_4 <- siblings >= 4 treated2001 <- treated * (year=="2001") }) lme_fit <- lme(Bagrut_status ~ year * school_type + father_ed + mother_ed + immigrant + sibs_4 + qrtl + treated2001:half, random = ~ 1 | school_id, data = AA_RCT_females) betas <- fixef(lme_fit) X <- model_matrix(lme_fit) expect_identical(names(betas), colnames(X)) }) test_that("Possible to cluster at higher level than random effects", { n_districts <- 10 n_schools_per <- rnbinom(n_districts, size = 4, prob = 0.3) n_schools <- sum(n_schools_per) n_students_per <- 10 n_students <- n_schools * n_students_per # identifiers for each level district_id <- factor(rep(1:n_districts, n_schools_per * n_students_per)) school_id <- factor(rep(1:sum(n_schools_per), each = n_students_per)) student_id <- 1:n_students # simulated outcome Y <- rnorm(n_districts)[district_id] + rnorm(n_schools)[school_id] + rnorm(n_students) X <- rnorm(n_students) dat <- data.frame(district_id, school_id, student_id, Y, X) dat_scramble <- dat[sample(nrow(dat)),] # fit two-level model lme_2level <- lme(Y ~ X, random = ~ 1 | school_id, data = dat) # cluster at level 3 V <- vcovCR(lme_2level, type = "CR2", cluster = dat$district_id) expect_is(V, "vcovCR") expect_error(vcovCR(lme_2level, type = "CR2", cluster = dat_scramble$district_id)) # check that result does not depend on sort-order V_scramble <- vcovCR(lme(Y ~ X, random = ~ 1 | school_id, data = dat_scramble), type = "CR2", cluster = dat_scramble$district_id) expect_equal(as.matrix(V), as.matrix(V_scramble)) }) 
clubSandwich/tests/testthat/test_conf_int.R0000644000176200001440000000460213500611664020647 0ustar liggesuserscontext("confidence intervals") set.seed(20190513) library(nlme, quietly=TRUE, warn.conflicts=FALSE) data(Ovary, package = "nlme") Ovary$time_int <- 1:nrow(Ovary) gls_fit <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary, correlation = corAR1(form = ~ time_int | Mare), weights = varPower()) CRs <- paste0("CR", 0:4) test_that("vcov arguments work", { VCR <- lapply(CRs, function(t) vcovCR(gls_fit, type = t)) CI_A <- lapply(VCR, function(v) conf_int(gls_fit, vcov = v, level = .98)) CI_B <- lapply(CRs, function(t) conf_int(gls_fit, vcov = t, level = .98)) expect_identical(CI_A, CI_B) }) test_that("coefs argument works", { which_grid <- expand.grid(rep(list(c(FALSE,TRUE)), length(coef(gls_fit)))) tests_all <- conf_int(gls_fit, vcov = "CR0", coefs = "All") CI_A <- apply(which_grid[-1,], 1, function(x) tests_all[x,]) CI_B <- apply(which_grid[-1,], 1, function(x) conf_int(gls_fit, vcov = "CR0", coefs = x)) expect_identical(CI_A, CI_B) }) test_that("printing works", { CIs <- conf_int(gls_fit, vcov = "CR0") expect_output(print(CIs)) }) test_that("level checks work", { expect_error(conf_int(gls_fit, vcov = "CR0", level = -0.01)) expect_error(conf_int(gls_fit, vcov = "CR0", level = 95)) expect_output(print(conf_int(gls_fit, vcov = "CR0", level = runif(1)))) }) test_that("CI boundaries are ordered", { lev <- runif(1) CI_z <- conf_int(gls_fit, vcov = "CR0", test = "z", level = lev) CI_t <- conf_int(gls_fit, vcov = "CR0", test = "naive-t", level = lev) CI_Satt <- conf_int(gls_fit, vcov = "CR0", test = "Satterthwaite", level = lev) expect_true(all(CI_t$CI_L < CI_z$CI_L)) expect_true(all(CI_t$CI_U > CI_z$CI_U)) expect_true(all(CI_Satt$CI_L < CI_z$CI_L)) expect_true(all(CI_Satt$CI_U > CI_z$CI_U)) }) test_that("conf_int() is consistent with coef_test()", { lev <- runif(1) CIs <- lapply(CRs, function(v) conf_int(gls_fit, vcov = v, test = "Satterthwaite", level 
= lev)) ttests <- lapply(CRs, function(v) coef_test(gls_fit, vcov = v, test = "Satterthwaite")) CI_L <- lapply(ttests, function(x) x$beta - x$SE * qt(1 - (1 - lev) / 2, df = x$df)) CI_U <- lapply(ttests, function(x) x$beta + x$SE * qt(1 - (1 - lev) / 2, df = x$df)) expect_identical(lapply(CIs, function(x) x$CI_L), CI_L) expect_identical(lapply(CIs, function(x) x$CI_U), CI_U) }) clubSandwich/tests/testthat/test_plm-fixed-effects.R0000644000176200001440000003107513500611664022356 0ustar liggesuserscontext("plm objects - fixed effects") set.seed(20190513) library(plm, quietly=TRUE) data("Produc", package = "plm") Produc$cluster <- sample(LETTERS[1:10], size = nrow(Produc), replace=TRUE) Produc_scramble <- Produc[sample(nrow(Produc)),] n <- nrow(Produc_scramble) plm_individual <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc_scramble, index = c("state","year"), effect = "individual", model = "within") lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) individual_names <- names(coef(plm_individual)) individual_index <- names(coef(lm_individual)) %in% individual_names test_that("individual effects agree with lm", { expect_equal(vcovCR(plm_individual, type="CR0")[individual_names,individual_names], vcovCR(lm_individual, cluster = Produc$state, type = "CR0")[individual_index,individual_index]) expect_equal(vcovCR(plm_individual, type="CR1")[individual_names,individual_names], vcovCR(lm_individual, cluster = Produc$state, type = "CR1")[individual_index,individual_index]) expect_equal(vcovCR(plm_individual, type="CR2")[individual_names,individual_names], vcovCR(lm_individual, cluster = Produc$state, type = "CR2")[individual_index,individual_index]) }) plm_time <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc_scramble, index = c("state","year"), effect = "time", model = "within") lm_time <- lm(log(gsp) ~ 0 + factor(year) + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) 
time_names <- names(coef(plm_time)) time_index <- names(coef(lm_time)) %in% time_names test_that("time effects agree with lm", { expect_equal(vcovCR(plm_time, type="CR0")[time_names,time_names], vcovCR(lm_time, cluster = Produc$year, type = "CR0")[time_index,time_index]) expect_equal(vcovCR(plm_time, type="CR1")[time_names,time_names], vcovCR(lm_time, cluster = Produc$year, type = "CR1")[time_index,time_index]) expect_equal(vcovCR(plm_time, type="CR2")[time_names,time_names], vcovCR(lm_time, cluster = Produc$year, type = "CR2")[time_index,time_index]) }) plm_twoways <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc_scramble, index = c("state","year"), effect = "twoways", model = "within") lm_twoways <- lm(log(gsp) ~ 0 + state + factor(year) + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) twoway_names <- names(coef(plm_twoways)) twoway_index <- names(coef(lm_twoways)) %in% twoway_names test_that("two-way effects agree with lm", { # clustering on individual expect_equal(vcovCR(plm_twoways, cluster = "individual", type="CR0")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$state, type = "CR0")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "individual", type="CR1")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$state, type = "CR1")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "individual", type="CR2")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$state, type = "CR2")[twoway_index,twoway_index]) # clustering on time expect_equal(vcovCR(plm_twoways, cluster = "time", type="CR0")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$year, type = "CR0")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "time", type="CR1")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$year, type = "CR1")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = "time", 
type="CR2")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$year, type = "CR2")[twoway_index,twoway_index]) # clustering on a randomly generated factor expect_equal(vcovCR(plm_twoways, cluster = Produc_scramble$cluster, type="CR0")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$cluster, type = "CR0")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = Produc_scramble$cluster, type="CR1")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$cluster, type = "CR1")[twoway_index,twoway_index]) expect_equal(vcovCR(plm_twoways, cluster = Produc_scramble$cluster, type="CR2")[twoway_names,twoway_names], vcovCR(lm_twoways, cluster = Produc$cluster, type = "CR2")[twoway_index,twoway_index]) }) test_that("bread works", { y <- plm_individual$model$"log(gsp)" expect_true(check_bread(plm_individual, cluster = findCluster.plm(plm_individual), y = y)) sigma_sq_ind <- with(plm_individual, sum(residuals^2) / df.residual) expect_equal(vcov(plm_individual), bread(plm_individual) * sigma_sq_ind / v_scale(plm_individual)) expect_true(check_bread(plm_time, cluster = findCluster.plm(plm_time), y = y)) sigma_sq_time <- with(plm_time, sum(residuals^2) / df.residual) expect_equal(vcov(plm_time), bread(plm_time) * sigma_sq_time / v_scale(plm_time)) expect_true(check_bread(plm_twoways, cluster = Produc_scramble$state, y = y)) expect_true(check_bread(plm_twoways, cluster = Produc_scramble$year, y = y)) sigma_sq_two <- with(plm_twoways, sum(residuals^2) / df.residual) expect_equal(vcov(plm_twoways), bread(plm_twoways) * sigma_sq_two / v_scale(plm_twoways)) }) test_that("CR0 and CR1S agree with arellano vcov", { expect_equal(vcovHC(plm_individual, method="arellano", type = "HC0", cluster = "group"), as.matrix(vcovCR(plm_individual, type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_individual, method="arellano", type = "sss", cluster = "group"), as.matrix(vcovCR(plm_individual, type = "CR1S")), check.attributes = FALSE) 
expect_equal(vcovHC(plm_time, method="arellano", type = "HC0", cluster = "time"), as.matrix(vcovCR(plm_time, type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_time, method="arellano", type = "sss", cluster = "time"), as.matrix(vcovCR(plm_time, type = "CR1S")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "HC0", cluster = "group"), as.matrix(vcovCR(plm_twoways, cluster = "individual", type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "sss", cluster = "group"), as.matrix(vcovCR(plm_twoways, cluster = "individual", type = "CR1S")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "HC0", cluster = "time"), as.matrix(vcovCR(plm_twoways, cluster = "time", type = "CR0")), check.attributes = FALSE) expect_equal(vcovHC(plm_twoways, method="arellano", type = "sss", cluster = "time"), as.matrix(vcovCR(plm_twoways, cluster = "time", type = "CR1S")), check.attributes = FALSE) }) test_that("vcovCR options work for CR2", { CR2_iv <- vcovCR(plm_individual, type = "CR2") expect_identical(vcovCR(plm_individual, cluster = Produc_scramble$state, type = "CR2"), CR2_iv) expect_equal(vcovCR(plm_individual, type = "CR2", inverse_var = TRUE), CR2_iv) expect_equal(vcovCR(plm_individual, type = "CR2", target = rep(1, n), inverse_var = TRUE), CR2_iv) CR2_not <- vcovCR(plm_individual, type = "CR2", inverse_var = FALSE) expect_equivalent(CR2_not, CR2_iv) expect_identical(vcovCR(plm_individual, cluster = Produc_scramble$state, type = "CR2", inverse_var = FALSE), CR2_not) expect_identical(vcovCR(plm_individual, type = "CR2", target = rep(1, n)), CR2_not) expect_identical(vcovCR(plm_individual, type = "CR2", target = rep(1, n), inverse_var = FALSE), CR2_not) expect_false(identical(vcovCR(plm_individual, type = "CR2", target = 1 / Produc_scramble$emp), CR2_not)) }) test_that("vcovCR options work for CR4", { CR4_iv <- vcovCR(plm_individual, type = "CR4") 
expect_identical(vcovCR(plm_individual, cluster = Produc_scramble$state, type = "CR4"), CR4_iv) expect_identical(vcovCR(plm_individual, type = "CR4", inverse_var = TRUE), CR4_iv) expect_identical(vcovCR(plm_individual, type = "CR4", target = rep(1, n), inverse_var = TRUE), CR4_iv) CR4_not <- vcovCR(plm_individual, type = "CR4", inverse_var = FALSE) expect_equivalent(CR4_not, CR4_iv) expect_identical(vcovCR(plm_individual, cluster = Produc_scramble$state, type = "CR4", inverse_var = FALSE), CR4_not) expect_identical(vcovCR(plm_individual, type = "CR4", target = rep(1, n)), CR4_not) expect_identical(vcovCR(plm_individual, type = "CR4", target = rep(1, n), inverse_var = FALSE), CR4_not) expect_false(identical(vcovCR(plm_individual, type = "CR4", target = 1 / Produc_scramble$emp), CR4_not)) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(plm_individual, vcov = "CR2")) expect_true(check_CR(plm_individual, vcov = "CR4")) expect_true(check_CR(plm_individual, vcov = "CR2", inverse_var = FALSE)) expect_true(check_CR(plm_individual, vcov = "CR4", inverse_var = FALSE)) expect_true(check_CR(plm_time, vcov = "CR2")) expect_true(check_CR(plm_time, vcov = "CR4")) expect_true(check_CR(plm_time, vcov = "CR2", inverse_var = FALSE)) expect_true(check_CR(plm_time, vcov = "CR4", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "individual")) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "individual")) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "individual", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "individual", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "time")) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "time")) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = "time", inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = "time", inverse_var = FALSE)) 
expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = Produc_scramble$cluster)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = Produc_scramble$cluster)) expect_true(check_CR(plm_twoways, vcov = "CR2", cluster = Produc_scramble$cluster, inverse_var = FALSE)) expect_true(check_CR(plm_twoways, vcov = "CR4", cluster = Produc_scramble$cluster, inverse_var = FALSE)) }) test_that("vcovCR is equivalent to vcovHC when clusters are all of size 1", { library(sandwich, quietly=TRUE) CR_types <- paste0("CR",c(0,2)) HC_types <- paste0("HC",c(0,2)) CR_individual <- lapply(CR_types, function(t) as.matrix(vcovCR(plm_individual, cluster = 1:n, type = t))) HC_individual <- lapply(HC_types, function(t) vcovHC(lm_individual, type = t)[individual_index,individual_index]) expect_equal(CR_individual, HC_individual) CR_time <- lapply(CR_types, function(t) as.matrix(vcovCR(plm_time, cluster = 1:n, type = t))) HC_time <- lapply(HC_types, function(t) vcovHC(lm_time, type = t)[time_index,time_index]) expect_equal(CR_time, HC_time) CR_twoways <- lapply(CR_types, function(t) as.matrix(vcovCR(plm_twoways, cluster = 1:n, type = t))) HC_twoways <- lapply(HC_types, function(t) vcovHC(lm_twoways, type = t)[twoway_index,twoway_index]) expect_equal(CR_twoways, HC_twoways) }) test_that("CR2 is equivalent to Welch t-test for DiD design", { m0 <- 4 m1 <- 9 m <- m0 + m1 cluster <- factor(rep(LETTERS[1:m], each = 2)) n <- length(cluster) time <- rep(c(1,2), m) trt_clusters <- c(rep(0,m0), rep(1,m1)) trt <- (time - 1) * rep(trt_clusters, each = 2) nu <- rnorm(m)[cluster] e <- rnorm(n) y <- 0.4 * trt + nu + e dat <- data.frame(y, time, trt, cluster) plm_DID <- plm(y ~ trt, data = dat, index = c("cluster","time"), effect = "twoways", model = "within") plm_Satt <- coef_test(plm_DID, vcov = "CR2", cluster = dat$cluster)["trt",] plm_Wald <- Wald_test(plm_DID, constraints = 1, vcov = "CR2", cluster = dat$cluster) df <- m^2 * (m0 - 1) * (m1 - 1) / (m0^2 * (m0 - 1) + m1^2 * (m1 - 1)) y_diff <- 
apply(matrix(y, nrow = 2), 2, diff) t_Welch <- t.test(y_diff ~ trt_clusters) expect_equal(with(t_Welch, estimate[[2]] - estimate[[1]]), plm_Satt$beta) expect_equal(as.numeric(-t_Welch$statistic), with(plm_Satt, beta / SE)) expect_equal(as.numeric(-t_Welch$statistic)^2, plm_Wald$Fstat) expect_is(all.equal(as.numeric(t_Welch$parameter), plm_Satt$df), "character") expect_equal(plm_Satt$df, df) expect_equal(plm_Wald$df, df) }) clubSandwich/tests/testthat/test_mlm.R0000644000176200001440000002127113500611664017636 0ustar liggesuserscontext("mlm objects") set.seed(20190513) n <- nrow(iris) lm_fit <- lm(cbind(Sepal.Length, Sepal.Width) ~ Species + Petal.Length, data = iris) lm_A_fit <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) lm_B_fit <- lm(Sepal.Width ~ Species + Petal.Length, data = iris) WLS_fit <- lm(cbind(Sepal.Length, Sepal.Width) ~ Species + Petal.Length, data = iris, weights = Petal.Width) CR_types <- paste0("CR",0:4) test_that("bread works", { expect_equal(bread.mlm(lm_fit), sandwich:::bread.mlm(lm_fit)) y <- with(iris, as.vector(rbind(Sepal.Length, Sepal.Width))) cluster <- rep(rownames(iris), each = ncol(residuals(lm_fit))) expect_true(check_bread(lm_fit, cluster = cluster, y = y)) expect_true(check_bread(WLS_fit, cluster = cluster, y = y)) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(lm_fit, vcov = "CR2")) expect_true(check_CR(WLS_fit, vcov = "CR2")) expect_true(check_CR(lm_fit, vcov = "CR4")) expect_true(check_CR(WLS_fit, vcov = "CR4")) }) test_that("vcovCR is mostly equivalent to vcovHC when clusters are all of size 1", { library(sandwich, quietly=TRUE) CR_mats <- sapply(c("CR0","CR2","CR3","CR1","CR1p","CR1S"), function(t) as.matrix(vcovCR(lm_fit, type = t)), simplify = FALSE, USE.NAMES = TRUE) HC_mats <- sapply(c("HC0","HC2","HC3","HC1"), function(t) vcovHC(lm_fit, type = t), simplify = FALSE, USE.NAMES = TRUE) expect_equal(CR_mats$CR0, HC_mats$HC0) expect_equal(CR_mats$CR2, HC_mats$HC2) expect_equal(CR_mats$CR3, 
HC_mats$HC3) J <- nobs(lm_fit) p <- ncol(model.matrix(lm_fit)) N <- nrow(model_matrix(lm_fit)) expect_equal(CR_mats$CR1 * (J - 1), HC_mats$HC1 * (J - p)) expect_equal(CR_mats$CR1p * (J - 2 * p), HC_mats$HC1 * (J - p)) expect_equal(CR_mats$CR1S * (J - 1) * (N - 2 * p) / (N - 1), HC_mats$HC1 * (J - p)) HC_A_mats <- sapply(c("HC0","HC2","HC3"), function(t) vcovHC(lm_A_fit, type = t), simplify = FALSE, USE.NAMES = TRUE) HC_B_mats <- sapply(c("HC0","HC2","HC3"), function(t) vcovHC(lm_B_fit, type = t), simplify = FALSE, USE.NAMES = TRUE) expect_equal(CR_mats$CR0[1:p,1:p], HC_A_mats$HC0, check.attributes = FALSE) expect_equal(CR_mats$CR2[1:p,1:p], HC_A_mats$HC2, check.attributes = FALSE) expect_equal(CR_mats$CR3[1:p,1:p], HC_A_mats$HC3, check.attributes = FALSE) expect_equal(CR_mats$CR0[p + 1:p,p + 1:p], HC_B_mats$HC0, check.attributes = FALSE) expect_equal(CR_mats$CR2[p + 1:p,p + 1:p], HC_B_mats$HC2, check.attributes = FALSE) expect_equal(CR_mats$CR3[p + 1:p,p + 1:p], HC_B_mats$HC3, check.attributes = FALSE) }) test_that("mlm is equivalent to lm with long data.", { iris_long <- reshape(iris, c("Sepal.Length","Sepal.Width"), direction = "long", times = "outcome") iris_long$outcome <- paste0("Sepal.", iris_long$time) lm_long <- lm(Sepal ~ 0 + outcome + outcome:Species + outcome:Petal.Length, data = iris_long) i <- order(rep(1:2, 4)) expect_equal(coef_CS(lm_fit), coef(lm_long)[i], check.attributes = FALSE) CR_fit <- lapply(CR_types, function(x) as.matrix(vcovCR(lm_fit, type = x))) CR_long <- lapply(CR_types, function(x) vcovCR(lm_long, type = x, cluster = iris_long$id)[i,i]) expect_equivalent(CR_fit, CR_long) test_fit <- lapply(CR_types, function(x) coef_test(lm_fit, vcov = x, test = "All", p_values = FALSE)) test_long <- lapply(CR_types, function(x) coef_test(lm_long, vcov = x, cluster = iris_long$id, test = "All", p_values = FALSE)[i,]) expect_equal(test_fit, test_long, check.attributes = FALSE) CR_fit <- lapply(CR_types, function(x) as.matrix(vcovCR(lm_fit, type = x, 
cluster = iris$Petal.Length))) CR_long <- lapply(CR_types, function(x) vcovCR(lm_long, type = x, cluster = iris_long$Petal.Length)[i,i]) expect_equivalent(CR_fit, CR_long) test_fit <- lapply(CR_types, function(x) coef_test(lm_fit, vcov = x, test = "All", p_values = FALSE)) test_long <- lapply(CR_types, function(x) coef_test(lm_long, vcov = x, cluster = iris_long$id, test = "All", p_values = FALSE)[i,]) expect_equal(test_fit, test_long, check.attributes = FALSE) }) test_that("Order doesn't matter.",{ dat_scramble <- iris[sample(n),] WLS_scramble <- update(WLS_fit, data = dat_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(WLS_fit, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(WLS_scramble, type = x)) expect_equivalent(CR_fit, CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(WLS_fit, vcov = x, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(WLS_scramble, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) # constraints <- combn(length(coef_CS(lm_fit)), 2, simplify = FALSE) # Wald_fit <- Wald_test(WLS_fit, constraints = constraints, vcov = "CR2", test = "All") # Wald_scramble <- Wald_test(WLS_scramble, constraints = constraints, vcov = "CR2", test = "All") # expect_equal(Wald_fit, Wald_scramble) }) test_that("clubSandwich works with dropped covariates", { dat_miss <- iris dat_miss$Petal.Length[sample.int(n, size = round(n / 10))] <- NA lm_dropped <- update(lm_fit, data = dat_miss) dat_complete <- subset(dat_miss, !is.na(Petal.Length)) lm_complete <- update(lm_fit, data = dat_complete) CR_drop <- lapply(CR_types, function(x) vcovCR(lm_dropped, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(lm_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(lm_dropped, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(lm_complete, vcov 
= x, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("clubSandwich works with dropped outcomes", { dat_miss <- iris n <- nrow(dat_miss) dat_miss$Sepal.Length[sample.int(n, size = round(n / 10))] <- NA dat_miss$Sepal.Width[sample.int(n, size = round(n / 10))] <- NA lm_dropped <- update(lm_fit, data = dat_miss) dat_complete <- subset(dat_miss, !is.na(Sepal.Length) & !is.na(Sepal.Width)) lm_complete <- update(lm_fit, data = dat_complete) CR_drop <- lapply(CR_types, function(x) vcovCR(lm_dropped, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(lm_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(lm_dropped, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(lm_complete, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_drop, test_complete) }) test_that("clubSandwich works with dropped outcomes, covariates, and weights", { dat_miss <- iris n <- nrow(dat_miss) dat_miss$Sepal.Length[sample.int(n, size = round(n / 5))] <- NA dat_miss$Sepal.Width[sample.int(n, size = round(n / 5))] <- NA dat_miss$Petal.Length[sample.int(n, size = round(n / 5))] <- NA dat_miss$Petal.Width[sample.int(n, size = round(n / 5))] <- NA WLS_dropped <- update(WLS_fit, data = dat_miss) dat_complete <- subset(dat_miss, !is.na(Petal.Length) & !is.na(Petal.Width) & !is.na(Sepal.Length) & !is.na(Sepal.Width)) WLS_complete <- update(WLS_fit, data = dat_complete) CR_drop <- lapply(CR_types, function(x) vcovCR(WLS_dropped, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(WLS_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(WLS_dropped, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(WLS_complete, vcov = x, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("weight scale 
doesn't matter", { lm_fit_w <- update(lm_fit, weights = rep(10, nrow(iris))) unweighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit, type = x)) weighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit_w, type = x)) expect_equal(lapply(unweighted_fit, as.matrix), lapply(weighted_fit, as.matrix)) target <- rep(1 + rpois(nrow(iris), lambda = 8), each = ncol(residuals(lm_fit))) unweighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit, type = x, target = target)) weighted_fit <- lapply(CR_types, function(x) vcovCR(lm_fit_w, type = x, target = target * 15)) expect_equal(lapply(unweighted_fit, as.matrix), lapply(weighted_fit, as.matrix)) }) clubSandwich/tests/testthat/test_Wald.R0000644000176200001440000000344513576050444017751 0ustar liggesuserscontext("Wald tests") set.seed(20190513) data(Duncan, package = "carData") Duncan$cluster <- sample(LETTERS[1:8], size = nrow(Duncan), replace = TRUE) duncan_fit <- lm(prestige ~ type * (income + education), data=Duncan) coefs <- names(coef(duncan_fit)) Duncan_CR2 <- vcovCR(duncan_fit, type = "CR2", cluster = Duncan$cluster) test_that("constraint expressions are equivalent", { constraints_logical <- grepl("typeprof:", coefs) constraints_int <- which(constraints_logical) constraints_num <- as.numeric(constraints_int) constraints_char <- coefs[constraints_logical] constraints_mat <- diag(1L, nrow = length(coefs))[constraints_logical,,drop=FALSE] Wald_logical <- Wald_test(duncan_fit, vcov = "CR2", cluster = Duncan$cluster, constraints = constraints_logical, test = "All") expect_output(print(Wald_logical)) constraint_list <- list(integer = constraints_int, numeric = constraints_num, char = constraints_char, matrix = constraints_mat) Walds <- Wald_test(duncan_fit, vcov = "CR2", cluster = Duncan$cluster, constraints = constraint_list, test = "All") expect_identical(Wald_logical, Walds$integer) expect_identical(Wald_logical, Walds$numeric) expect_identical(Wald_logical, Walds$char) expect_identical(Wald_logical, Walds$matrix) 
}) test_that("Wald test is equivalent to Satterthwaite for q = 1.",{ p <- length(coefs) t_tests <- coef_test(duncan_fit, vcov = Duncan_CR2) F_tests <- Wald_test(duncan_fit, vcov = Duncan_CR2, constraints = as.list(1:9)) expect_equal(t_tests$df, sapply(F_tests, function(x) x$df)) expect_equal(t_tests$p_Satt, sapply(F_tests, function(x) x$p_val)) }) clubSandwich/tests/testthat/test_lmerMod.R0000644000176200001440000003027113576303502020452 0ustar liggesuserscontext("lmerMod objects") set.seed(20191217) suppressMessages(library(lme4, quietly=TRUE)) library(nlme, quietly=TRUE, warn.conflicts=FALSE) obj_A1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) obj_A2 <- lmer(Reaction ~ Days + (Days || Subject), sleepstudy) data(Orthodont, package="nlme") obj_B1 <- lmer(distance ~ age + (1 | Subject), data=Orthodont) obj_B2 <- lmer(distance ~ age + (age || Subject), data=Orthodont) data(egsingle, package = "mlmRev") egsingle <- within(egsingle, { size <- (size - mean(size)) / sd(size) }) obj_C1 <- lmer(math ~ year * size + female + black + hispanic + (1 | schoolid) + (1 | childid), data = egsingle) obj_C2 <- lmer(math ~ year * size + female + black + hispanic + (year | schoolid) + (1 | childid), data = egsingle, control = lmerControl(check.conv.grad = .makeCC("ignore", tol = 2e-3, relTol = NULL))) test_that("bread works", { expect_true(check_bread(obj_A1, cluster = sleepstudy$Subject, y = sleepstudy$Reaction)) expect_true(check_bread(obj_A2, cluster = sleepstudy$Subject, y = sleepstudy$Reaction)) expect_true(check_bread(obj_B1, cluster = Orthodont$Subject, y = Orthodont$distance)) expect_true(check_bread(obj_B2, cluster = Orthodont$Subject, y = Orthodont$distance)) expect_true(check_bread(obj_C1, cluster = egsingle$schoolid, y = egsingle$math)) expect_true(check_bread(obj_C2, cluster = egsingle$schoolid, y = egsingle$math)) expect_equal(as.matrix(vcov(obj_A1)), bread(obj_A1) * getME(obj_A1, "sigma")^2 / v_scale(obj_A1)) expect_equal(as.matrix(vcov(obj_A2)), 
bread(obj_A2) * getME(obj_A2, "sigma")^2 / v_scale(obj_A2)) expect_equal(as.matrix(vcov(obj_B1)), bread(obj_B1) * getME(obj_B1, "sigma")^2 / v_scale(obj_B1)) expect_equal(as.matrix(vcov(obj_B2)), bread(obj_B2) * getME(obj_B2, "sigma")^2 / v_scale(obj_B2)) expect_equal(as.matrix(vcov(obj_C1)), bread(obj_C1) * getME(obj_C1, "sigma")^2 / v_scale(obj_C1)) expect_equal(as.matrix(vcov(obj_C2)), bread(obj_C2) * getME(obj_C2, "sigma")^2 / v_scale(obj_C2)) }) test_that("vcovCR options work for CR2", { CR2_A <- vcovCR(obj_A1, type = "CR2") expect_identical(vcovCR(obj_A1, cluster = sleepstudy$Subject, type = "CR2"), CR2_A) expect_identical(vcovCR(obj_A1, type = "CR2", inverse_var = TRUE), CR2_A) expect_false(identical(vcovCR(obj_A1, type = "CR2", inverse_var = FALSE), CR2_A)) target <- targetVariance(obj_A1) expect_equal(vcovCR(obj_A1, type = "CR2", target = target, inverse_var = TRUE), CR2_A, check.attributes = FALSE) expect_equal(vcovCR(obj_A1, type = "CR2", target = target, inverse_var = FALSE), CR2_A, check.attributes = FALSE) CR2_B <- vcovCR(obj_B1, type = "CR2") expect_identical(vcovCR(obj_B1, cluster = Orthodont$Subject, type = "CR2"), CR2_B) expect_identical(vcovCR(obj_B1, type = "CR2", inverse_var = TRUE), CR2_B) expect_false(identical(vcovCR(obj_B1, type = "CR2", inverse_var = FALSE), CR2_B)) target <- targetVariance(obj_B1) expect_equal(vcovCR(obj_B1, type = "CR2", target = target, inverse_var = TRUE), CR2_B, check.attributes = FALSE) expect_equal(vcovCR(obj_B1, type = "CR2", target = target, inverse_var = FALSE), CR2_B, check.attributes = FALSE) CR2_C <- vcovCR(obj_C1, type = "CR2") expect_identical(vcovCR(obj_C1, cluster = egsingle$schoolid, type = "CR2"), CR2_C) expect_identical(vcovCR(obj_C1, type = "CR2", inverse_var = TRUE), CR2_C) expect_false(identical(vcovCR(obj_C1, type = "CR2", inverse_var = FALSE), CR2_C)) target <- targetVariance(obj_C1) expect_equal(vcovCR(obj_C1, type = "CR2", target = target, inverse_var = TRUE), CR2_C, check.attributes = FALSE) 
expect_equal(vcovCR(obj_C1, type = "CR2", target = target, inverse_var = FALSE), CR2_C, check.attributes = FALSE) }) test_that("vcovCR options work for CR4", { CR4_A <- vcovCR(obj_A1, type = "CR4") expect_identical(vcovCR(obj_A1, cluster = sleepstudy$Subject, type = "CR4"), CR4_A) expect_identical(vcovCR(obj_A1, type = "CR4", inverse_var = TRUE), CR4_A) expect_false(identical(vcovCR(obj_A1, type = "CR4", inverse_var = FALSE), CR4_A)) target <- targetVariance(obj_A1) expect_equal(vcovCR(obj_A1, type = "CR4", target = target, inverse_var = TRUE), CR4_A, check.attributes = FALSE) expect_equal(vcovCR(obj_A1, type = "CR4", target = target, inverse_var = FALSE), CR4_A, check.attributes = FALSE) CR4_B <- vcovCR(obj_B1, type = "CR4") expect_identical(vcovCR(obj_B1, cluster = Orthodont$Subject, type = "CR4"), CR4_B) expect_identical(vcovCR(obj_B1, type = "CR4", inverse_var = TRUE), CR4_B) expect_false(identical(vcovCR(obj_B1, type = "CR4", inverse_var = FALSE), CR4_B)) target <- targetVariance(obj_B1) expect_equal(vcovCR(obj_B1, type = "CR4", target = target, inverse_var = TRUE), CR4_B, check.attributes = FALSE) expect_equal(vcovCR(obj_B1, type = "CR4", target = target, inverse_var = FALSE), CR4_B, check.attributes = FALSE) CR4_C <- vcovCR(obj_C1, type = "CR4") expect_identical(vcovCR(obj_C1, cluster = egsingle$schoolid, type = "CR4"), CR4_C) expect_identical(vcovCR(obj_C1, type = "CR4", inverse_var = TRUE), CR4_C) expect_false(identical(vcovCR(obj_C1, type = "CR4", inverse_var = FALSE), CR4_C)) target <- targetVariance(obj_C1) expect_equal(vcovCR(obj_C1, type = "CR4", target = target, inverse_var = TRUE), CR4_C, check.attributes = FALSE) expect_equal(vcovCR(obj_C1, type = "CR4", target = target, inverse_var = FALSE), CR4_C, check.attributes = FALSE) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(obj_A1, vcov = "CR2")) expect_true(check_CR(obj_A2, vcov = "CR2")) expect_true(check_CR(obj_B1, vcov = "CR2")) expect_true(check_CR(obj_B2, vcov = "CR2")) 
expect_true(check_CR(obj_C1, vcov = "CR2")) expect_true(check_CR(obj_C2, vcov = "CR2")) expect_true(check_CR(obj_A1, vcov = "CR4")) expect_true(check_CR(obj_A2, vcov = "CR4")) expect_true(check_CR(obj_B1, vcov = "CR4")) expect_true(check_CR(obj_B2, vcov = "CR4")) expect_true(check_CR(obj_C1, vcov = "CR4")) expect_true(check_CR(obj_C2, vcov = "CR4")) }) CR_types <- paste0("CR",0:4) test_that("Order doesn't matter.", { # Model A1 re_order <- sample(nrow(sleepstudy)) dat_scramble <- sleepstudy[re_order,] obj_scramble <- update(obj_A1, data = dat_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(obj_A1, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(obj_scramble, type = x)) expect_equivalent(CR_fit, CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(obj_A1, vcov = x, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(obj_scramble, vcov = x, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef_CS(obj_A1)), 2, simplify = FALSE) Wald_fit <- Wald_test(obj_A1, constraints = constraints, vcov = "CR2", test = "All") Wald_scramble <- Wald_test(obj_scramble, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_fit, Wald_scramble) # Model C1 re_order <- sample(nrow(egsingle)) eg_scramble <- egsingle[re_order,] C1_scramble <- update(obj_C1, data = eg_scramble) expect_equal(coef_CS(obj_C1), coef_CS(C1_scramble)) expect_equal(nobs(obj_C1), nobs(C1_scramble)) expect_equal(residuals_CS(obj_C1)[re_order], residuals_CS(C1_scramble)) expect_equal(model_matrix(obj_C1)[re_order,], model_matrix(C1_scramble), check.attributes = FALSE) constraints <- combn(length(coef_CS(obj_C1)), 2, simplify = FALSE) Wald_fit <- Wald_test(obj_C1, constraints = constraints, vcov = "CR2", test = "All") Wald_scramble <- Wald_test(C1_scramble, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_fit, Wald_scramble) }) 
test_that("clubSandwich works with dropped observations", { dat_miss <- sleepstudy dat_miss$Reaction[sample.int(nrow(sleepstudy), size = round(nrow(sleepstudy) / 10))] <- NA obj_dropped <- update(obj_A1, data = dat_miss, na.action = na.omit) obj_complete <- update(obj_A1, data = dat_miss, subset = !is.na(Reaction)) CR_drop <- lapply(CR_types, function(x) vcovCR(obj_dropped, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(obj_complete, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(obj_dropped, vcov = x, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(obj_complete, vcov = x, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("lmer agrees with lme", { data(BodyWeight, package="nlme") lmer_fit <- lmer(weight ~ Time * Diet + (1 | Rat), data=BodyWeight) lme_fit <- lme(weight ~ Time * Diet, data=BodyWeight, ~ 1 | Rat) expect_equal(coef_CS(lmer_fit), coef_CS(lme_fit)) expect_equal(nobs(lmer_fit), nobs(lme_fit)) expect_equal(model_matrix(lmer_fit), model_matrix(lme_fit), check.attributes = FALSE) expect_equal(residuals_CS(lmer_fit), residuals_CS(lme_fit), check.attributes = FALSE) expect_equal(v_scale(lmer_fit), v_scale(lme_fit)) p <- length(coef_CS(lmer_fit)) expect_equal(bread(lmer_fit) / bread(lme_fit), matrix(1, p, p), check.attributes = FALSE) expect_equal(targetVariance(lmer_fit), targetVariance(lme_fit), check.attributes = FALSE, tol = 10^-6) expect_equal(weightMatrix(lmer_fit), weightMatrix(lme_fit), check.attributes = FALSE) CR_lmer <- lapply(CR_types, function(x) vcovCR(lmer_fit, type = x)) CR_lme <- lapply(CR_types, function(x) vcovCR(lme_fit, type = x)) expect_equivalent(CR_lmer, CR_lme, tolerance = 10^-6) test_lmer <- lapply(CR_types, function(x) coef_test(lmer_fit, vcov = x, test = "All", p_values = FALSE)) test_lme <- lapply(CR_types, function(x) coef_test(lme_fit, vcov = x, test = "All", p_values = FALSE)) 
expect_equal(test_lmer, test_lme, tolerance = 10^-5) constraints <- c(combn(length(coef_CS(lmer_fit)), 2, simplify = FALSE), combn(length(coef_CS(lmer_fit)), 3, simplify = FALSE)) Wald_lmer <- Wald_test(lmer_fit, constraints = constraints, vcov = "CR2", test = "All") Wald_lme <- Wald_test(lme_fit, constraints = constraints, vcov = "CR2", test = "All") expect_equal(Wald_lmer, Wald_lme) }) test_that("Emply levels are dropped in model_matrix", { data(AchievementAwardsRCT) AA_RCT_females <- subset(AchievementAwardsRCT, sex=="Girl" & year != "1999") AA_RCT_females <- within(AA_RCT_females, { sibs_4 <- siblings >= 4 treated2001 <- treated * (year=="2001") }) lmer_fit <- lmer(Bagrut_status ~ year * school_type + father_ed + mother_ed + immigrant + sibs_4 + qrtl + treated2001:half + (1 | school_id), data = AA_RCT_females) betas <- fixef(lmer_fit) X <- model_matrix(lmer_fit) expect_identical(names(betas), colnames(X)) }) test_that("Possible to cluster at higher level than random effects", { n_districts <- 10 n_schools_per <- rnbinom(n_districts, size = 4, prob = 0.3) n_schools <- sum(n_schools_per) n_students_per <- 10 n_students <- n_schools * n_students_per # identifiers for each level district_id <- factor(rep(1:n_districts, n_schools_per * n_students_per)) school_id <- factor(rep(1:sum(n_schools_per), each = n_students_per)) student_id <- 1:n_students # simulated outcome Y <- rnorm(n_districts)[district_id] + rnorm(n_schools)[school_id] + rnorm(n_students) X <- rnorm(n_students) dat <- data.frame(district_id, school_id, student_id, Y, X) dat_scramble <- dat[sample(nrow(dat)),] # fit two-level model lme_2level <- lmer(Y ~ X + (1 | school_id), data = dat) # cluster at level 3 V <- vcovCR(lme_2level, type = "CR2", cluster = dat$district_id) expect_is(V, "vcovCR") expect_error(vcovCR(lme_2level, type = "CR2", cluster = dat_scramble$district_id)) # check that result does not depend on sort-order V_scramble <- vcovCR(lmer(Y ~ X + (1 | school_id), data = dat_scramble), type = 
"CR2", cluster = dat_scramble$district_id) expect_equal(as.matrix(V), as.matrix(V_scramble)) }) clubSandwich/tests/testthat/test_ivreg.R0000644000176200001440000002301613500611664020164 0ustar liggesuserscontext("ivreg objects") set.seed(20190513) library(zoo, quietly=TRUE) library(AER, quietly=TRUE) data("CigarettesSW", package = "AER") Cigs <- within(CigarettesSW, { rprice <- price/cpi rincome <- income/population/cpi tdiff <- (taxs - tax)/cpi }) CR_types <- paste0("CR",0:4) obj_un <- ivreg(log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax/cpi), data = Cigs) obj_wt <- ivreg(log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax/cpi), data = Cigs, weights = population) X <- model.matrix(obj_wt, component = "regressors") Z <- model.matrix(obj_wt, component = "instruments") y <- log(CigarettesSW$packs) w <- weights(obj_wt) test_that("Basic calculations from ivreg agree for unweighted model.", { XZ <- model.matrix(obj_un, component = "projected") ZtZ_inv <- chol2inv(chol(t(Z) %*% Z)) XZ_check <- Z %*% ZtZ_inv %*% t(Z) %*% X expect_equal(XZ, XZ_check, check.attributes=FALSE) expect_equal(coef(obj_un), lm.fit(XZ, y)$coefficients) expect_equal (bread(obj_un), chol2inv(chol(t(XZ) %*% XZ)) * nobs(obj_un), check.attributes=FALSE) hii <- diag(X %*% chol2inv(chol(t(XZ) %*% XZ)) %*% t(XZ)) expect_equal(hatvalues(obj_un), hii) r <- as.vector(y - X %*% coef(obj_un)) expect_equal(r, as.vector(residuals_CS(obj_un))) }) test_that("Basic calculations from ivreg agree for weighted model.", { XZ <- model.matrix(obj_wt, component = "projected") ZwZ_inv <- chol2inv(chol(t(Z) %*% (w * Z))) XZ_check <- Z %*% ZwZ_inv %*% t(Z) %*% (w * X) expect_equal(XZ, XZ_check, check.attributes=FALSE) expect_equal(coef(obj_wt), lm.wfit(XZ, y, w)$coefficients) expect_equal(bread(obj_wt), chol2inv(chol(t(XZ) %*% (w * XZ))) * nobs(obj_wt), check.attributes=FALSE) hii <- diag(X%*% chol2inv(chol(t(XZ) %*% (w * XZ))) %*% t(w * XZ)) expect_false(all(hatvalues(obj_wt) == 
hii)) # does not agree because hatvalues doesn't work with weighting r <- as.vector(y - X %*% coef(obj_wt)) expect_equal(r, as.vector(residuals_CS(obj_wt))) }) test_that("bread works", { expect_true(check_bread(obj_un, cluster = Cigs$state, y = log(Cigs$packs))) tsls_vcov <- bread(obj_un) * summary(obj_un)$sigma^2 / v_scale(obj_un) expect_equal(vcov(obj_un), tsls_vcov) expect_true(check_bread(obj_wt, cluster = Cigs$state, y = log(Cigs$packs))) wtsls_vcov <- bread(obj_wt) * summary(obj_wt)$sigma^2 / v_scale(obj_wt) expect_equal(vcov(obj_wt), wtsls_vcov) }) test_that("vcovCR options don't matter for CR0", { expect_error(vcovCR(obj_un, type = "CR0")) CR0 <- vcovCR(obj_un, cluster = Cigs$state, type = "CR0") expect_output(print(CR0)) attr(CR0, "target") <- NULL attr(CR0, "inverse_var") <- NULL CR0_A <- vcovCR(obj_un, cluster = Cigs$state, type = "CR0", target = 1 / Cigs$population) attr(CR0_A, "target") <- NULL attr(CR0_A, "inverse_var") <- NULL expect_identical(CR0_A, CR0) CR0_B <- vcovCR(obj_un, cluster = Cigs$state, type = "CR0", target = 1 / Cigs$population, inverse_var = FALSE) attr(CR0_B, "target") <- NULL attr(CR0_B, "inverse_var") <- NULL expect_identical(CR0_A, CR0) expect_error(vcovCR(obj_un, cluster = Cigs$state, type = "CR0", target = 1 / Cigs$population, inverse_var = TRUE)) wCR0 <- vcovCR(obj_wt, cluster = Cigs$state, type = "CR0") attr(wCR0, "target") <- NULL attr(wCR0, "inverse_var") <- NULL wCR0_A <- vcovCR(obj_wt, cluster = Cigs$state, type = "CR0", target = 1 / Cigs$population) attr(wCR0_A, "target") <- NULL attr(wCR0_A, "inverse_var") <- NULL expect_identical(wCR0_A, wCR0) wCR0_B <- vcovCR(obj_wt, cluster = Cigs$state, type = "CR0", target = 1 / Cigs$population, inverse_var = FALSE) attr(wCR0_B, "target") <- NULL attr(wCR0_B, "inverse_var") <- NULL expect_identical(wCR0_B, wCR0) expect_error(vcovCR(obj_wt, cluster = Cigs$state, type = "CR0", target = 1 / Cigs$population, inverse_var = TRUE)) }) test_that("vcovCR options work for CR2", { CR2_iv <- 
vcovCR(obj_un, cluster = Cigs$state, type = "CR2") expect_identical(vcovCR(obj_un, cluster = Cigs$state, type = "CR2", target = rep(1, nobs(obj_un))), CR2_iv) expect_false(identical(vcovCR(obj_un, cluster = Cigs$state, type = "CR2", target = 1 / Cigs$population), CR2_iv)) wCR2_id <- vcovCR(obj_wt, cluster = Cigs$state, type = "CR2") expect_identical(vcovCR(obj_wt, cluster = Cigs$state, type = "CR2", inverse_var = FALSE), wCR2_id) expect_identical(vcovCR(obj_wt, cluster = Cigs$state, type = "CR2", target = rep(1, nobs(obj_un))), wCR2_id) expect_identical(vcovCR(obj_wt, cluster = Cigs$state, type = "CR2", target = rep(1, nobs(obj_un)), inverse_var = FALSE), wCR2_id) }) test_that("vcovCR options work for CR4", { CR4_not <- vcovCR(obj_un, cluster = Cigs$state, type = "CR4") expect_identical(vcovCR(obj_un, cluster = Cigs$state, type = "CR4", target = rep(1, nobs(obj_un))), CR4_not) expect_identical(vcovCR(obj_un, cluster = Cigs$state, type = "CR4", target = rep(1, nobs(obj_un)), inverse_var = FALSE), CR4_not) expect_false(identical(vcovCR(obj_un, cluster = Cigs$state, type = "CR4", target = 1 / Cigs$population), CR4_not)) wCR4_id <- vcovCR(obj_wt, cluster = Cigs$state, type = "CR4") expect_identical(vcovCR(obj_wt, cluster = Cigs$state, type = "CR4", inverse_var = FALSE), wCR4_id) expect_identical(vcovCR(obj_wt, cluster = Cigs$state, type = "CR4", target = rep(1, nobs(obj_wt))), wCR4_id) expect_identical(vcovCR(obj_wt, cluster = Cigs$state, type = "CR4", target = rep(1, nobs(obj_wt)), inverse_var = FALSE), wCR4_id) }) test_that("CR2 is target-unbiased", { expect_true(check_CR(obj_un, vcov = "CR2", cluster = Cigs$state)) expect_true(check_CR(obj_wt, vcov = "CR2", cluster = Cigs$state)) }) test_that("CR4 is target-unbiased", { skip("Need to understand target-unbiasedness for ivreg objects.") expect_true(check_CR(obj_un, vcov = "CR4", cluster = Cigs$state)) expect_true(check_CR(obj_wt, vcov = "CR4", cluster = Cigs$state)) }) test_that("vcovCR is equivalent to vcovHC (with 
HC0 or HC1) when clusters are all of size 1", { library(sandwich, quietly=TRUE) CR0 <- vcovCR(obj_un, cluster = 1:nobs(obj_un), type = "CR0") expect_equal(vcovHC(obj_un, type = "HC0"), as.matrix(CR0)) CR1 <- vcovCR(obj_un, cluster = 1:nobs(obj_un), type = "CR1S") expect_equal(vcovHC(obj_un, type = "HC1"), as.matrix(CR1)) CR2 <- vcovCR(obj_un, cluster = 1:nobs(obj_un), type = "CR2") expect_false(all(vcovHC(obj_un, type = "HC2") == as.matrix(CR2))) CR3 <- vcovCR(obj_un, cluster = 1:nobs(obj_un), type = "CR3") expect_false(all(vcovHC(obj_un, type = "HC3") == as.matrix(CR3))) }) test_that("Order doesn't matter.",{ dat_scramble <- Cigs[sample(nrow(Cigs)),] obj_scramble <- update(obj_wt, data = dat_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(obj_wt, cluster = Cigs$state, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(obj_scramble, cluster = dat_scramble$state, type = x)) expect_equal(CR_fit, CR_scramble, check.attributes = FALSE, tolerance = 5 * 10^-7) test_fit <- lapply(CR_types, function(x) coef_test(obj_wt, vcov = x, cluster = Cigs$state, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(obj_scramble, vcov = x, cluster = dat_scramble$state, test = "All", p_values = FALSE)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef(obj_wt)), 2, simplify = FALSE) Wald_fit <- Wald_test(obj_wt, constraints = constraints, vcov = "CR2", cluster = Cigs$state, test = "All") Wald_scramble <- Wald_test(obj_scramble, constraints = constraints, vcov = "CR2", cluster = dat_scramble$state, test = "All") expect_equal(Wald_fit, Wald_scramble, tolerance = 10^-6) }) test_that("clubSandwich works with dropped observations", { dat_miss <- Cigs dat_miss$rincome[sample.int(nrow(Cigs), size = round(nrow(Cigs) / 10))] <- NA iv_dropped <- update(obj_un, data = dat_miss) dat_complete <- subset(dat_miss, !is.na(rincome)) iv_complete <- update(obj_un, data = dat_complete) CR_drop <- lapply(CR_types, 
function(x) vcovCR(iv_dropped, cluster = dat_miss$state, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(iv_complete, cluster = dat_complete$state, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(iv_dropped, vcov = x, cluster = dat_miss$state, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(iv_complete, vcov = x, cluster = dat_complete$state, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("weight scale doesn't matter", { iv_fit_w <- update(obj_un, weights = rep(4, nobs(obj_un))) unweighted_fit <- lapply(CR_types, function(x) vcovCR(obj_un, cluster = Cigs$state, type = x)) weighted_fit <- lapply(CR_types, function(x) vcovCR(iv_fit_w, cluster = Cigs$state, type = x)) expect_equal(lapply(unweighted_fit, as.matrix), lapply(weighted_fit, as.matrix), tol = 5 * 10^-7) target <- 1 + rpois(nrow(Cigs), lambda = 8) unweighted_fit <- lapply(CR_types, function(x) vcovCR(obj_un, cluster = Cigs$state, type = x, target = target)) weighted_fit <- lapply(CR_types, function(x) vcovCR(iv_fit_w, cluster = Cigs$state, type = x, target = target * 15)) expect_equal(lapply(unweighted_fit, as.matrix), lapply(weighted_fit, as.matrix), tol = 5 * 10^-7) }) clubSandwich/tests/testthat/test_glm_logit.R0000644000176200001440000002673013500611664021033 0ustar liggesuserscontext("logit glm objects") set.seed(20190513) m <- 20 cluster <- factor(rep(LETTERS[1:m], 3 + rpois(m, 5))) n <- length(cluster) X1 <- c(rep(-0.5, m / 2), rep(0.5, m / 2))[cluster] X2 <- c(rep(-0.3, 0.4 * m), rep(0.7, 0.3 * m), rep(-0.3, 0.4 * m))[cluster] X3 <- rnorm(m)[cluster] + rnorm(n) X4 <- rnorm(n) X <- cbind(X1, X2, X3, X4) eta <- -0.4 + X %*% c(0.3, -0.6, 0.15, 0.15) p <- 1 / (1 + exp(-eta)) summary(p) w <- sample(1:4, size = n, replace = TRUE) y1 <- rbinom(n, size = 1, prob = p) y2 <- rbinom(n, size = w, prob = p) yp <- y2 / w dat <- data.frame(y1, y2, yp, X, cluster, w, 
row = 1:n) logit_fit <- glm(y1 ~ X1 + X2 + X3 + X4, data = dat, family = "binomial") sflogit_fit <- glm(cbind(y2, w - y2) ~ X1 + X2 + X3 + X4, data = dat, family = "binomial") plogit_fit <- glm(yp ~ X1 + X2 + X3 + X4, data = dat, weights = w, family = "quasibinomial") # obj <- logit_fit # y <- dat$y1 # type <- "CR2" # vcov <- vcovCR(obj, cluster = cluster, type = type) # target = NULL # inverse_var = FALSE # # cluster <- droplevels(as.factor(cluster)) # B <- sandwich::bread(obj) / v_scale(obj) # X_list <- matrix_list(model_matrix(obj), cluster, "row") # W_list <- weightMatrix(obj, cluster) # XWX <- Reduce("+", Map(function(x, w) t(x) %*% w %*% x, x = X_list, w = W_list)) # M <- chol2inv(chol(XWX)) # attr(M, "dimnames") <- attr(B, "dimnames") # # M / B # diff(range(M / B)) test_that("bread works", { expect_true(check_bread(logit_fit, cluster = dat$cluster, check_coef = FALSE, tol = 1.5 * 10^-3)) glm_vcov <- bread(logit_fit) * summary(logit_fit)$dispersion / v_scale(logit_fit) expect_equal(vcov(logit_fit), glm_vcov) expect_true(check_bread(sflogit_fit, cluster = dat$cluster, check_coef = FALSE, tol = 1.5 * 10^-3)) glm_vcov <- bread(sflogit_fit) * summary(sflogit_fit)$dispersion / v_scale(sflogit_fit) expect_equal(vcov(sflogit_fit), glm_vcov) expect_true(check_bread(plogit_fit, cluster = dat$cluster, check_coef = FALSE, tol = 1.5 * 10^-3)) glm_vcov <- bread(plogit_fit) * summary(plogit_fit)$dispersion / v_scale(plogit_fit) expect_equal(vcov(plogit_fit), glm_vcov) }) test_that("vcovCR options work for CR2", { CR2_iv <- vcovCR(logit_fit, cluster = dat$cluster, type = "CR2") expect_identical(vcovCR(logit_fit, cluster = dat$cluster, type = "CR2", inverse_var = TRUE), CR2_iv) expect_equal(vcovCR(logit_fit, cluster = dat$cluster, type = "CR2", target = targetVariance(logit_fit, cluster = dat$cluster), inverse_var = TRUE), CR2_iv) attr(CR2_iv, "inverse_var") <- FALSE expect_equal(vcovCR(logit_fit, cluster = dat$cluster, type = "CR2", target = targetVariance(logit_fit, 
cluster = dat$cluster), inverse_var = FALSE), CR2_iv) CR2_iv <- vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR2") expect_identical(vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR2", inverse_var = TRUE), CR2_iv) expect_equal(vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR2", target = targetVariance(sflogit_fit, cluster = dat$cluster), inverse_var = TRUE), CR2_iv) attr(CR2_iv, "inverse_var") <- FALSE expect_equal(vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR2", target = targetVariance(sflogit_fit, cluster = dat$cluster), inverse_var = FALSE), CR2_iv) CR2_iv <- vcovCR(plogit_fit, cluster = dat$cluster, type = "CR2") expect_identical(vcovCR(plogit_fit, cluster = dat$cluster, type = "CR2", inverse_var = TRUE), CR2_iv) expect_equal(vcovCR(plogit_fit, cluster = dat$cluster, type = "CR2", target = targetVariance(plogit_fit, cluster = dat$cluster), inverse_var = TRUE), CR2_iv) attr(CR2_iv, "inverse_var") <- FALSE expect_equal(vcovCR(plogit_fit, cluster = dat$cluster, type = "CR2", target = targetVariance(plogit_fit, cluster = dat$cluster), inverse_var = FALSE), CR2_iv) }) test_that("vcovCR options work for CR4", { CR4_iv <- vcovCR(logit_fit, cluster = dat$cluster, type = "CR4") expect_identical(vcovCR(logit_fit, cluster = dat$cluster, type = "CR4", inverse_var = TRUE), CR4_iv) expect_equal(vcovCR(logit_fit, cluster = dat$cluster, type = "CR4", target = targetVariance(logit_fit, cluster = dat$cluster), inverse_var = TRUE), CR4_iv) attr(CR4_iv, "inverse_var") <- FALSE expect_equal(vcovCR(logit_fit, cluster = dat$cluster, type = "CR4", target = targetVariance(logit_fit, cluster = dat$cluster), inverse_var = FALSE), CR4_iv) CR4_iv <- vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR4") expect_identical(vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR4", inverse_var = TRUE), CR4_iv) expect_equal(vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR4", target = targetVariance(sflogit_fit, cluster = dat$cluster), inverse_var = TRUE), CR4_iv) 
attr(CR4_iv, "inverse_var") <- FALSE expect_equal(vcovCR(sflogit_fit, cluster = dat$cluster, type = "CR4", target = targetVariance(sflogit_fit, cluster = dat$cluster), inverse_var = FALSE), CR4_iv) CR4_iv <- vcovCR(plogit_fit, cluster = dat$cluster, type = "CR4") expect_identical(vcovCR(plogit_fit, cluster = dat$cluster, type = "CR4", inverse_var = TRUE), CR4_iv) expect_equal(vcovCR(plogit_fit, cluster = dat$cluster, type = "CR4", target = targetVariance(plogit_fit, cluster = dat$cluster), inverse_var = TRUE), CR4_iv) attr(CR4_iv, "inverse_var") <- FALSE expect_equal(vcovCR(plogit_fit, cluster = dat$cluster, type = "CR4", target = targetVariance(plogit_fit, cluster = dat$cluster), inverse_var = FALSE), CR4_iv) }) test_that("CR2 and CR4 are target-unbiased", { expect_true(check_CR(logit_fit, vcov = "CR2", cluster = dat$cluster)) expect_true(check_CR(sflogit_fit, vcov = "CR2", cluster = dat$cluster)) expect_true(check_CR(plogit_fit, vcov = "CR2", cluster = dat$cluster)) expect_true(check_CR(logit_fit, vcov = "CR4", cluster = dat$cluster)) expect_true(check_CR(sflogit_fit, vcov = "CR4", cluster = dat$cluster)) expect_true(check_CR(plogit_fit, vcov = "CR4", cluster = dat$cluster)) }) test_that("vcovCR is equivalent to vcovHC when clusters are all of size 1", { library(sandwich, quietly=TRUE) HC_types <- paste0("HC", 0:3) HC_list <- lapply(HC_types, function(t) vcovHC(logit_fit, type = t)) CR_types <- paste0("CR", 0:3) CR_types[2] <- "CR1S" CR_list <- lapply(CR_types, function(t) as.matrix(vcovCR(logit_fit, cluster = dat$row, type = t))) expect_equal(HC_list, CR_list, tol = 4 * 10^-4) }) CR_types <- paste0("CR", 0:4) test_that("Order doesn't matter.",{ dat_scramble <- dat[sample(n),] logit_scramble <- update(logit_fit, data = dat_scramble) CR_fit <- lapply(CR_types, function(x) vcovCR(logit_fit, cluster = dat$cluster, type = x)) CR_scramble <- lapply(CR_types, function(x) vcovCR(logit_scramble, cluster = dat_scramble$cluster, type = x)) expect_equivalent(CR_fit, 
CR_scramble) test_fit <- lapply(CR_types, function(x) coef_test(logit_fit, vcov = x, cluster = dat$cluster, test = "All", p_values = FALSE)) test_scramble <- lapply(CR_types, function(x) coef_test(logit_scramble, vcov = x, cluster = dat_scramble$cluster, test = "All", p_values = FALSE)) # compare_tests <- mapply(function(a, b) max(abs(a / b - 1), na.rm = TRUE), test_fit, test_scramble) # expect_true(all(compare_tests < 10^-4)) expect_equal(test_fit, test_scramble, tolerance = 10^-6) constraints <- combn(length(coef(logit_fit)), 2, simplify = FALSE) Wald_fit <- Wald_test(logit_fit, constraints = constraints, vcov = "CR2", cluster = dat$cluster, test = "All") Wald_scramble <- Wald_test(logit_scramble, constraints = constraints, vcov = "CR2", cluster = dat_scramble$cluster, test = "All") expect_equal(Wald_fit, Wald_scramble) }) test_that("clubSandwich works with dropped observations", { dat_miss <- dat dat_miss$X1[sample.int(n, size = round(n / 10))] <- NA logit_dropped <- update(logit_fit, data = dat_miss) dat_complete <- subset(dat_miss, !is.na(X1)) logit_complete <- update(logit_fit, data = dat_complete) CR_drop <- lapply(CR_types, function(x) vcovCR(logit_dropped, cluster = dat_miss$cluster, type = x)) CR_complete <- lapply(CR_types, function(x) vcovCR(logit_complete, cluster = dat_complete$cluster, type = x)) expect_identical(CR_drop, CR_complete) test_drop <- lapply(CR_types, function(x) coef_test(logit_dropped, vcov = x, cluster = dat_miss$cluster, test = "All", p_values = FALSE)) test_complete <- lapply(CR_types, function(x) coef_test(logit_complete, vcov = x, cluster = dat_complete$cluster, test = "All", p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("clubSandwich works with aliased predictors", { data(npk, package = "datasets") npk_alias <- glm(yield ~ block + N*P*K, data = npk) npk_drop <- glm(yield ~ block + N + P + K + N:P + N:K + P:K, data = npk) CR_alias <- lapply(CR_types[-4], function(x) vcovCR(npk_alias, cluster = 
npk$block, type = x)) CR_drop <- lapply(CR_types[-4], function(x) vcovCR(npk_drop, cluster = npk$block, type = x)) expect_identical(CR_alias, CR_drop) test_drop <- lapply(CR_types[-4], function(x) coef_test(npk_alias, vcov = x, cluster = npk$block, test = c("z","naive-t","Satterthwaite"), p_values = FALSE)[-13,]) test_complete <- lapply(CR_types[-4], function(x) coef_test(npk_drop, vcov = x, cluster = npk$block, test = c("z","naive-t","Satterthwaite"), p_values = FALSE)) expect_identical(test_drop, test_complete) }) test_that("clubSandwich results are equivalent to geepack", { library(geepack) # check CR0 with logit logit_gee <- geeglm(y1 ~ X1 + X2 + X3 + X4, id = cluster, data = dat, family = "binomial") logit_refit <- update(logit_fit, start = coef(logit_gee)) expect_equal(coef(logit_refit), coef(logit_gee)) V_gee0 <- summary(logit_gee)$cov.scaled V_CR0 <- as.matrix(vcovCR(logit_refit, cluster = dat$cluster, type = "CR0")) attr(V_gee0, "dimnames") <- attr(V_CR0, "dimnames") expect_equal(V_gee0, V_CR0) # check CR3 with logit logit_gee <- geeglm(y1 ~ X1 + X2 + X3 + X4, id = cluster, data = dat, family = "binomial", std.err = "jack") V_gee3 <- summary(logit_gee)$cov.scaled V_CR3 <- as.matrix(vcovCR(logit_refit, cluster = dat$cluster, type = "CR3")) attr(V_gee3, "dimnames") <- attr(V_CR3, "dimnames") expect_equal(V_gee3 * m / (m - 6), V_CR3) # check CR0 with plogit plogit_gee <- geeglm(yp ~ X1 + X2 + X3 + X4, id = cluster, data = dat, weights = w, family = "binomial") plogit_refit <- update(plogit_fit, start = coef(plogit_gee)) expect_equal(coef(plogit_refit), coef(plogit_gee)) V_gee0 <- summary(plogit_gee)$cov.scaled V_CR0 <- as.matrix(vcovCR(plogit_refit, cluster = dat$cluster, type = "CR0")) attr(V_gee0, "dimnames") <- attr(V_CR0, "dimnames") expect_equal(V_gee0, V_CR0) }) clubSandwich/tests/testthat/test_coef.R0000644000176200001440000000737413500611664017775 0ustar liggesuserscontext("t-tests") set.seed(20190513) balanced_dat <- function(m, n) { cluster <- 
factor(rep(LETTERS[1:m], each = n)) N <- length(cluster) m1 <- sample(3:(m-7), size = 1) m2 <- sample((m1 + 3):(m-3), size = 1) - m1 m3 <- m - m1 - m2 c(m1, m2, m3) X_btw <- rep(rep(LETTERS[1:3], c(m1, m2, m3)), each = n) X_wth <- rep(rep(c(0,1), each = n / 2), m) nu <- rnorm(m)[cluster] e <- rnorm(n * m) y <- nu + e data.frame(y, X_btw, X_wth, cluster, row = 1:N) } CRs <- paste0("CR", 0:4) test_that("vcov arguments work", { dat <- balanced_dat(m = 15, n = 8) lm_fit <- lm(y ~ X_btw + X_wth, data = dat) VCR <- lapply(CRs, function(t) vcovCR(lm_fit, cluster = dat$cluster, type = t)) test_A <- lapply(VCR, function(v) coef_test(lm_fit, vcov = v, test = "All", p_values = FALSE)) test_B <- lapply(CRs, function(t) coef_test(lm_fit, vcov = t, cluster = dat$cluster, test = "All", p_values = FALSE)) expect_identical(test_A, test_B) }) test_that("get_which_coef() works", { f <- 6 beta <- 1:f beta_names <- letters[1:f] names(beta) <- beta_names which_grid <- as.matrix(expand.grid(rep(list(c(FALSE,TRUE)), f))) dimnames(which_grid) <- NULL name_list <- apply(which_grid, 1, function(x) beta_names[x]) int_list <- apply(which_grid, 1, which) which_log <- apply(which_grid[-1,], 1, get_which_coef, beta = beta) which_char <- sapply(name_list[-1], get_which_coef, beta = beta) which_int <- sapply(int_list[-1], get_which_coef, beta = beta) expect_identical(get_which_coef(beta, coefs = "All"), rep(TRUE, f)) expect_error(get_which_coef(beta, coefs = which_grid[1,])) expect_error(get_which_coef(beta, coefs = name_list[[1]])) expect_error(get_which_coef(beta, coefs = int_list[[1]])) expect_identical(which_log, which_char) expect_identical(which_log, which_int) expect_identical(which_char, which_int) }) test_that("coefs argument works", { dat <- balanced_dat(m = 15, n = 8) lm_fit <- lm(y ~ X_btw + X_wth, data = dat) which_grid <- expand.grid(rep(list(c(FALSE,TRUE)), length(coef(lm_fit)))) tests_all <- coef_test(lm_fit, vcov = "CR0", cluster = dat$cluster, test = "All", coefs = "All", p_values 
= FALSE) tests_A <- apply(which_grid[-1,], 1, function(x) tests_all[x,]) tests_B <- apply(which_grid[-1,], 1, function(x) coef_test(lm_fit, vcov = "CR0", cluster = dat$cluster, test = "All", coefs = x, p_values = FALSE)) expect_identical(tests_A, tests_B) }) test_that("printing works", { dat <- balanced_dat(m = 15, n = 8) lm_fit <- lm(y ~ X_btw + X_wth, data = dat) t_tests <- coef_test(lm_fit, vcov = "CR2", cluster = dat$cluster, test = "All") expect_output(print(t_tests)) }) test_that("p-values are ordered", { dat <- balanced_dat(m = 15, n = 8) lm_fit <- lm(y ~ X_btw + X_wth, data = dat) test_results <- lapply(CRs, function(t) coef_test(lm_fit, vcov = t, cluster = dat$cluster, test = "All")) test_results <- do.call(rbind, test_results) expect_true(with(test_results, all(p_z < p_t))) expect_true(with(test_results, all(p_z < p_Satt))) }) test_that("Satterthwaite df work for special cases", { m <- sample(12:26, size = 1) n <- sample(seq(4,12,2), size = 1) dat <- balanced_dat(m, n) lm_fit <- lm(y ~ X_btw + X_wth, data = dat) t_tests <- coef_test(lm_fit, vcov = "CR2", cluster = dat$cluster, test = "Satterthwaite") expect_equal(t_tests$df[4], m - 1) mg <- table(dat$X_btw) / n df <- apply(cbind(mg[1], mg[-1]), 1, function(x) sum(x)^2 * prod(x - 1) / sum(x^2 * (x - 1))) expect_equivalent(t_tests$df[2:3], df) lm_fit <- lm(y ~ 0 + cluster + X_wth, data = dat) t_tests <- coef_test(lm_fit, vcov = "CR2", cluster = dat$cluster, test = "Satterthwaite") expect_equal(t_tests$df[m + 1], m - 1) }) clubSandwich/tests/testthat.R0000644000176200001440000000011013500611664015777 0ustar liggesuserslibrary(testthat) library(clubSandwich) test_check("clubSandwich") clubSandwich/vignettes/0000755000176200001440000000000013576425047014705 5ustar liggesusersclubSandwich/vignettes/meta-analysis-with-CRVE.Rmd0000644000176200001440000001465213574216757021642 0ustar liggesusers--- title: "Meta-analysis with cluster-robust variance estimation" author: "James E. 
Pustejovsky" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Meta-analysis with cluster-robust variance estimation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- This vignette demonstrates how to use the `clubSandwich` package to conduct a meta-analysis of dependent effect sizes with robust variance estimation. Tests of meta-regression coefficients and F-tests of multiple-coefficient hypotheses are calculated using small-sample corrections proposed by Tipton (2015) and Tipton and Pustejovsky (2015). The example uses a dataset of effect sizes from a Campbell Collaboration systematic review of dropout prevention programs, conducted by Sandra Jo Wilson and colleagues (2011). The original analysis included a meta-regression with covariates that capture methodological, participant, and program characteristics. The regression specification used here is similar to Model III from Wilson et al. (2011), but treats the `evaluator_independence` and `implementation_quality` variables as categorical rather than interval-level. Also, the original analysis clustered at the level of the sample (some studies reported results from multiple samples), whereas here we cluster at the study level. The meta-regression can be fit in several different ways. We first demonstrate using the `robumeta` package (Fisher & Tipton, 2015) and then using the `metafor` package (Viechtbauer, 2010). 
## robumeta model ```{r, include=FALSE} options(width = 100) ``` ```{r, message = FALSE} library(clubSandwich) library(robumeta) data(dropoutPrevention) # clean formatting names(dropoutPrevention)[7:8] <- c("eval","implement") levels(dropoutPrevention$eval) <- c("independent","indirect","planning","delivery") levels(dropoutPrevention$implement) <- c("low","medium","high") levels(dropoutPrevention$program_site) <- c("community","mixed","classroom","school") levels(dropoutPrevention$study_design) <- c("matched","unmatched","RCT") levels(dropoutPrevention$adjusted) <- c("no","yes") m3_robu <- robu(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + eval + male_pct + white_pct + average_age + implement + program_site + duration + service_hrs, data = dropoutPrevention, studynum = studyID, var.eff.size = varLOR, modelweights = "HIER") print(m3_robu) ``` Note that `robumeta` produces small-sample corrected standard errors and t-tests, and so there is no need to repeat those calculations with `clubSandwich`. The `eval` variable has four levels, and it might be of interest to test whether the average program effects differ by the degree of evaluator independence. The null hypothesis in this case is that the 10th, 11th, and 12th regression coefficients are all equal to zero. A small-sample adjusted F-test for this hypothesis can be obtained as follows. The `vcov = "CR2"` option means that the standard errors will be corrected using the bias-reduced linearization estimator described in Tipton and Pustejovsky (2015). ```{r} Wald_test(m3_robu, constraints = 10:12, vcov = "CR2") ``` By default, the `Wald_test` function provides an F-type test with degrees of freedom estimated using the approximate Hotelling's $T^2_Z$ method. The test has less than 17 degrees of freedom, even though there are 152 independent studies in the data, and has a p-value that is not quite significant at conventional levels. 
The low degrees of freedom are a consequence of the fact that one of the levels of `evaluator independence` has only a few effect sizes in it: ```{r} table(dropoutPrevention$eval) ``` ## metafor model `clubSandwich` also works with models fit using the `metafor` package. Here we re-fit the same regression specification, but use REML to estimate the variance components (`robumeta` uses a method-of-moments estimator), as well as a somewhat different weighting scheme than that used in `robumeta`. ```{r, message = FALSE} library(metafor) m3_metafor <- rma.mv(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + eval + male_pct + white_pct + average_age + implement + program_site + duration + service_hrs, V = varLOR, random = list(~ 1 | studyID, ~ 1 | studySample), data = dropoutPrevention) summary(m3_metafor) ``` `metafor` produces model-based standard errors, t-tests, and confidence intervals. The `coef_test` function from `clubSandwich` will calculate robust standard errors and robust t-tests for each of the coefficients: ```{r} coef_test(m3_metafor, vcov = "CR2") ``` Note that `coef_test` assumed that it should cluster based on `studyID`, which is the outer-most random effect in the metafor model. This can be specified explicitly by including the option `cluster = dropoutPrevention$studyID` in the call. The F-test for degree of evaluator independence uses the same syntax as before: ```{r} Wald_test(m3_metafor, constraints = 10:12, vcov = "CR2") ``` Despite some differences in weighting schemes, the p-value is very close to the result obtained using `robumeta`. ## References Fisher, Z., & Tipton, E. (2015). robumeta: An R-package for robust variance estimation in meta-analysis. [arXiv:1503.02220](http://arxiv.org/abs/1503.02220) Tipton, E. (2015). Small sample adjustments for robust variance estimation with meta-regression. _Psychological Methods, 20_(3), 375-393. 
doi: [10.1037/met0000011](https://doi.org/10.1037/met0000011) Tipton, E., & Pustejovsky, J. E. (2015). Small-sample adjustments for tests of moderators and model fit using robust variance estimation in meta-regression. _Journal of Educational and Behavioral Statistics, 40_(6), 604-634. doi: [10.3102/1076998615606099](https://doi.org/10.3102/1076998615606099) Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. _Journal of Statistical Software, 36_(3), 1-48. URL: http://www.jstatsoft.org/v36/i03/ Wilson, S. J., Lipsey, M. W., Tanner-Smith, E., Huang, C. H., & Steinka-Fry, K. T. (2011). Dropout prevention and intervention programs: Effects on school completion and dropout Among school-aged children and youth: A systematic review. _Campbell Systematic Reviews, 7_(8). URL: http://www.campbellcollaboration.org/lib/project/158/clubSandwich/vignettes/panel-data-CRVE.Rmd0000644000176200001440000003573213574216757020122 0ustar liggesusers--- title: "Cluster-robust standard errors and hypothesis tests in panel data models" author: "James E. Pustejovsky" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Cluster-robust standard errors and hypothesis tests in panel data models} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The importance of using cluster-robust variance estimators (i.e., "clustered standard errors") in panel models is now widely recognized. Less widely recognized is the fact that standard methods for constructing hypothesis tests and confidence intervals based on CRVE can perform quite poorly when based on a limited number of independent clusters. Furthermore, it can be difficult to determine what counts as a large-enough sample to trust standard CRVE methods, because the finite-sample behavior of the variance estimators and test statistics depends on the configuration of the covariates, not just the total number of clusters. 
One solution to this problem is to use bias-reduced linearization (BRL), which was proposed by Bell and McCaffrey (2002) and has recently begun to receive attention in the econometrics literature (e.g., Cameron & Miller, 2015; Imbens & Kolesar, 2015). The idea of BRL is to correct the bias of standard CRVE based on a working model, and then to use a degrees-of-freedom correction for Wald tests based on the bias-reduced CRVE. That may seem silly (after all, the whole point of CRVE is to avoid making distributional assumptions about the errors in your model), but it turns out that the correction can help quite a bit, even when the working model is wrong. The degrees-of-freedom correction is based on a standard Satterthwaite-type approximation, and also relies on the working model. A problem with Bell and McCaffrey's original formulation of BRL is that it does not work in some very common models for panel data, such as state-by-year panels that include fixed effects for each state and each year (Angrist and Pischke, 2009, point out this issue in their chapter on "non-standard standard error issues"; see also Young, 2016). However, Pustejovsky and Tipton (2016) proposed a generalization of BRL that works even in models with arbitrary sets of fixed effects, and this generalization is implemented in `clubSandwich` as CRVE type `CR2`. The package also implements small-sample corrections for multiple-constraint hypothesis tests based on an approximation proposed by Pustejovsky and Tipton (2016). For one-parameter constraints, the test reduces to a t-test with Satterthwaite degrees of freedom, and so it is a natural extension of BRL. The following example demonstrates how to use `clubSandwich` to do cluster-robust inference for a state-by-year panel model with fixed effects in both dimensions, clustering by states. 
## Effects of changing the minimum legal drinking age Carpenter and Dobkin (2011) analyzed the effects of changes in the minimum legal drinking age on rates of motor vehicle fatalities among 18-20 year olds, using state-level panel data from the National Highway Traffic Administration's Fatal Accident Reporting System. In their new textbook, Angrist and Pischke (2014) developed a stylized example based on Carpenter and Dobkin's work. The following example uses Angrist and Pischke's data and follows their analysis because their data are [easily available](http://masteringmetrics.com/resources/). The outcome is the incidence of deaths in motor vehicle crashes among 18-20 year-olds (per 100,000 residents), for each state plus the District of Columbia, over the period 1970 to 1983. There were several changes in the minimum legal drinking age during this time period, with variability in the timing of changes across states. Angrist and Pischke (following Carpenter and Dobkin) use a difference-in-differences strategy to estimate the effects of lowering the minimum legal drinking age from 21 to 18. Their specification is $$y_{it} = \alpha_i + \beta_t + \gamma b_{it} + \delta d_{it} + \epsilon_{it},$$ for $i$ = 1,...,51 and $t$ = 1970,...,1983. In this model, $\alpha_i$ is a state-specific fixed effect, $\beta_t$ is a year-specific fixed effect, $b_{it}$ is the current rate of beer taxation in state $i$ in year $t$, $d_{it}$ is the proportion of 18-20 year-olds in state $i$ in year $t$ who are legally allowed to drink, and $\delta$ captures the effect of shifting the minimum legal drinking age from 21 to 18. Following Angrist and Pischke's analysis, we estimate this model both by (unweighted) OLS and by weighted least squares with weights corresponding to population size in a given state and year. We also demonstrate random effects estimation and implement a cluster-robust Hausman specification test. 
## Unweighted OLS The following code does some simple data-munging and then estimates the model by OLS: ```{r} library(clubSandwich) data(MortalityRates) # subset for deaths in motor vehicle accidents, 1970-1983 MV_deaths <- subset(MortalityRates, cause=="Motor Vehicle" & year <= 1983 & !is.na(beertaxa)) # fit by OLS lm_unweighted <- lm(mrate ~ 0 + legal + beertaxa + factor(state) + factor(year), data = MV_deaths) ``` The `coef_test` function from `clubSandwich` can then be used to test the hypothesis that changing the minimum legal drinking age has no effect on motor vehicle deaths in this cohort (i.e., $H_0: \delta = 0$). The usual way to test this is to cluster the standard errors by state, calculate the robust Wald statistic, and compare that to a standard normal reference distribution. The code and results are as follows: ```{r} coef_test(lm_unweighted, vcov = "CR1", cluster = MV_deaths$state, test = "naive-t")[1:2,] ``` A better approach would be to use the generalized, bias-reduced linearization CRVE, together with Satterthwaite degrees of freedom. In the `clubSandwich` package, the BRL adjustment is called "CR2" because it is directly analogous to the HC2 correction used in heteroskedasticity-robust variance estimation. When applied to an OLS model estimated by `lm`, the default working model is an identity matrix, which amounts to the "working" assumption that the errors are all uncorrelated and homoskedastic. Here's how to apply this approach in the example: ```{r} coef_test(lm_unweighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")[1:2,] ``` The Satterthwaite degrees of freedom are different for each coefficient in the model, and so the `coef_test` function reports them right alongside the standard error. For the effect of legal drinking age, the degrees of freedom are about half of what might be expected, given that there are 51 clusters. 
The p-value for the CR2+Satterthwaite test is about twice as large as the p-value based on the standard Wald test, although the coefficient is still statistically significant at conventional levels. Note, however, that the degrees of freedom on the beer taxation rate are considerably smaller because there are only a few states with substantial variability in taxation rates over time. ## Unweighted "within" estimation The `plm` package in R provides another way to estimate the same model. It is convenient because it absorbs the state and year fixed effects before estimating the effect of `legal`. The `clubSandwich` package works with fitted `plm` models too: ```{r, message = FALSE} library(plm) plm_unweighted <- plm(mrate ~ legal + beertaxa, data = MV_deaths, effect = "twoways", index = c("state","year")) coef_test(plm_unweighted, vcov = "CR1", cluster = "individual", test = "naive-t") coef_test(plm_unweighted, vcov = "CR2", cluster = "individual", test = "Satterthwaite") ``` ## Population-weighted estimation The difference between the standard method and the new method are not terribly exciting in the above example. However, things change quite a bit if the model is estimated using population weights. We go back to fitting in `lm` with dummies for all the fixed effects because `plm` does not handle weighted least squares. ```{r} lm_weighted <- lm(mrate ~ 0 + legal + beertaxa + factor(state) + factor(year), weights = pop, data = MV_deaths) coef_test(lm_weighted, vcov = "CR1", cluster = MV_deaths$state, test = "naive-t")[1:2,] coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")[1:2,] ``` Using population weights slightly reduces the point estimate of the effect, while also slightly increasing its precision. If you were following the standard approach, you would probably be happy with the weighted estimates and wouldn't think about it any further. 
However, using the CR2 variance estimator and Satterthwaite correction produces a p-value that is an order of magnitude larger (though still significant at the conventional 5% level). The degrees of freedom are just `r round(coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")["legal","df"], 1)`---drastically smaller than would be expected based on the number of clusters. Even with weights, the `coef_test` function uses an "independent, homoskedastic" working model as a default for `lm` objects. In the present example, the outcome is a standardized rate and so a better assumption might be that the error variances are inversely proportional to population size. The following code uses this alternate working model: ```{r} coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, target = 1 / MV_deaths$pop, test = "Satterthwaite")[1:2,] ``` The new working model leads to slightly smaller standard errors and a couple of additional degrees of freedom, though they remain in small-sample territory. ## Random effects estimation If the unobserved effects $\alpha_1,...,\alpha_{51}$ are uncorrelated with the regressors, then a more efficient way to estimate $\gamma,\delta$ is by weighted least squares, with weights based on a random effects model. We still treat the year effects as fixed. ```{r} plm_random <- plm(mrate ~ 0 + legal + beertaxa + year, data = MV_deaths, effect = "individual", index = c("state","year"), model = "random") coef_test(plm_random, vcov = "CR1", test = "naive-t")[1:2,] coef_test(plm_random, vcov = "CR2", test = "Satterthwaite")[1:2,] ``` With random effects estimation, the effect of legal drinking age is smaller by about 1 death per 100,000. As a procedural aside, note that `coef_test` infers that `state` is the clustering variable because the call to plm includes only one type of effects (random state effects). 
## Robust Hausman test CRVE is also used in specification tests, as in the artificial Hausman-type test for endogeneity of unobserved effects (Arellano, 1993). As noted above, random effects estimation is more efficient than fixed effects estimation, but requires the assumption that the unobserved effects are uncorrelated with the regressors. However, if the unobserved effects covary with $\mathbf{b}_i, \mathbf{d}_i$, then the random-effects estimator will be biased. We can test for whether endogeneity is a problem by including group-centered covariates as additional regressors. Let $\tilde{d}_{it} = d_{it} - \frac{1}{T}\sum_t d_{it}$, with $\tilde{b}_{it}$ defined analogously. Now estimate the regression $$y_{it} = \beta_t + \gamma_1 b_{it} + \gamma_2 \tilde{b}_{it} + \delta_1 d_{it} + \delta_2 \tilde{d}_{it} + \epsilon_{it},$$ which does not include state fixed effects. The parameters $\gamma_2,\delta_2$ represent the differences between the within-groups and between-groups estimands of $\gamma_1, \delta_1$. If these are both zero, then the random effects estimator is unbiased. Thus, the joint test for $H_0: \gamma_2 = \delta_2 = 0$ amounts to a test for exogeneity of the unobserved effects. For efficiency, we estimate this specification using weighted least squares (although OLS would be valid too): ```{r} MV_deaths <- within(MV_deaths, { legal_cent <- legal - tapply(legal, state, mean)[factor(state)] beer_cent <- beertaxa - tapply(beertaxa, state, mean)[factor(state)] }) plm_Hausman <- plm(mrate ~ 0 + legal + beertaxa + legal_cent + beer_cent + factor(year), data = MV_deaths, effect = "individual", index = c("state","year"), model = "random") coef_test(plm_Hausman, vcov = "CR2", test = "Satterthwaite")[1:4,] ``` To conduct a joint test on the centered covariates, we can use the `Wald_test` function. 
The usual way to test this hypothesis would be to use the `CR1` variance estimator to calculate the robust Wald statistic, then use a $\chi^2_2$ reference distribution (or equivalently, compare a re-scaled Wald statistic to an $F(2,\infty)$ distribution). The `Wald_test` function reports the latter version: ```{r} Wald_test(plm_Hausman, constraints = c("legal_cent","beer_cent"), vcov = "CR1", test = "chi-sq") ``` The test is just shy of significance at the 5% level. If we instead use the `CR2` variance estimator and our newly proposed approximate F-test (which is the default in `Wald_test`), then we get: ```{r} Wald_test(plm_Hausman, constraints = c("legal_cent","beer_cent"), vcov = "CR2") ``` The low degrees of freedom of the test indicate that we're definitely in small-sample territory and should not trust the asymptotic $\chi^2$ approximation. ## References Angrist, J. D., & Pischke, J. (2009). _Mostly harmless econometrics: An empiricist’s companion_. Princeton, NJ: Princeton University Press. Angrist, J. D., and Pischke, J. S. (2014). _Mastering'metrics: the path from cause to effect_. Princeton, NJ: Princeton University Press. Arellano, M. (1993). On the testing of correlated effects with panel data. Journal of Econometrics, 59(1-2), 87-97. doi: [10.1016/0304-4076(93)90040-C](http://www.sciencedirect.com/science/article/pii/030440769390040C) Bell, R. M., & McCaffrey, D. F. (2002). Bias reduction in standard errors for linear regression with multi-stage samples. _Survey Methodology, 28_(2), 169-181. Cameron, A. C., & Miller, D. L. (2015). A practitioner’s guide to cluster-robust inference. URL: http://cameron.econ.ucdavis.edu/research/Cameron_Miller_JHR_2015_February.pdf Carpenter, C., & Dobkin, C. (2011). The minimum legal drinking age and public health. _Journal of Economic Perspectives, 25_(2), 133-156. doi: [10.1257/jep.25.2.133](https://doi.org/10.1257/jep.25.2.133) Imbens, G. W., & Kolesar, M. (2015). 
Robust standard errors in small samples: Some practical advice. URL: https://www.princeton.edu/~mkolesar/papers/small-robust.pdf Pustejovsky, J. E. & Tipton, E. (2016). Small sample methods for cluster-robust variance estimation and hypothesis testing in fixed effects models. arXiv: [1601.01981](https://arxiv.org/abs/1601.01981) [stat.ME] Young, A. (2016). Improved, nearly exact, statistical inference with robust and clustered covariance matrices using effective degrees of freedom corrections. clubSandwich/NEWS0000644000176200001440000000445613576303502013374 0ustar liggesusersclubSandwich 0.4.0 ======================= * Added methods for lmerMod objects fitted by lme4::lmer(). * Updated internals to use inherits() instead of checking class() directly. clubSandwich 0.3.5 ======================= * Added t statistics to output of coef_test(). * Fixed a bug in get_index_order(), an internal function used with plm objects. Previously, the function assumed that both individual and time indices were specified in the plm call. The new function works even when zero or one indices are specified. clubSandwich 0.3.3 ======================= * impute_covariance_matrix() now drops unobserved factor levels. * updated method for handling residuals from rma.uni and rma.mv objects, for consistency with metafor 2.1-0. clubSandwich 0.3.2 ======================= * Added conf_int() to provide easy cluster-robust confidence intervals. * Added examples to documentaiton for conf_int() and coef_test(). clubSandwich 0.3.1 ======================= * Added "coefs" option to coef_test() to allow testing of subsets of coefficients. * Updated tests to use carData instead of car package. clubSandwich 0.3.0 ======================= * Added methods for ivreg objects. * Added methods for mlm objects. * Updated residuals_CS.plm to account for changes in plm 1.6-6. clubSandwich 0.2.3 ======================= * Updated methods for plm objects to account for changes in plm 1.6-6. 
* Added methods for glm objects. * Added documentation of "type" options in vcovCR(). * Added examples for all vcovCR() methods. * Provide facility to cluster at higher level than highest random effects for lme and gls objects. * Added impute_covariance_matrix() utility function for multivariate meta-analysis. clubSandwich 0.2.2 ======================= * Added bread() methods for all supported model classes. * vcovCR() is now calculated using bread(), and carries attributes for bread, est_mat, and adjustment matrices. * vcovCR() gains a 'form' argument to obtain just the meat of the sandwich, or to use a user-specified bread matrix. * Refactored internal functions for degrees of freedom calculation to improve speed and memory usage. * Bug fixes: - updated nobs.plm method to handle first-differenced models clubSandwich 0.2.1 ======================= * First version released on CRAN.clubSandwich/R/0000755000176200001440000000000013576303502013065 5ustar liggesusersclubSandwich/R/conf_int.R0000644000176200001440000000617613576051427015027 0ustar liggesusers #-------------------------------------------------- # confidence intervals for all model coefficients #--------------------------------------------------- #' Calculate confidence intervals for all or selected regression coefficients in a fitted model #' #' \code{conf_int} reports confidence intervals for each coefficient estimate in a fitted #' linear regression model, using a sandwich estimator for the standard errors #' and a small sample correction for the critical values. The small-sample correction is #' based on a Satterthwaite approximation. #' #' @param obj Fitted model for which to calculate confidence intervals. #' @param level Desired coverage level for confidence intervals. #' @inheritParams coef_test #' #' @return A data frame containing estimated regression coefficients, standard errors, and confidence intervals. 
#' #' @seealso \code{\link{vcovCR}} #' #' @examples #' data("Produc", package = "plm") #' lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) #' individual_index <- !grepl("state", names(coef(lm_individual))) #' conf_int(lm_individual, vcov = "CR2", cluster = Produc$state, coefs = individual_index) #' #' V_CR2 <- vcovCR(lm_individual, cluster = Produc$state, type = "CR2") #' conf_int(lm_individual, vcov = V_CR2, level = .99, coefs = individual_index) #' #' @export conf_int <- function(obj, vcov, level = .95, test = "Satterthwaite", coefs = "All", ...) { if (level <= 0 | level >= 1) stop("Confidence level must be between 0 and 1.") beta_full <- coef_CS(obj) beta_NA <- is.na(beta_full) which_beta <- get_which_coef(beta_full, coefs) beta <- beta_full[which_beta & !beta_NA] if (is.character(vcov)) vcov <- vcovCR(obj, type = vcov, ...) if (!inherits(vcov, "clubSandwich")) stop("Variance-covariance matrix must be a clubSandwich.") all_tests <- c("z","naive-t","Satterthwaite") test <- match.arg(test, all_tests, several.ok = FALSE) SE <- sqrt(diag(vcov))[which_beta[!beta_NA]] if (test=="Satterthwaite") { P_array <- get_P_array(get_GH(obj, vcov))[,,which_beta[!beta_NA],drop=FALSE] } df <- switch(test, z = Inf, `naive-t` = nlevels(attr(vcov, "cluster")) - 1, `Satterthwaite` = Satterthwaite(beta = beta, SE = SE, P_array = P_array)$df ) crit <- qt(1 - (1 - level) / 2, df = df) result <- data.frame( beta = beta, SE = SE, df = df, CI_L = beta - SE * crit, CI_U = beta + SE * crit ) class(result) <- c("conf_int_clubSandwich", class(result)) attr(result, "type") <- attr(vcov, "type") attr(result, "level") <- level result } #--------------------------------------------- # print method for conf_int #--------------------------------------------- #' @export print.conf_int_clubSandwich <- function(x, digits = 3, ...) 
{ lev <- paste0(100 * attr(x, "level"), "%") res <- data.frame("Coef" = rownames(x), x) rownames(res) <- NULL names(res) <- c("Coef", "Estimate", "SE", "d.f.", paste(c("Lower", "Upper"), lev, "CI")) print(format(res, digits = 3)) } clubSandwich/R/robu.R0000644000176200001440000001003513500611662014152 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for a robu object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from a #' \code{\link[robumeta]{robu}} object. #' #' @param cluster Optional expression or vector indicating which observations #' belong to the same cluster. If not specified, will be set to the #' \code{studynum} used in fitting the \code{\link[robumeta]{robu}} object. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If not specified, the target is taken to be the #' inverse of the estimated weights used in fitting the #' \code{\link[robumeta]{robu}} object. #' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. 
#' #' @seealso \code{\link{vcovCR}} #' #' @export #' #' @examples #' library(robumeta) #' data(hierdat) #' #' robu_fit <- robu(effectsize ~ binge + followup + sreport + age, #' data = hierdat, studynum = studyid, #' var.eff.size = var, modelweights = "HIER") #' robu_fit #' #' robu_CR2 <- vcovCR(robu_fit, type = "CR2") #' robu_CR2 #' coef_test(robu_fit, vcov = robu_CR2, test = c("Satterthwaite", "saddlepoint")) #' #' Wald_test(robu_fit, constraints = c(2,4), vcov = robu_CR2) #' Wald_test(robu_fit, constraints = 2:5, vcov = robu_CR2) #' vcovCR.robu <- function(obj, cluster, type, target, inverse_var, form = "sandwich", ...) { if (missing(cluster)) cluster <- obj$study_orig_id if (missing(target)) target <- NULL if (missing(inverse_var)) inverse_var <- is.null(target) & (!obj$user_weighting) vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } #----------------------------------------------- # coefficients #----------------------------------------------- coef_CS.robu <- function(obj) { beta <- as.vector(obj$b.r) labs <- obj$reg_table$labels names(beta) <- levels(labs)[labs] beta } #----------------------------------------------- # residuals #----------------------------------------------- residuals_CS.robu <- function(obj) { ord <- order(order(obj$study_orig_id)) obj$data.full$e.r[ord] } #----------------------------------------------- # Model matrix #----------------------------------------------- model_matrix.robu <- function(obj) { ord <- order(order(obj$study_orig_id)) obj$Xreg[ord,] } #------------------------------------- # Get (model-based) working variance matrix #------------------------------------- targetVariance.robu <- function(obj, cluster) { ord <- order(order(obj$study_orig_id)) if (obj$user_weighting) { V <- obj$data.full$avg.var.eff.size[ord] } else { V <- mean(obj$data.full$r.weights) / obj$data.full$r.weights[ord] } matrix_list(V, cluster, "both") } #------------------------------------- # Get 
weighting matrix #------------------------------------- weightMatrix.robu <- function(obj, cluster) { ord <- order(order(obj$study_orig_id)) if (obj$user_weighting) { W <- obj$data.full$userweights[ord] } else{ W <- obj$data.full$r.weights[ord] } w_scale <- mean(W) W <- W / w_scale W_list <- matrix_list(W, cluster, "both") attr(W_list, "w_scale") <- w_scale W_list } #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- bread.robu <- function(x, ...) { if (x$user_weighting) { W <- x$data.full$userweights } else{ W <- x$data.full$r.weights } x$N * chol2inv(chol(crossprod(x$Xreg, W * x$Xreg))) } v_scale.robu <- function(obj) { obj$N } clubSandwich/R/lme.R0000644000176200001440000001505513576303502013773 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for an lme object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from a \code{\link[nlme]{lme}} object. #' #' @param cluster Optional expression or vector indicating which observations #' belong to the same cluster. If not specified, will be set to #' \code{getGroups(obj)}. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If not specified, the target is taken to be the #' estimated variance-covariance structure of the \code{lme} object. #' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. 
#' #' @seealso \code{\link{vcovCR}} #' #' @examples #' library(nlme) #' rat_weight <- lme(weight ~ Time * Diet, data=BodyWeight, ~ Time | Rat) #' vcovCR(rat_weight, type = "CR2") #' #' data(egsingle, package = "mlmRev") #' math_model <- lme(math ~ year * size + female + black + hispanic, #' random = list(~ year | schoolid, ~ 1 | childid), #' data = egsingle) #' vcovCR(math_model, type = "CR2") #' #' @export vcovCR.lme <- function(obj, cluster, type, target, inverse_var, form = "sandwich", ...) { if (missing(cluster)) cluster <- nlme::getGroups(obj, level = 1) if (missing(target)) target <- NULL if (missing(inverse_var)) inverse_var <- is.null(target) vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } # nobs() #------------------------------------- # residuals_CS() #------------------------------------- residuals_CS.lme <- function(obj) residuals(obj, level = 0) #------------------------------------- # coef_CS() #------------------------------------- coef_CS.lme <- function(obj) nlme::fixef(obj) #------------------------------------- # model_matrix() #------------------------------------- model_matrix.lme <- function(obj) { dat <- droplevels(getData(obj)) model.matrix(formula(obj), data = dat) } #------------------------------------- # Get (model-based) working variance matrix #------------------------------------- ZDZt <- function(D, Z_list) { lapply(Z_list, function(z) z %*% D %*% t(z)) } targetVariance.lme <- function(obj, cluster = nlme::getGroups(obj, level = 1)) { if (inherits(obj, "nlme")) stop("not implemented for \"nlme\" objects") all_groups <- rev(obj$groups) smallest_groups <- all_groups[[1]] largest_groups <- all_groups[[length(all_groups)]] # Get level-1 variance-covariance structure as V_list if (is.null(obj$modelStruct$corStruct)) { if (is.null(obj$modelStruct$varStruct)) { V_list <- matrix_list(rep(1, length(smallest_groups)), smallest_groups, "both") } else { wts <- 
nlme::varWeights(obj$modelStruct$varStruct)[order(do.call(order, all_groups))] V_list <- matrix_list(1 / wts^2, smallest_groups, "both") } } else { R_list <- as.list(rep(1, nlevels(smallest_groups))) names(R_list) <- levels(smallest_groups) R_sublist <- nlme::corMatrix(obj$modelStruct$corStruct) R_list[names(R_sublist)] <- R_sublist if (is.null(obj$modelStruct$varStruct)) { V_list <- R_list } else { sd_vec <- 1 / nlme::varWeights(obj$modelStruct$varStruct)[order(do.call(order, all_groups))] sd_list <- split(sd_vec, smallest_groups) V_list <- Map(function(R, s) tcrossprod(s) * R, R = R_list, s = sd_list) } } # Get random effects structure if (length(all_groups) == 1) { D_mat <- as.matrix(obj$modelStruct$reStruct[[1]]) Z_mat <- model.matrix(obj$modelStruct$reStruc, getData(obj)) row.names(Z_mat) <- NULL Z_list <- matrix_list(Z_mat, all_groups[[1]], "row") ZDZ_list <- ZDZt(D_mat, Z_list) target_list <- Map("+", ZDZ_list, V_list) } else { D_list <- lapply(obj$modelStruct$reStruct, as.matrix) Z_mat <- model.matrix(obj$modelStruct$reStruc, getData(obj)) Z_names <- sapply(strsplit(colnames(Z_mat), ".", fixed=TRUE), function(x) x[1]) row.names(Z_mat) <- NULL Z_levels <- lapply(names(all_groups), function(x) Z_mat[,x==Z_names,drop=FALSE]) Z_levels <- Map(matrix_list, x = Z_levels, fac = all_groups, dim = "row") ZDZ_lists <- Map(ZDZt, D = D_list, Z_list = Z_levels) ZDZ_lists[[1]] <- Map("+", ZDZ_lists[[1]], V_list) for (i in 2:length(all_groups)) { ZDZ_lists[[i]] <- add_bdiag(small_mats = ZDZ_lists[[i-1]], big_mats = ZDZ_lists[[i]], crosswalk = all_groups[c(i-1,i)]) } target_list <- ZDZ_lists[[i]] } # check if clustering level is higher than highest level of random effects tb_groups <- table(largest_groups) tb_cluster <- table(cluster) if (length(tb_groups) < length(tb_cluster) | any(as.vector(tb_groups) != rep(as.vector(tb_cluster), length.out = length(tb_groups))) | any(names(tb_groups) != rep(names(tb_cluster), length.out = length(tb_groups)))) { # check that random 
effects are nested within clusters tb_cross <- table(largest_groups, cluster) nested <- apply(tb_cross, 1, function(x) sum(x > 0) == 1) if (!all(nested)) stop("Random effects are not nested within clustering variable.") # expand target_list to level of clustering crosswalk <- data.frame(largest_groups, cluster) target_list <- add_bdiag(small_mats = target_list, big_mats = matrix_list(rep(0, length(cluster)), cluster, dim = "both"), crosswalk = crosswalk) } return(target_list) } #------------------------------------- # Get weighting matrix #------------------------------------- weightMatrix.lme <- function(obj, cluster = nlme::getGroups(obj, level = 1)) { V_list <- targetVariance(obj, cluster) lapply(V_list, function(v) chol2inv(chol(v))) } #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- #' @export bread.lme <- function(x, ...) { vcov(x) * v_scale(x) / x$sigma^2 } v_scale.lme <- function(obj) { nlevels(nlme::getGroups(obj)) } clubSandwich/R/rma-mv.R0000644000176200001440000001350513500611662014407 0ustar liggesusers #---------------------------------------------------------------------- # utility function for computing block-diagonal covariance matrices #---------------------------------------------------------------------- #' Impute a block-diagonal covariance matrix #' #' \code{impute_covariance_matrix} calculates a block-diagonal covariance #' matrix, given the marginal variances, the block structure, and an assumed #' correlation. #' #' @param vi Vector of variances #' @param cluster Vector indicating which effects belong to the same cluster. #' Effects with the same value of `cluster` will be treated as correlated. #' @param r Vector or numeric value of assume correlation(s) between effect size #' estimates from each study. #' @param return_list Optional logical indicating whether to return a list of matrices #' (with one entry per block) or the full variance-covariance matrix. 
#' #' @return If \code{cluster} is appropriately sorted, then a list of matrices, #' with one entry per cluster, will be returned by default. If \code{cluster} #' is out of order, then the full variance-covariate matrix will be returned #' by default. The output structure can be controlled with the optional #' \code{return_list} argument. #' #' @export #' #' @examples #' library(metafor) #' data(SATcoaching) #' V_list <- impute_covariance_matrix(vi = SATcoaching$V, cluster = SATcoaching$study, r = 0.66) #' MVFE <- rma.mv(d ~ 0 + test, V = V_list, data = SATcoaching) #' coef_test(MVFE, vcov = "CR2", cluster = SATcoaching$study) #' impute_covariance_matrix <- function(vi, cluster, r, return_list = identical(as.factor(cluster), sort(as.factor(cluster)))) { cluster <- droplevels(as.factor(cluster)) vi_list <- split(vi, cluster) r_list <- rep_len(r, length(vi_list)) vcov_list <- Map(function(V, rho) (rho + diag(1 - rho, nrow = length(V))) * tcrossprod(sqrt(V)), V = vi_list, rho = r_list) if (return_list) { return(vcov_list) } else { vcov_mat <- metafor::bldiag(vcov_list) cluster_index <- order(order(cluster)) return(vcov_mat[cluster_index, cluster_index]) } } #------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for a robu object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from a #' \code{\link[metafor]{rma.mv}} object. #' #' @param cluster Optional expression or vector indicating which observations #' belong to the same cluster. If not specified, will be set to the factor in #' the random-effects structure with the fewest distinct levels. Caveat #' emptor: the function does not check that the random effects are nested. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. 
If not specified, the target is taken to be the #' estimated variance-covariance structure of the \code{rma.mv} object. #' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. #' #' @seealso \code{\link{vcovCR}} #' #' @export #' #' @examples #' library(metafor) #' data(hierdat, package = "robumeta") #' #' mfor_fit <- rma.mv(effectsize ~ binge + followup + sreport + age, #' V = var, random = list(~ 1 | esid, ~ 1 | studyid), #' data = hierdat) #' mfor_fit #' #' mfor_CR2 <- vcovCR(mfor_fit, type = "CR2") #' mfor_CR2 #' coef_test(mfor_fit, vcov = mfor_CR2, test = c("Satterthwaite", "saddlepoint")) #' #' Wald_test(mfor_fit, constraints = c(2,4), vcov = mfor_CR2) #' Wald_test(mfor_fit, constraints = 2:5, vcov = mfor_CR2) vcovCR.rma.mv <- function(obj, cluster, type, target, inverse_var, form = "sandwich", ...) { if (missing(cluster)) cluster <- findCluster.rma.mv(obj) if (missing(target)) { target <- NULL inverse_var <- is.null(obj$W) } else { if (missing(inverse_var)) inverse_var <- FALSE } vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } # coef() # residuals_CS() # vcov() # model_matrix #------------------------------------- # Get (model-based) working variance matrix #------------------------------------- targetVariance.rma.mv <- function(obj, cluster) { matrix_list(obj$M, cluster, "both") } #------------------------------------- # Get weighting matrix #------------------------------------- weightMatrix.rma.mv <- function(obj, cluster) { if (is.null(obj$W)) { V_list <- targetVariance(obj, cluster) lapply(V_list, function(v) chol2inv(chol(v))) } else{ matrix_list(obj$W, cluster, "both") } } #----------------------------------------------- # Get outer-most clustering variable #----------------------------------------------- findCluster.rma.mv <- 
function(obj) { if (obj$withS) { r <- which.min(obj$s.nlevels) cluster <- obj$mf.r[[r]][[obj$s.names[r]]] } else if (obj$withG) { cluster <- obj$mf.r[[1]][[obj$g.names[2]]] } else { stop("No clustering variable specified.") } droplevels(as.factor(cluster)) } #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- bread.rma.mv <- function(x, ...) { if (is.null(x$W)) { B <- vcov(x) * nobs(x) } else{ X_mat <- model_matrix(x) XWX <- t(X_mat) %*% x$W %*% X_mat B <- chol2inv(chol(XWX)) * nobs(x) rownames(B) <- colnames(B) <- colnames(X_mat) } B } v_scale.rma.mv <- function(obj) { nobs(obj) } clubSandwich/R/data-documentation.R0000644000176200001440000001650113574216757017010 0ustar liggesusers#' Achievement Awards Demonstration program #' #' Data from a randomized trial of the Achievement Awards #' Demonstration program, reported in Angrist & Lavy (2009). #' #' @format A data frame with 16526 rows and 21 variables: \describe{ #' \item{school_id}{Fictitious school identification number} #' \item{school_type}{Factor identifying the school type (Arab religious, Jewish religious, Jewish secular)} #' \item{pair}{Number of treatment pair. 
Note that 7 is a triple.} #' \item{treated}{Indicator for whether school was in treatment group} #' \item{year}{Cohort year} #' \item{student_id}{Fictitious student identification number} #' \item{sex}{Factor identifying student sex} #' \item{siblings}{Number of siblings} #' \item{immigrant}{Indicator for immigrant status} #' \item{father_ed}{Father's level of education} #' \item{mother_ed}{Mother's level of education} #' \item{Bagrut_status}{Indicator for Bagrut attainment} #' \item{attempted}{Number of Bagrut units attempted} #' \item{awarded}{Number of Bagrut units awarded} #' \item{achv_math}{Indicator for satisfaction of math requirement} #' \item{achv_english}{Indicator for satisfaction of English requirement} #' \item{achv_hebrew}{Indicator for satisfaction of Hebrew requirement} #' \item{lagscore}{Lagged Bagrut score} #' \item{qrtl}{Quartile within distribution of lagscore, calculated by cohort and sex} #' \item{half}{Lower or upper half within distribution of lagscore, calculated by cohort and sex} #' } #' #' @source \href{https://economics.mit.edu/faculty/angrist/data1/data/angrist}{Angrist Data Archive} #' #' @references Angrist, J. D., & Lavy, V. (2009). The effects of high stakes #' high school achievement awards : Evidence from a randomized trial. #' \emph{American Economic Review, 99}(4), 1384-1414. #' doi:\href{https://doi.org/10.1257/aer.99.4.1384}{10.1257/aer.99.4.1384} #' "AchievementAwardsRCT" #' Dropout prevention/intervention program effects #' #' A dataset containing estimated effect sizes, variances, and covariates from a #' meta-analysis of dropout prevention/intervention program effects, conducted #' by Wilson et al. (2011). Missing observations were imputed. 
#' #' @format A data frame with 385 rows and 18 variables: \describe{ #' \item{LOR1}{log-odds ratio measuring the intervention effect} #' \item{varLOR}{estimated sampling variance of the log-odds ratio} #' \item{studyID}{unique identifier for each study} #' \item{studySample}{unique identifier for each sample within a study} #' \item{study_design}{study design (randomized, matched, or non-randomized and unmatched)} #' \item{outcome}{outcome measure for the intervention effect is estimated (school dropout, #' school enrollment, graduation, graduation or GED receipt)} #' \item{evaluator_independence}{degree of evaluator independence (independent, indirect #' but influential, involved in planning but not delivery, involved in delivery)} #' \item{implementation_quality}{level of implementation quality (clear problems, #' possible problems, no apparent problems)} #' \item{program_site}{Program delivery site (community, mixed, school classroom, #' school but outside of classroom)} #' \item{attrition}{Overall attrition (proportion)} #' \item{group_equivalence}{pretest group-equivalence log-odds ratio} #' \item{adjusted}{adjusted or unadjusted data used to calculate intervention effect} #' \item{male_pct}{proportion of the sample that is male} #' \item{white_pct}{proportion of the sample that is white} #' \item{average_age}{average age of the sample} #' \item{duration}{program duration (in weeks)} #' \item{service_hrs}{program contact hours per week} #' \item{big_study}{indicator for the 32 studies with 3 or more effect sizes} #' } #' #' @source Wilson, S. J., Lipsey, M. W., Tanner-Smith, E., Huang, C. H., & #' Steinka-Fry, K. T. (2011). Dropout prevention and intervention programs: #' Effects on school completion and dropout Among school-aged children and #' youth: A systematic review. Campbell Systematic Reviews, 7(8). #' #' @references Wilson, S. J., Lipsey, M. W., Tanner-Smith, E., Huang, C. H., & #' Steinka-Fry, K. T. (2011). 
Dropout prevention and intervention programs: #' Effects on school completion and dropout Among school-aged children and #' youth: A systematic review. Campbell Systematic Reviews, 7(8). #' #' Tipton, E., & Pustejovsky, J. E. (2015). Small-sample adjustments for tests #' of moderators and model fit using robust variance estimation in #' meta-regression. #' "dropoutPrevention" #' State-level annual mortality rates by cause among 18-20 year-olds #' #' A dataset containing state-level annual mortality rates for select causes of #' death, as well as data related to the minimum legal drinking age and alcohol #' consumption. #' #' @format A data frame with 5508 rows and 12 variables: \describe{ #' \item{year}{Year of observation} #' \item{state}{identifier for state} #' \item{count}{Number of deaths} #' \item{pop}{Population size} #' \item{legal}{Proportion of 18-20 year-old population that is legally allowed to drink} #' \item{beertaxa}{Beer taxation rate} #' \item{beerpercap}{Beer consumption per capita} #' \item{winepercap}{Wine consumption per capita} #' \item{spiritpercap}{Spirits consumption per capita} #' \item{totpercap}{Total alcohol consumption per capita} #' \item{mrate}{Mortality rate per 10,000} #' \item{cause}{Cause of death} #' } #' #' @source #' \href{http://masteringmetrics.com/wp-content/uploads/2015/01/deaths.dta}{Mastering #' 'Metrics data archive} #' #' @references #' #' Angrist, J. D., and Pischke, J. S. (2014). _Mastering'metrics: the path from #' cause to effect_. Princeton University Press, 2014. #' #' Carpenter, C., & Dobkin, C. (2011). The minimum legal drinking age and public #' health. _Journal of Economic Perspectives, 25_(2), 133-156. 
#' doi:[10.1257/jep.25.2.133](https://doi.org/10.1257/jep.25.2.133) #' "MortalityRates" #' Randomized experiments on SAT coaching #' #' Effect sizes from studies on the effects of SAT coaching, #' reported in Kalaian and Raudenbush (1996) #' #' @format A data frame with 67 rows and 11 variables: #' \describe{ #' \item{study}{Study identifier} #' \item{year}{Year of publication} #' \item{test}{Character string indicating whether effect size corresponds to outcome on verbal (SATV) or math (SATM) test} #' \item{d}{Effect size estimate (Standardized mean difference)} #' \item{V}{Variance of effect size estimate} #' \item{nT}{Sample size in treatment condition} #' \item{nC}{Sample size in control condition} #' \item{study_type}{Character string indicating whether study design used a matched, non-equivalent, or randomized control group} #' \item{hrs}{Hours of coaching} #' \item{ETS}{Indicator variable for Educational Testing Service} #' \item{homework}{Indicator variable for homework} #' } #' #' @references Kalaian, H. A. & Raudenbush, S. W. (1996). A multivariate mixed #' linear model for meta-analysis. \emph{Psychological Methods, 1}(3), #' 227-235. 
#' doi:\href{https://doi.org/10.1037/1082-989X.1.3.227}{10.1037/1082-989X.1.3.227} #' "SATcoaching" clubSandwich/R/utilities.R0000644000176200001440000000462213500611662015223 0ustar liggesusers#----------------------------------------------------- # check that bread can be re-constructed from X and W #----------------------------------------------------- check_bread <- function(obj, cluster, y, check_coef = TRUE, tol = 10^-6) { cluster <- droplevels(as.factor(cluster)) B <- sandwich::bread(obj) / v_scale(obj) X_list <- matrix_list(model_matrix(obj), cluster, "row") W_list <- weightMatrix(obj, cluster) XWX <- Reduce("+", Map(function(x, w) t(x) %*% w %*% x, x = X_list, w = W_list)) M <- chol2inv(chol(XWX)) attr(M, "dimnames") <- attr(B, "dimnames") eq_bread <- diff(range((B / M)[XWX != 0])) < tol if (check_coef) { coef <- coef_CS(obj) y_list <- split(y, cluster) XWy <- Reduce("+", Map(function(x, w, y) t(x) %*% w %*% y, x = X_list, w = W_list, y = y_list)) beta <- as.vector(M %*% XWy) names(beta) <- names(coef) eq_coef <- all.equal(beta, coef, tol = tol) if (all(c(eq_coef, eq_bread) == TRUE)) TRUE else list(M = M, B = B, beta = beta, coef = coef) } else { if (eq_bread) TRUE else list(M = M, B = B) } } #---------------------------------------------- # check that CR2 and CR4 are target-unbiased #---------------------------------------------- check_CR <- function(obj, vcov, ..., tol = .Machine$double.eps^0.5) { if (is.character(vcov)) vcov <- vcovCR(obj, type = vcov, ...) 
if (!("clubSandwich" %in% class(vcov))) stop("Variance-covariance matrix must be a clubSandwich.") # calculate E(V^CRj) cluster <- attr(vcov, "cluster") S_array <- get_S_array(obj, vcov) E_CRj <- lapply(1:nlevels(cluster), function(j) tcrossprod(S_array[,,j])) # calculate target Theta_list <- attr(vcov, "target") X <- model_matrix(obj) alias <- is.na(coef_CS(obj)) if (any(alias)) X <- X[, !alias, drop = FALSE] p <- NCOL(X) N <- length(cluster) J <- nlevels(cluster) X_list <- matrix_list(X, cluster, "row") W_list <- weightMatrix(obj, cluster) XW_list <- Map(function(x, w) as.matrix(t(x) %*% w), x = X_list, w = W_list) M <- attr(vcov, "bread") / attr(vcov, "v_scale") attr(M, "dimnames") <- NULL MXWTWXM <- Map(function(xw, theta) M %*% as.matrix(xw %*% theta %*% t(xw)) %*% M, xw = XW_list, theta = Theta_list) eq <- all.equal(E_CRj, MXWTWXM, tolerance = tol) if (all(eq==TRUE)) TRUE else list(E_CRj = E_CRj, target = MXWTWXM) } clubSandwich/R/Wald_test.R0000644000176200001440000002525413576051206015147 0ustar liggesusers#-------------------------------------------------- # translate constraint arguments into a matrix #-------------------------------------------------- get_constraint_mat <- function(obj, constraints) { p <- length(coef_CS(obj)) beta_NA <- is.na(coef_CS(obj)) if (inherits(constraints, "matrix")) { if (ncol(constraints) != p) stop(paste0("Constraint matrix must have ",p," columns.")) if (nrow(constraints) == 0) stop("Constraint matrix must have at least one row.") C_mat <- constraints } else { C_mat <- switch(class(constraints), logical = { if (length(constraints) != p) stop(paste0("Constraint logicals must be of length ",p,".")) if (sum(constraints) == 0) stop("You must specify at least one constraint.") diag(1L, nrow = p)[constraints,,drop=FALSE] }, numeric = { if (any(!(constraints %in% 1:p))) stop(paste0("Constraint indices must be less than or equal to ",p,".")) if (length(constraints) == 0) stop("You must specify at least one constraint.") diag(1L, 
nrow = p)[constraints,,drop=FALSE] }, integer = { if (any(!(constraints %in% 1:p))) stop(paste0("Constraint indices must be less than or equal to ",p,".")) if (length(constraints) == 0) stop("You must specify at least one constraint.") diag(1L, nrow = p)[constraints,,drop=FALSE] }, character = { term_names <- names(coef_CS(obj)) if (any(!constraints %in% term_names)) stop("Constraint names not in model specification.") if (length(constraints) == 0) stop("You must specify at least one constraint.") diag(1L, nrow = p)[term_names %in% constraints,,drop=FALSE] }) } C_mat[,!beta_NA,drop=FALSE] } #-------------------------------------------------- # calculate a covariance array #-------------------------------------------------- covariance_array <- function(P_array, Omega_nsqrt, q = nrow(Omega_nsqrt)) { B_jk <- array(apply(P_array, 3:4, function(p) Omega_nsqrt %*% p %*% Omega_nsqrt), dim = dim(P_array)) Cov_arr <- array(NA, dim = rep(q, 4)) for (s in 1:q) for (t in 1:s) for (u in 1:s) for (v in 1:(ifelse(u==s,t,u))) { temp <- sum(B_jk[s,v,,] * B_jk[t,u,,]) + sum(B_jk[s,u,,] * B_jk[t,v,,]) Cov_arr[s,t,u,v] <- temp Cov_arr[s,t,v,u] <- temp Cov_arr[t,s,u,v] <- temp Cov_arr[t,s,v,u] <- temp Cov_arr[u,v,s,t] <- temp Cov_arr[u,v,t,s] <- temp Cov_arr[v,u,s,t] <- temp Cov_arr[v,u,t,s] <- temp } Cov_arr } #--------------------------------------------------------- # calculate total variance of clubSandwich estimator #--------------------------------------------------------- total_variance_mat <- function(P_array, Omega_nsqrt, q = nrow(Omega_nsqrt)) { B_jk <- array(apply(P_array, 3:4, function(p) Omega_nsqrt %*% p %*% Omega_nsqrt), dim = dim(P_array)) var_mat <- matrix(NA, q, q) for (s in 1:q) for (t in 1:s) { temp <- sum(B_jk[s,t,,] * B_jk[t,s,,]) + sum(B_jk[s,s,,] * B_jk[t,t,,]) var_mat[s,t] <- temp var_mat[t,s] <- temp } var_mat } #-------------------------------------------------- # Hotelling's T-squared approximation #-------------------------------------------------- 
Hotelling_Tsq <- function(Q, q, nu) { delta <- (nu - q + 1) / nu df <- nu - q + 1 Fstat <- delta * Q / q p_val <- ifelse(df > 0, pf(Fstat, df1 = q, df2 = df, lower.tail = FALSE), NA) c(Fstat = Fstat, delta = delta, df = df, p_val = p_val) } #--------------------------------------------- # Wald-type tests #--------------------------------------------- #' Test parameter constraints in a fitted linear regression model #' #' \code{Wald_test} reports Wald-type tests of linear contrasts from a fitted #' linear regression model, using a sandwich estimator for the #' variance-covariance matrix and a small sample correction for the p-value. #' Several different small-sample corrections are available. #' #' @param obj Fitted model for which to calculate Wald tests. #' @param constraints List of one or more constraints to test. See details #' below. #' @param vcov Variance covariance matrix estimated using \code{vcovCR} or a #' character string specifying which small-sample adjustment should be used to #' calculate the variance-covariance. #' @param test Character vector specifying which small-sample correction(s) to #' calculate. The following corrections are available: \code{"chi-sq"}, #' \code{"Naive-F"}, \code{"HTA"}, \code{"HTB"}, \code{"HTZ"}, \code{"EDF"}, #' \code{"EDT"}. Default is \code{"HTZ"}. #' @param ... Further arguments passed to \code{\link{vcovCR}}, which are only #' needed if \code{vcov} is a character string. #' #' @details Constraints can be specified as character vectors, integer vectors, #' logical vectors, or matrices. #' #' @return A list of test results. #' #' @seealso \code{\link{vcovCR}} #' #' @export Wald_test <- function(obj, constraints, vcov, test = "HTZ", ...) { if (is.character(vcov)) vcov <- vcovCR(obj, type = vcov, ...) 
if (!inherits(vcov, "clubSandwich")) stop("Variance-covariance matrix must be a clubSandwich.") if (all(test == "All")) test <- c("chi-sq","Naive-F","HTA","HTB","HTZ","EDF","EDT") beta <- na.omit(coef_CS(obj)) GH <- get_GH(obj, vcov) if (is.list(constraints)) { C_mats <- lapply(constraints, get_constraint_mat, obj = obj) results <- lapply(C_mats, Wald_testing, beta = beta, vcov = vcov, test = test, GH = GH) } else { C_mat <- get_constraint_mat(obj, constraints) results <- Wald_testing(C_mat, beta = beta, vcov = vcov, test = test, GH = GH) } results } array_multiply <- function(mat, arr) { new_mat <- apply(arr, 3, function(s) mat %*% s) array(new_mat, dim = c(nrow(mat), dim(arr)[2], dim(arr)[3])) } Wald_testing <- function(C_mat, beta, vcov, test, GH) { q <- nrow(C_mat) dims <- dim(GH$H) J <- dims[length(dims)] if (any(c("HTA","HTB","HTZ","EDF","EDT") %in% test)) { GH$G <- lapply(GH$G, function(s) C_mat %*% s) if (length(dims)==3) { GH$H <- array_multiply(C_mat, GH$H) } else { H <- array(NA, dim = c(3, q, dims[3:4])) for (i in 1:dims[1]) H[i,,,] <- array_multiply(C_mat, GH$H[i,,,]) GH$H <- H } P_array <- get_P_array(GH = GH, all_terms = TRUE) Omega <- apply(P_array, 1:2, function(x) sum(diag(x))) Omega_nsqrt <- matrix_power(Omega, -1/2) } # Wald statistic Q <- as.numeric(t(C_mat %*% beta) %*% chol2inv(chol(C_mat %*% vcov %*% t(C_mat))) %*% C_mat %*% beta) result <- data.frame(row.names = c("Fstat","delta","df","p_val")) # chi-square if ("chi-sq" %in% test) { p_val <- pchisq(Q, df = q, lower.tail = FALSE) result <- cbind(result, "chi-sq" = c(Fstat = Q / q, delta = 1, df = Inf, p_val = p_val)) } # Naive F if ("Naive-F" %in% test) { p_val <- pf(Q / q, df1 = q, df2 = J - 1, lower.tail = FALSE) result <- cbind(result, "Naive-F" = c(Fstat = Q / q, delta = 1, df = J - 1, p_val = p_val)) } # Hotelling's T-squared if ("HTA" %in% test | "HTB" %in% test) { Cov_arr <- covariance_array(P_array, Omega_nsqrt, q = q) Var_index <- seq(1,q^4, 1 + q^2) Var_mat <- 
matrix(Cov_arr[Var_index], q, q) if ("HTA" %in% test) { nu_A <- 2 * sum(Var_mat) / sum(Cov_arr^2) result <- cbind(result, "HTA" = Hotelling_Tsq(Q, q, nu = nu_A)) } if ("HTB" %in% test) { lower_mat <- lower.tri(Var_mat, diag = TRUE) lower_arr <- array(FALSE, dim = dim(Cov_arr)) for (s in 1:q) for (t in 1:s) for (u in 1:s) for (v in 1:(ifelse(u==s,t,u))) lower_arr[s,t,u,v] <- TRUE nu_B <- 2 * sum(Var_mat[lower_mat]) / sum(Cov_arr[lower_arr]^2) result <- cbind(result, "HTB" = Hotelling_Tsq(Q, q, nu = nu_B)) } } else if ("HTZ" %in% test) { Var_mat <- total_variance_mat(P_array, Omega_nsqrt, q = q) } if ("HTZ" %in% test) { nu_Z <- q * (q + 1) / sum(Var_mat) result <- cbind(result, "HTZ" = Hotelling_Tsq(Q, q, nu = nu_Z)) } # Eigen-decompositions if ("EDF" %in% test | "EDT" %in% test) { spec <- eigen(Omega_nsqrt %*% C_mat %*% vcov %*% t(C_mat) %*% t(Omega_nsqrt)) df_eig <- 1 / apply(t(spec$vectors) %*% Omega_nsqrt, 1, function(x) sum(apply(P_array, 3:4, function(P) (t(x) %*% P %*% x)^2))) if ("EDF" %in% test) { df4 <- pmax(df_eig, 4.1) EQ <- sum(df4 / (df4 - 2)) VQ <- 2 * sum(df4^2 * (df4 - 1) / ((df4 - 2)^2 * (df4 - 4))) delta <- ifelse(q * VQ > 2 * EQ^2, (EQ^2 * (q - 2) + 2 * q * VQ) / (EQ * (VQ + EQ^2)), q / EQ) df <- ifelse(q * VQ > 2 * EQ^2, 4 + 2 * EQ^2 * (q + 2) / (q * VQ - 2 * EQ^2), Inf) Fstat <- delta * Q / q p_val <- pf(Fstat, df1 = q, df2 = df, lower.tail = FALSE) result <- cbind(result, "EDF" = c(Fstat = Fstat, delta = delta, df = df, p_val = p_val)) } if ("EDT" %in% test) { t_j <- t(spec$vectors) %*% Omega_nsqrt %*% C_mat %*% beta / sqrt(spec$values) a_j <- df_eig - 1 / 2 b_j <- 48 * a_j^2 c_j <- sqrt(a_j * log(1 + t_j^2 / df_eig)) z_j <- c_j + (c_j^3 + 3 * c_j) / b_j - (4 * c_j^7 + 33 * c_j^5 + 240 * c_j^3 + 855 * c_j) / (10 * b_j^2 + 8 * b_j * c_j^4 + 1000 * b_j) Fstat <- mean(z_j^2) p_val <- pf(Fstat, df1 = q, df2 = Inf, lower.tail = FALSE) result <- cbind(result, "EDT" = c(Fstat = Fstat, delta = 1, df = Inf, p_val = p_val)) } } result <- 
as.data.frame(t(result)) class(result) <- c("Wald_test_clubSandwich", class(result)) attr(result, "type") <- attr(vcov, "type") result } #--------------------------------------------- # print method for Wald_test #--------------------------------------------- #' @export print.Wald_test_clubSandwich <- function(x, digits = 3, ...) { p_val <- format.pval(x$p_val, digits = digits, eps = 10^-digits) sig <- symnum(x$p_val, corr = FALSE, na = FALSE, cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) res <- data.frame("Test" = rownames(x), "F" = x$Fstat, "d.f." = x$df, "p-val" = p_val) #, "Sig." = sig) print(format(res, digits = 3), row.names = FALSE) } clubSandwich/R/S3-methods.R0000644000176200001440000000435613500611662015142 0ustar liggesusers#---------------------------------------------- # get "working" variance-covariance matrix #---------------------------------------------- targetVariance <- function(obj, cluster) UseMethod("targetVariance") targetVariance.default <- function(obj, cluster) { matrix_list(rep(1, nobs(obj)), cluster, "both") } #---------------------------------------------- # get weighting matrix #---------------------------------------------- weightMatrix <- function(obj, cluster) UseMethod("weightMatrix") weightMatrix.default <- function(obj, cluster) { weights <- weights(obj) if (is.null(weights)) { weights <- w_scale <- 1 } else { w_scale <- mean(weights) weights <- weights / w_scale } W <- rep(weights, length.out = nobs(obj)) W_list <- matrix_list(W, cluster, "both") attr(W_list, "w_scale") <- w_scale W_list } #---------------------------------------------- # get X matrix #---------------------------------------------- model_matrix <- function(obj) UseMethod("model_matrix") model_matrix.default <- function(obj) { model.matrix(obj) } #---------------------------------------------- # get augmented design matrix #---------------------------------------------- augmented_model_matrix <- function(obj, cluster, 
inverse_var, ignore_FE) UseMethod("augmented_model_matrix") augmented_model_matrix.default <- function(obj, cluster, inverse_var, ignore_FE) { NULL } #---------------------------------------------- # get residuals #---------------------------------------------- residuals_CS <- function(obj) UseMethod("residuals_CS") residuals_CS.default <- function(obj) { residuals(obj) } #---------------------------------------------- # get coefficient estimates #---------------------------------------------- coef_CS <- function(obj) UseMethod("coef_CS") coef_CS.default <- function(obj) { coef(obj) } #---------------------------------------------- # get bread matrix #---------------------------------------------- # bread matrices imported from sandwich package or elsewhere #' @importFrom sandwich bread get_bread <- function(obj) bread(obj) v_scale <- function(obj) UseMethod("v_scale") v_scale.default <- function(obj) { nobs(obj) } clubSandwich/R/matrix-functions.R0000644000176200001440000000307713500611662016525 0ustar liggesusers #--------------------------------------------- # matrix manipulation functions #--------------------------------------------- sub_f <- function(x, fac, dim) { function(f) switch(dim, row = x[fac==f, ,drop=FALSE], col = x[ ,fac==f, drop=FALSE], both = x[fac==f, fac==f, drop=FALSE]) } matrix_list <- function(x, fac, dim) { if (is.vector(x)) { if (dim != "both") stop(paste0("Object must be a matrix in order to subset by ",dim,".")) x_list <- split(x, fac) lapply(x_list, function(x) diag(x, nrow = length(x))) } else { lapply(levels(fac), sub_f(x, fac, dim)) } } matrix_power <- function(x, p, symmetric = TRUE, tol = -12) { eig <- eigen(x, symmetric = symmetric) val_p <- with(eig, ifelse(values > 10^tol, values^p, 0)) with(eig, vectors %*% (val_p * t(vectors))) } chol_psd <- function(x) with(eigen(x, symmetric=TRUE), sqrt(pmax(values,0)) * t(vectors)) add_submatrices <- function(indices, small_mat, big_mat) { levs <- levels(indices) for (i in 1:length(levs)) { 
ind <- levs[i] == indices big_mat[ind,ind] <- big_mat[ind,ind] + small_mat[[i]] } big_mat } add_bdiag <- function(small_mats, big_mats, crosswalk) { small_indices <- lapply(split(crosswalk[[1]], crosswalk[[2]]), droplevels) big_indices <- unique(crosswalk) big_indices <- big_indices[[2]][order(big_indices[[1]])] small_mats <- split(small_mats, big_indices) Map(add_submatrices, indices = small_indices, small_mat = small_mats, big_mat = big_mats) } clubSandwich/R/lm.R0000644000176200001440000000451513500611662013621 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for an lm object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from an \code{\link{lm}} object. #' #' @param cluster Expression or vector indicating which observations belong to #' the same cluster. Required for \code{lm} objects. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If a vector, the target matrix is assumed to be #' diagonal. If not specified, the target is taken to be an identity matrix. #' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. 
#' #' @seealso \code{\link{vcovCR}} #' #' @examples #' data("Produc", package = "plm") #' lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) #' individual_index <- !grepl("state", names(coef(lm_individual))) #' vcovCR(lm_individual, cluster = Produc$state, type = "CR2")[individual_index,individual_index] #' #' # compare to plm() #' plm_FE <- plm::plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, #' data = Produc, index = c("state","year"), #' effect = "individual", model = "within") #' vcovCR(plm_FE, type="CR2") #' #' @export vcovCR.lm <- function(obj, cluster, type, target = NULL, inverse_var = NULL, form = "sandwich", ...) { if (missing(cluster)) stop("You must specify a clustering variable.") if (is.null(inverse_var)) inverse_var <- is.null(weights(obj)) & is.null(target) vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } # model_matrix() # residuals_CS() # coef() # nobs() # targetVariance() # weightMatrix() #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- # bread.lm() is in sandwich package v_scale.lm <- function(obj) { as.vector(sum(summary(obj)$df[1:2])) } clubSandwich/R/lmer.R0000644000176200001440000001046113576303502014151 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for an lmerMod object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from \code{\link[lme4]{merMod}} object. #' #' @param cluster Optional expression or vector indicating which observations #' belong to the same cluster. If not specified, will be set to #' \code{getGroups(obj)}. 
#' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If not specified, the target is taken to be the #' estimated variance-covariance structure of the \code{lmerMod} object. #' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. #' #' @seealso \code{\link{vcovCR}} #' #' @examples #' library(lme4) #' sleep_fit <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' vcovCR(sleep_fit, type = "CR2") #' #' data(egsingle, package = "mlmRev") #' math_model <- lmer(math ~ year * size + female + black + hispanic #' + (1 | schoolid) + (1 | childid), #' data = egsingle) #' vcovCR(math_model, type = "CR2") #' #' @export vcovCR.lmerMod <- function(obj, cluster, type, target, inverse_var, form = "sandwich", ...) { if (!is.null(obj@call$weights)) stop("Models with prior weights are not currently supported.") if (missing(cluster)) cluster <- get_outer_group(obj) if(!is_nested_lmerMod(obj, cluster)) stop("Non-nested random effects detected. 
clubSandwich methods are not available for such models.") if (missing(target)) target <- NULL if (missing(inverse_var)) inverse_var <- is.null(target) vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } #------------------------------------- # check nesting of random effects #------------------------------------- get_outer_group <- function(obj) { group_n <- lme4::getME(obj, "l_i") group_facs <- lme4::getME(obj, "flist") group_facs[[which.min(group_n)]] } check_nested <- function(inner_grp, outer_grp) { n_outer <- tapply(outer_grp, inner_grp, function(x) length(unique(x))) all(n_outer == 1) } is_nested_lmerMod <- function(obj, cluster = get_outer_group(obj)) { group_facs <- lme4::getME(obj, "flist") nested <- vapply(group_facs, check_nested, outer_grp = cluster, FUN.VALUE = TRUE) all(nested) } # nobs() # model_matrix() #------------------------------------- # coef_CS() #------------------------------------- coef_CS.lmerMod <- function(obj) lme4::getME(obj, "fixef") #------------------------------------- # residuals_CS() #------------------------------------- residuals_CS.lmerMod <- function(obj) { y <- lme4::getME(obj, "y") X <- lme4::getME(obj, "X") beta <- lme4::getME(obj, "fixef") y - as.numeric(X %*% beta) } #------------------------------------- # Get (model-based) working variance matrix #------------------------------------- targetVariance.lmerMod <- function(obj, cluster = get_outer_group(obj)) { Z_mat <- lme4::getME(obj, "Z") Lambdat <- lme4::getME(obj, "Lambdat") Zlam_list <- matrix_list(Matrix::tcrossprod(Z_mat, Lambdat), fac = cluster, dim = "row") target_list <- lapply(Zlam_list, function(z) Matrix::tcrossprod(z) + Matrix::Diagonal(n = NROW(z))) lapply(target_list, as.matrix) } #------------------------------------- # Get weighting matrix #------------------------------------- weightMatrix.lmerMod <- function(obj, cluster = get_outer_group(obj)) { V_list <- targetVariance(obj, cluster) lapply(V_list, 
function(v) chol2inv(chol(v))) } #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- #' @export bread.lmerMod <- function(x, ...) { sigma_sq <- lme4::getME(x, "sigma")^2 as.matrix(vcov(x) * v_scale(x)) / sigma_sq } v_scale.lmerMod <- function(obj) { min(lme4::getME(obj, "l_i")) } clubSandwich/R/gls.R0000644000176200001440000001217413500611662013776 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for a gls object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from a \code{\link[nlme]{gls}} object. #' #' @param cluster Optional expression or vector indicating which observations #' belong to the same cluster. If not specified, will be set to #' \code{getGroups(obj)}. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If not specified, the target is taken to be the #' estimated variance-covariance structure of the \code{gls} object. #' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. #' #' @seealso \code{\link{vcovCR}} #' #' @examples #' #' library(nlme) #' data(Ovary, package = "nlme") #' Ovary$time_int <- 1:nrow(Ovary) #' lm_AR1 <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary, #' correlation = corAR1(form = ~ time_int | Mare)) #' vcovCR(lm_AR1, type = "CR2") #' #' @export vcovCR.gls <- function(obj, cluster, type, target, inverse_var, form = "sandwich", ...) 
{ if (missing(cluster)) cluster <- nlme::getGroups(obj) if (missing(target)) target <- NULL if (missing(inverse_var) ) inverse_var <- is.null(target) vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } # residuals_CS() # coef() # nobs() #------------------------------------- # model_matrix() #------------------------------------- getData <- function (object) { if ("data" %in% names(object)) { data <- object$data } else { dat_name <- object$call$data envir_names <- sys.frames() ind <- sapply(envir_names, function(e) exists(as.character(dat_name), envir = e)) e <- envir_names[[min(which(ind))]] data <- eval(dat_name, envir = e) } if (is.null(data)) return(data) naAct <- object[["na.action"]] if (!is.null(naAct)) { data <- if (inherits(naAct, "omit")) { data[-naAct, ] } else if (inherits(naAct, "exclude")) { data } else eval(object$call$na.action)(data) } subset <- object$call$subset if (!is.null(subset)) { subset <- eval(asOneSidedFormula(subset)[[2]], data) data <- data[subset, ] } data } model_matrix.gls <- function(obj) { dat <- getData(obj) model.matrix(formula(obj), data = dat) } #------------------------------------- # Get (model-based) working variance matrix #------------------------------------- targetVariance.gls <- function(obj, cluster = nlme::getGroups(obj)) { groups <- nlme::getGroups(obj) if (is.null(groups)) groups <- cluster if (is.null(obj$modelStruct$corStruct)) { if (is.null(obj$modelStruct$varStruct)) { V_list <- matrix_list(rep(1, length(cluster)), cluster, "both") } else { wts <- nlme::varWeights(obj$modelStruct$varStruct) V_list <- matrix_list(1 / wts^2, cluster, "both") } } else { R_list <- nlme::corMatrix(obj$modelStruct$corStruct) if (is.null(obj$modelStruct$varStruct)) { V_list <- R_list } else { sd_vec <- 1 / nlme::varWeights(obj$modelStruct$varStruct)[order(order(groups))] sd_list <- split(sd_vec, groups) V_list <- Map(function(R, s) tcrossprod(s) * R, R = R_list, s = sd_list) } } # 
check if clustering level is higher than highest level of random effects tb_groups <- table(groups) tb_cluster <- table(cluster) if (length(tb_groups) < length(tb_cluster) | any(as.vector(tb_groups) != rep(as.vector(tb_cluster), length.out = length(tb_groups))) | any(names(tb_groups) != rep(names(tb_cluster), length.out = length(tb_groups)))) { # check that random effects are nested within clusters tb_cross <- table(groups, cluster) nested <- apply(tb_cross, 1, function(x) sum(x > 0) == 1) if (!all(nested)) stop("Random effects are not nested within clustering variable.") # expand target_list to level of clustering crosswalk <- data.frame(groups, cluster) V_list <- add_bdiag(small_mats = V_list, big_mats = matrix_list(rep(0, length(cluster)), cluster, dim = "both"), crosswalk = crosswalk) } V_list } #------------------------------------- # Get weighting matrix #------------------------------------- weightMatrix.gls <- function(obj, cluster = nlme::getGroups(obj)) { V_list <- targetVariance(obj, cluster) lapply(V_list, function(v) chol2inv(chol(v))) } #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- #' @export bread.gls <- function(x, ...) { vcov(x) * nobs(x) / x$sigma^2 } # v_scale() is defaultclubSandwich/R/clubSandwich.R0000644000176200001440000002704513500611662015622 0ustar liggesusers#---------------------------------------------- # user-facing vcovCR function #---------------------------------------------- #' Cluster-robust variance-covariance matrix #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates. #' #' @param obj Fitted model for which to calculate the variance-covariance matrix #' @param cluster Expression or vector indicating which observations belong to #' the same cluster. For some classes, the cluster will be detected #' automatically if not specified. 
#' @param type Character string specifying which small-sample adjustment should #' be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, #' \code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of #' \code{\link{vcovCR}} for further information. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If a vector, the target matrix is assumed to be #' diagonal. If not specified, \code{vcovCR} will attempt to infer a value. #' @param inverse_var Optional logical indicating whether the weights used in #' fitting the model are inverse-variance. If not specified, \code{vcovCR} #' will attempt to infer a value. #' @param form Controls the form of the returned matrix. The default #' \code{"sandwich"} will return the sandwich variance-covariance matrix. #' Alternately, setting \code{form = "meat"} will return only the meat of the #' sandwich and setting \code{form = B}, where \code{B} is a matrix of #' appropriate dimension, will return the sandwich variance-covariance matrix #' calculated using \code{B} as the bread. #' @param ... Additional arguments available for some classes of objects. #' #' @description This is a generic function, with specific methods defined for #' \code{\link[stats]{lm}}, \code{\link[plm]{plm}}, \code{\link[stats]{glm}}, #' \code{\link[nlme]{gls}}, \code{\link[nlme]{lme}}, #' \code{\link[robumeta]{robu}}, \code{\link[metafor]{rma.uni}}, and #' \code{\link[metafor]{rma.mv}} objects. #' #' @details Several different small sample corrections are available, which run #' parallel with the "HC" corrections for heteroskedasticity-consistent #' variance estimators, as implemented in \code{\link[sandwich]{vcovHC}}. The #' "CR2" adjustment is recommended (Pustejovsky & Tipton, 2017; Imbens & #' Kolesar, 2016). See Pustejovsky and Tipton (2017) and Cameron and Miller #' (2015) for further technical details. 
Available options include: \describe{ #' \item{"CR0"}{is the original form of the sandwich estimator (Liang & Zeger, #' 1986), which does not make any small-sample correction.} #' \item{"CR1"}{multiplies CR0 by \code{m / (m - 1)}, where \code{m} is the #' number of clusters.} #' \item{"CR1p"}{multiplies CR0 by \code{m / (m - p)}, where \code{m} is the #' number of clusters and \code{p} is the number of covariates.} #' \item{"CR1S"}{multiplies CR0 by \code{(m (N-1)) / [(m - #' 1)(N - p)]}, where \code{m} is the number of clusters, \code{N} is the #' total number of observations, and \code{p} is the number of covariates. #' Some Stata commands use this correction by default.} #' \item{"CR2"}{is the #' "bias-reduced linearization" adjustment proposed by Bell and McCaffrey #' (2002) and further developed in Pustejovsky and Tipton (2017). The #' adjustment is chosen so that the variance-covariance estimator is exactly #' unbiased under a user-specified working model.} #' \item{"CR3"}{approximates the leave-one-cluster-out jackknife variance estimator (Bell & McCaffrey, #' 2002).} } #' #' @references Bell, R. M., & McCaffrey, D. F. (2002). Bias reduction in #' standard errors for linear regression with multi-stage samples. Survey #' Methodology, 28(2), 169-181. #' #' Cameron, A. C., & Miller, D. L. (2015). A Practitioner's Guide to #' Cluster-Robust Inference. \emph{Journal of Human Resources, 50}(2), 317-372. #' \doi{10.3368/jhr.50.2.317} #' #' Imbens, G. W., & Kolesar, M. (2016). Robust standard errors in small samples: #' Some practical advice. \emph{Review of Economics and Statistics, 98}(4), #' 701-712. \doi{10.1162/rest_a_00552} #' #' Liang, K.-Y., & Zeger, S. L. (1986). Longitudinal data analysis using #' generalized linear models. \emph{Biometrika, 73}(1), 13-22. #' \doi{10.1093/biomet/73.1.13} #' #' Pustejovsky, J. E. & Tipton, E. (2017). Small sample methods for #' cluster-robust variance estimation and hypothesis testing in fixed effects #' models. 
\emph{Journal of Business and Economic Statistics}. In Press. #' \doi{10.1080/07350015.2016.1247004} #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. The matrix has several attributes: #' \describe{ \item{type}{indicates which small-sample adjustment was used} #' \item{cluster}{contains the factor vector that defines independent #' clusters} \item{bread}{contains the bread matrix} \item{v_scale}{constant #' used in scaling the sandwich estimator} \item{est_mats}{contains a list of #' estimating matrices used to calculate the sandwich estimator} #' \item{adjustments}{contains a list of adjustment matrices used to calculate #' the sandwich estimator} \item{target}{contains the working #' variance-covariance model used to calculate the adjustment matrices. This #' is needed for calculating small-sample corrections for Wald tests.} } #' #' @seealso \code{\link{vcovCR.lm}}, \code{\link{vcovCR.plm}}, #' \code{\link{vcovCR.glm}}, \code{\link{vcovCR.gls}}, #' \code{\link{vcovCR.lme}}, \code{\link{vcovCR.robu}}, #' \code{\link{vcovCR.rma.uni}}, \code{\link{vcovCR.rma.mv}} #' #' @examples #' #' # simulate design with cluster-dependence #' m <- 8 #' cluster <- factor(rep(LETTERS[1:m], 3 + rpois(m, 5))) #' n <- length(cluster) #' X <- matrix(rnorm(3 * n), n, 3) #' nu <- rnorm(m)[cluster] #' e <- rnorm(n) #' y <- X %*% c(.4, .3, -.3) + nu + e #' dat <- data.frame(y, X, cluster, row = 1:n) #' #' # fit linear model #' lm_fit <- lm(y ~ X1 + X2 + X3, data = dat) #' vcov(lm_fit) #' #' # cluster-robust variance estimator with CR2 small-sample correction #' vcovCR(lm_fit, cluster = dat$cluster, type = "CR2") #' #' # compare small-sample adjustments #' CR_types <- paste0("CR",c("0","1","1S","2","3")) #' sapply(CR_types, function(type) #' sqrt(diag(vcovCR(lm_fit, cluster = dat$cluster, type = type)))) #' #' @export #' @import stats vcovCR <- 
function(obj, cluster, type, target, inverse_var, form, ...) UseMethod("vcovCR") #' Cluster-robust variance-covariance matrix #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates. #' #' @rdname vcovCR #' @export vcovCR.default <- function(obj, cluster, type, target = NULL, inverse_var = FALSE, form = "sandwich", ...) vcov_CR(obj, cluster, type, target, inverse_var, form) #--------------------------------------------- # Cluster-robust variance estimator #--------------------------------------------- adjust_est_mats <- function(type, est_mats, adjustments) { switch(type, CR0 = est_mats, CR1 = lapply(est_mats, function(e) e * adjustments), CR1p = lapply(est_mats, function(e) e * adjustments), CR1S = lapply(est_mats, function(e) e * adjustments), CR2 = Map(function(e, a) e %*% a, e = est_mats, a = adjustments), CR3 = Map(function(e, a) e %*% a, e = est_mats, a = adjustments), CR4 = Map(function(e, a) a %*% e, e = est_mats, a = adjustments)) } # uses methods residuals_CS(), model_matrix(), weightMatrix(), # targetVariance(), bread(), v_scale() vcov_CR <- function(obj, cluster, type, target = NULL, inverse_var = FALSE, form = "sandwich", ignore_FE = FALSE) { cluster <- droplevels(as.factor(cluster)) alias <- is.na(coef_CS(obj)) X <- model_matrix(obj) if (any(alias)) { X <- X[, !alias, drop = FALSE] } p <- NCOL(X) N <- NROW(X) if (length(cluster) != N) { if (class(na.action(obj)) == "omit") { cluster <- droplevels(cluster[-na.action(obj)]) } else { stop("Clustering variable must have length equal to nrow(model_matrix(obj)).") } } if (any(is.na(cluster))) stop("Clustering variable cannot have missing values.") J <- nlevels(cluster) X_list <- matrix_list(X, cluster, "row") W_list <- weightMatrix(obj, cluster) XW_list <- Map(function(x, w) as.matrix(t(x) %*% w), x = X_list, w = W_list) if (is.null(target)) { if (inverse_var) { Theta_list <- lapply(W_list, function(w) chol2inv(chol(w))) } else { 
Theta_list <- targetVariance(obj, cluster) } } else { if (!is.list(target)) { Theta_list <- matrix_list(target, cluster, "both") } else { Theta_list <- target } } if (type %in% c("CR2","CR4")) { S <- augmented_model_matrix(obj, cluster, inverse_var, ignore_FE) if (is.null(S)) { rm(S) U_list <- X_list UW_list <- XW_list } else { U <- cbind(X, S) rm(S) U_list <- matrix_list(U, cluster, "row") UW_list <- Map(function(u, w) as.matrix(t(u) %*% w), u = U_list, w = W_list) } UWU_list <- Map(function(uw, u) uw %*% u, uw = UW_list, u = U_list) M_U <- matrix_power(Reduce("+",UWU_list), p = -1) } adjustments <- do.call(type, args = mget(names(formals(type)))) E_list <- adjust_est_mats(type = type, est_mats = XW_list, adjustments = adjustments) resid <- residuals_CS(obj) res_list <- split(resid, cluster) components <- do.call(cbind, Map(function(e, r) e %*% r, e = E_list, r = res_list)) v_scale <- v_scale(obj) w_scale <- attr(W_list, "w_scale") if (is.null(w_scale)) w_scale <- 1L meat <- tcrossprod(components) * w_scale^2 / v_scale if (form == "sandwich") { bread <- sandwich::bread(obj) } else if (form == "meat") { bread <- NULL } else if (is.matrix(form)) { bread <- form form <- "sandwich" } vcov <- switch(form, sandwich = bread %*% meat %*% bread / v_scale, meat = meat) rownames(vcov) <- colnames(vcov) <- colnames(X) attr(vcov, "type") <- type attr(vcov, "cluster") <- cluster attr(vcov, "bread") <- bread attr(vcov, "v_scale") <- v_scale attr(vcov, "est_mats") <- XW_list attr(vcov, "adjustments") <- adjustments attr(vcov, "target") <- Theta_list attr(vcov, "inverse_var") <- inverse_var attr(vcov, "ignore_FE") <- ignore_FE class(vcov) <- c("vcovCR","clubSandwich") return(vcov) } #--------------------------------------------- # as.matrix method for vcovCR #--------------------------------------------- #' @export as.matrix.clubSandwich <- function(x, ...) 
{ attr(x, "type") <- NULL attr(x, "cluster") <- NULL attr(x, "bread") <- NULL attr(x, "v_scale") <- NULL attr(x, "est_mats") <- NULL attr(x, "adjustments") <- NULL attr(x, "target") <- NULL attr(x, "inverse_var") <- NULL attr(x, "ignore_FE") <- NULL class(x) <- "matrix" x } #--------------------------------------------- # print method for vcovCR #--------------------------------------------- #' @export print.clubSandwich <- function(x, ...) { print(as.matrix(x)) } clubSandwich/R/plm.R0000644000176200001440000002045513576051375014016 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for a plm object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from a \code{\link[plm]{plm}} #' object. #' #' @param cluster Optional character string, expression, or vector indicating #' which observations belong to the same cluster. For fixed-effect models that #' include individual effects or time effects (but not both), the cluster will #' be taken equal to the included fixed effects if not otherwise specified. #' Clustering on individuals can also be obtained by taking \code{cluster = #' "individual"} and clustering on time periods can be obtained with #' \code{cluster = "time"}. For random-effects models, the cluster will be #' taken equal to the included random effect identifier if not otherwise #' specified. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. By default, the target is taken to be an identity #' matrix for fixed effect models or the estimated compound-symmetric covariance matrix for random effects models. 
#' @param ignore_FE Optional logical controlling whether fixed effects are
#' ignored when calculating small-sample adjustments in models where fixed
#' effects are estimated through absorption.
#' @inheritParams vcovCR
#'
#' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists
#' of a matrix of the estimated variance of and covariances between the
#' regression coefficient estimates.
#'
#' @seealso \code{\link{vcovCR}}
#'
#' @examples
#'
#' library(plm)
#' # fixed effects
#' data("Produc", package = "plm")
#' plm_FE <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp,
#'               data = Produc, index = c("state","year"),
#'               effect = "individual", model = "within")
#' vcovCR(plm_FE, type="CR2")
#'
#' # random effects
#' plm_RE <- update(plm_FE, model = "random")
#' vcovCR(plm_RE, type = "CR2")
#'
#' # first differencing
#' data(Fatalities, package = "AER")
#' Fatalities <- within(Fatalities, {
#'   frate <- 10000 * fatal / pop
#'   drinkagec <- cut(drinkage, breaks = 18:22, include.lowest = TRUE, right = FALSE)
#'   drinkagec <- relevel(drinkagec, ref = 4)
#' })
#'
#' plm_FD <- plm(frate ~ beertax + drinkagec + miles + unemp + log(income),
#'               data = Fatalities, index = c("state", "year"),
#'               model = "fd")
#' vcovHC(plm_FD, method="arellano", type = "sss", cluster = "group")
#' vcovCR(plm_FD, type = "CR1S")
#' vcovCR(plm_FD, type = "CR2")
#'
#'
#' @export

vcovCR.plm <- function(obj, cluster, type, target, inverse_var,
                       form = "sandwich", ignore_FE = FALSE, ...) {

  # Two-way random effects models do not have a block-diagonal covariance
  # structure, so the cluster-robust machinery cannot be applied.
  # Use && (scalar short-circuit) rather than & for these length-one tests.
  if (obj$args$model == "random" && obj$args$effect == "twoways")
    stop("Variance matrix is not block diagonal.")

  # Infer the clustering variable from the panel index when not supplied;
  # otherwise validate/align the user-supplied value.
  if (missing(cluster)) {
    cluster <- findCluster.plm(obj = obj)
  } else {
    cluster <- findCluster.plm(obj = obj, cluster = cluster)
  }

  if (missing(target)) target <- NULL
  if (missing(inverse_var)) inverse_var <- is.null(target)

  # vcov_CR() consults na.action(obj); copy it over from the model frame.
  obj$na.action <- attr(obj$model, "na.action")

  vcov_CR(obj, cluster = cluster, type = type,
          target = target, inverse_var = inverse_var,
          form = form, ignore_FE = ignore_FE)
}

# Recover the row ordering implied by the panel index, so that a
# user-supplied cluster vector (given in original data order) can be
# aligned with the rows of the model frame.
get_index_order <- function(obj) {
  envir <- environment(obj$formula)
  mf <- match.call(plm::plm, call = obj$call, envir = envir)
  dat <- eval(mf$data, envir)
  index_names <- eval(mf$index)
  # Use || (scalar short-circuit) rather than | for this length-one test.
  if (inherits(dat, "pdata.frame") || is.numeric(index_names)) {
    indices <- plm::index(obj)
  } else {
    # By plm convention the first two columns hold the index when unnamed.
    if (is.null(index_names)) index_names <- names(dat)[1:2]
    indices <- as.list(dat[index_names])
  }
  do.call(order, args = indices)
}

# Determine the clustering variable for a plm object: from the panel index
# ("individual" or "time"), or from a user-supplied vector re-ordered to
# match the model frame.
findCluster.plm <- function(obj, cluster) {

  index <- attr(model.frame(obj), "index")

  if (missing(cluster)) {
    # There is no default clustering dimension for two-way effects models.
    if (obj$args$effect == "twoways") stop("You must specify a clustering variable.")
    cluster <- switch(obj$args$effect,
                      individual = index[[1]],
                      time = index[[2]])
  } else if (length(cluster) == 1 && is.character(cluster)) {
    # Use && so is.character() is only evaluated for a length-one value.
    if (cluster %in% c("individual","time")) {
      cluster <- switch(cluster,
                        individual = index[[1]],
                        time = index[[2]])
    }
  } else {
    # Align a user-supplied vector with the model-frame row order.
    sort_order <- get_index_order(obj)
    cluster <- cluster[sort_order]
  }

  # First-differencing drops the first time period within each individual,
  # so drop the corresponding cluster entries.
  if (obj$args$model == "fd") {
    cluster <- cluster[index[[2]] != levels(index[[2]])[1]]
  }

  cluster
}

#-----------------------------------------------
# Model matrix
#-----------------------------------------------

# Model matrix on the estimation scale: the untransformed matrix for
# random effects models, or the transformed (e.g. within-demeaned) matrix
# for other plm model types.
model_matrix.plm <- function(obj) {
  if (obj$args$model == "random") {
    model.matrix(Formula::as.Formula(formula(obj)), model.frame(obj))
  } else {
    model.matrix(obj, model = obj$args$model, effect = obj$args$effect)
  }
}

#----------------------------------------------
# Augmented model matrix
#----------------------------------------------
# Matrix of fixed-effect dummies to be accounted for when building the
# CR2/CR4 adjustment matrices for plm models that absorb fixed effects.
# Returns NULL when no augmentation is needed (ignore_FE, non-"within"
# models, or inverse-variance weights with clustering on the absorbed
# dimension).
augmented_model_matrix.plm <- function(obj, cluster, inverse_var, ignore_FE) {

  index <- attr(model.frame(obj), "index")
  individual <- droplevels(as.factor(index[[1]]))
  time <- droplevels(as.factor(index[[2]]))
  effect <- obj$args$effect

  # Use && (scalar short-circuit) rather than & for these length-one tests.
  if (ignore_FE) {
    S <- NULL
  } else if (obj$args$model == "within") {
    if (effect == "individual") {
      if (inverse_var && identical(individual, cluster)) {
        S <- NULL
      } else {
        S <- model.matrix(~ 0 + individual)
      }
    } else if (effect == "time") {
      if (inverse_var && identical(time, cluster)) {
        S <- NULL
      } else {
        S <- model.matrix(~ 0 + time)
      }
    } else if (effect == "twoways") {
      if (inverse_var && identical(individual, cluster)) {
        # Individual effects are absorbed by clustering; keep only the time
        # dummies, orthogonalized against the individual dummies.
        S <- residuals(lm.fit(model.matrix(~ 0 + individual),
                              model.matrix(~ 0 + time)[,-1]))
      } else if (inverse_var && identical(time, cluster)) {
        S <- residuals(lm.fit(model.matrix(~ 0 + time),
                              model.matrix(~ 0 + individual)[,-1]))
      } else {
        S <- model.matrix(~ 0 + individual + time)
      }
    }
  } else {
    S <- NULL
  }

  return(S)
}

#-------------------------------------
# unadjusted residuals
#-------------------------------------

# Residuals for sandwich estimation. For random effects models, recompute
# y - Xb from the pooled-scale response and the untransformed model matrix
# (rather than using the object's stored residuals); otherwise use the
# fitted model's residuals directly.
residuals_CS.plm <- function(obj) {
  if (obj$args$model == "random") {
    y <- plm::pmodel.response(formula(obj), model.frame(obj), model = "pooling")
    nm <- names(y)
    y <- as.numeric(y)
    names(y) <- nm
    Xb <- as.numeric(model_matrix(obj) %*% coef(obj))
    y - Xb
  } else {
    residuals(obj)
  }
}

#-------------------------------------
# Get (model-based) working variance matrix
#-------------------------------------

# Working covariance model, one block per cluster: compound-symmetric
# blocks (unit diagonal plus r = tau^2 / sigma^2 everywhere) for random
# effects models, identity blocks otherwise.
targetVariance.plm <- function(obj, cluster) {
  if (obj$args$model == "random") {
    block_mat <- function(nj) {
      sigma_sq <- obj$ercomp$sigma2[[1]]
      tau_sq <- obj$ercomp$sigma2[[2]]
      r <- tau_sq / sigma_sq
      Vj <- matrix(r, nj, nj)
      diag(Vj) <- 1 + r
      Vj
    }
    lapply(table(cluster), block_mat)
  } else {
    matrix_list(rep(1, nobs(obj)), cluster, "both")
  }
}

#-------------------------------------
# Get weighting matrix
#-------------------------------------

# Weight matrices, one block per cluster: for random effects models, the
# closed-form inverse of the compound-symmetric working covariance block
# (I - theta * J with theta = tau^2 / (nj * tau^2 + sigma^2)); identity
# blocks otherwise.
weightMatrix.plm <- function(obj, cluster) {
  if (obj$args$model == "random") {
    sigma_sq <- obj$ercomp$sigma2[[1]]
    tau_sq <- obj$ercomp$sigma2[[2]]
    block_mat <- function(nj) {
      theta <- tau_sq / ((nj * tau_sq + sigma_sq))
      Wj <- matrix(-theta, nj, nj)
      diag(Wj) <- 1 - theta
      Wj
    }
    lapply(table(cluster), block_mat)
  } else {
    matrix_list(rep(1, nobs(obj)), cluster, "both")
  }
}

#---------------------------------------
# Get bread matrix and scaling constant
#---------------------------------------

# Bread of the sandwich estimator, recovered from vcov(x) by dividing out
# the residual-variance scale and multiplying by the scaling constant.
bread.plm <- function(x, ...) {
  # if (x$args$model=="random") {
  #   v_scale(x) * vcov(x) / x$ercomp$sigma2$idios
  # } else {
  #   v_scale(x) * vcov(x) / with(x, sum(residuals^2) / df.residual)
  # }
  v_scale(x) * vcov(x) / with(x, sum(residuals^2) / df.residual)
}

# Scaling constant: the number of levels of the largest panel index
# dimension.
v_scale.plm <- function(obj) {
  max(sapply(attr(obj$model, "index"), nlevels))
}
clubSandwich/R/mlm.R0000644000176200001440000001037713500611662014001 0ustar liggesusers#-------------------------------------
# vcovCR with defaults
#-------------------------------------

#' Cluster-robust variance-covariance matrix for an mlm object.
#'
#' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix
#' of a set of regression coefficient estimates from an \code{mlm} object.
#'
#' @param cluster Optional expression or vector indicating which observations
#' belong to the same cluster. If not specified, each row of the data will be
#' treated as a separate cluster.
#' @param target Optional matrix or vector describing the working
#' variance-covariance model used to calculate the \code{CR2} and \code{CR4}
#' adjustment matrices. If not specified, the target is taken to be an
#' identity matrix.
#' @inheritParams vcovCR
#'
#' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists
#' of a matrix of the estimated variance of and covariances between the
#' regression coefficient estimates.
#' #' @seealso \code{\link{vcovCR}} #' #' @examples #' iris_fit <- lm(cbind(Sepal.Length, Sepal.Width) ~ Species + #' Petal.Length + Petal.Width, data = iris) #' Vcluster <- vcovCR(iris_fit, type = "CR2") #' #' @export vcovCR.mlm <- function(obj, cluster, type, target, inverse_var, form = "sandwich", ...) { d <- ncol(residuals(obj)) if (missing(cluster)) cluster <- 1:nobs(obj) if (length(cluster) == nobs(obj)) cluster <- rep(cluster, each = d) if (length(cluster) != d * nobs(obj)) stop("Clustering variable is not correct length.") if (missing(target)) target <- NULL if (missing(inverse_var)) inverse_var <- is.null(weights(obj)) & is.null(target) vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } # nobs() #------------------------------------- # residuals #------------------------------------- residuals_CS.mlm <- function(obj) { res <- residuals(obj) as.vector(t(res)) } #------------------------------------- # model_matrix() #------------------------------------- model_matrix.mlm <- function(obj) { X <- model.matrix(obj) d <- ncol(residuals(obj)) X_mat <- X %x% diag(1L, nrow = d) rownames(X_mat) <- rep(dimnames(X)[[1]], each = d) colnames(X_mat) <- paste(rep(colnames(residuals(obj)), ncol(X)), rep(colnames(X), each = d), sep = ":") i <- unlist(lapply(1:d, function(x) seq(x, ncol(X_mat), d))) X_mat[,i] } #---------------------------------------------- # get "working" variance-covariance matrix #---------------------------------------------- targetVariance.mlm <- function(obj, cluster) { matrix_list(rep(1, nobs(obj) * ncol(residuals(obj))), cluster, "both") } #------------------------------------- # Get weighting matrix #------------------------------------- weightMatrix.mlm <- function(obj, cluster) { weights <- weights(obj) if (is.null(weights)) { weights <- w_scale <- 1 } else { w_scale <- mean(weights) weights <- weights / w_scale } W <- rep(weights, length.out = nobs(obj)) W <- rep(W, each = 
ncol(residuals(obj))) W_list <- matrix_list(W, cluster, "both") attr(W_list, "w_scale") <- w_scale W_list } #---------------------------------------------- # get coefficient estimates #---------------------------------------------- coef_CS.mlm <- function(obj) { cf <- coef(obj) res <- as.vector(cf) names(res) <- paste(rep(colnames(cf), each = nrow(cf)), rep(rownames(cf), ncol(cf)), sep = ":") res } #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- #' @export #' bread.mlm <- function (x, ...) { if (!is.null(x$na.action)) class(x$na.action) <- "omit" X_mat <- model.matrix(x) w <- weights(x) XWX <- if (!is.null(w)) crossprod(X_mat, w * X_mat) else crossprod(X_mat) B <- chol2inv(chol(XWX)) rval <- diag(ncol(residuals(x))) %x% (B * nobs(x)) col_names <- paste(rep(colnames(residuals(x)), each = ncol(X_mat)), rep(colnames(X_mat), ncol(residuals(x))), sep = ":") colnames(rval) <- rownames(rval) <- col_names return(rval) } v_scale.mlm <- function(obj) { nobs(obj) } clubSandwich/R/coef_test.R0000644000176200001440000002110013576051441015157 0ustar liggesusers #--------------------------------------------- # Satterthwaite approximation #--------------------------------------------- Satterthwaite <- function(beta, SE, P_array) { V_coef <- 2 * apply(P_array, 3, function(x) sum(x^2)) E_coef <- apply(P_array, 3, function(x) sum(diag(x))) df <- 2 * E_coef^2 / V_coef p_val <- 2 * pt(abs(beta / SE), df = df, lower.tail = FALSE) data.frame(df = df, p_Satt = p_val) } #--------------------------------------------- # Saddlepoint approximation #--------------------------------------------- saddlepoint_pval <- function(t, Q) { eig <- pmax(0, eigen(Q, symmetric = TRUE, only.values=TRUE)$values) g <- c(1, -t^2 * eig / sum(eig)) s_eq <- function(s) sum(g / (1 - 2 * g * s)) s_range <- if (t^2 < 1) c(1 / (2 * min(g)), 0) else c(0, 1 / (2 * max(g))) s <- uniroot(s_eq, s_range)$root if (abs(s) > .01) { r <- sign(s) * 
sqrt(sum(log(1 - 2 * g * s))) q <- s * sqrt(2 * sum(g^2 / (1 - 2 * g * s)^2)) p_val <- 1 - pnorm(r) - dnorm(r) * (1 / r - 1 / q) } else { p_val <- 0.5 - sum(g^3) / (3 * sqrt(pi) * sum(g^2)^(3/2)) } c(s = s, p_val = p_val) } saddlepoint <- function(t_stats, P_array) { saddles <- sapply(1:length(t_stats), function(i) saddlepoint_pval(t = t_stats[i], Q = P_array[,,i])) data.frame(saddlepoint = saddles["s",], p_saddle = saddles["p_val",]) } #--------------------------------------------- # find which coefficients to test #--------------------------------------------- get_which_coef <- function(beta, coefs) { p <- length(beta) if (identical(coefs,"All")) return(rep(TRUE, p)) switch(class(coefs), character = { term_names <- names(beta) if (length(coefs) == 0) stop("You must specify at least one coefficient to test.") if (any(!coefs %in% term_names)) stop("Coefficient names not in model specification.") term_names %in% coefs }, logical = { if (sum(coefs) == 0) stop("You must specify at least one coefficient to test.") if (length(coefs) != p) stop(paste0("Coefficient vector must be of length ",p, ".")) coefs }, numeric = { if (any(!(coefs %in% 1:p))) stop(paste0("Coefficient indices must be less than or equal to ",p,".")) if (length(coefs) == 0) stop("You must specify at least one coefficient to test.") (1:p) %in% coefs }, integer = { if (any(!(coefs %in% 1:p))) stop(paste0("Coefficient indices must be less than or equal to ",p,".")) if (length(coefs) == 0) stop("You must specify at least one coefficient to test.") (1:p) %in% coefs } ) } #--------------------------------------------- # coeftest for all model coefficients #--------------------------------------------- #' Test all or selected regression coefficients in a fitted model #' #' \code{coef_test} reports t-tests for each coefficient estimate in a fitted #' linear regression model, using a sandwich estimator for the standard errors #' and a small sample correction for the p-value. 
The small-sample correction is
#' based on a Satterthwaite approximation or a saddlepoint approximation.
#'
#' @param obj Fitted model for which to calculate t-tests.
#' @param vcov Variance covariance matrix estimated using \code{vcovCR} or a
#' character string specifying which small-sample adjustment should be used to
#' calculate the variance-covariance.
#' @param test Character vector specifying which small-sample corrections to
#' calculate. \code{"z"} returns a z test (i.e., using a standard normal
#' reference distribution). \code{"naive-t"} returns a t test with \code{m -
#' 1} degrees of freedom. \code{"Satterthwaite"} returns a Satterthwaite
#' correction. \code{"saddlepoint"} returns a saddlepoint correction. Default
#' is \code{"Satterthwaite"}.
#' @param coefs Character, integer, or logical vector specifying which
#' coefficients should be tested. The default value \code{"All"} will test all
#' estimated coefficients.
#' @param p_values Logical indicating whether to report p-values. The default
#' value is \code{TRUE}.
#' @param ... Further arguments passed to \code{\link{vcovCR}}, which are only
#' needed if \code{vcov} is a character string.
#'
#' @return A data frame containing estimated regression coefficients, standard
#' errors, and test results. For the Satterthwaite approximation, degrees of
#' freedom and a p-value are reported. For the saddlepoint approximation, the
#' saddlepoint and a p-value are reported.
#' #' @seealso \code{\link{vcovCR}} #' #' @examples #' data("Produc", package = "plm") #' lm_individual <- lm(log(gsp) ~ 0 + state + log(pcap) + log(pc) + log(emp) + unemp, data = Produc) #' individual_index <- !grepl("state", names(coef(lm_individual))) #' coef_test(lm_individual, vcov = "CR2", cluster = Produc$state, coefs = individual_index) #' #' V_CR2 <- vcovCR(lm_individual, cluster = Produc$state, type = "CR2") #' coef_test(lm_individual, vcov = V_CR2, coefs = individual_index) #' #' @export coef_test <- function(obj, vcov, test = "Satterthwaite", coefs = "All", p_values = TRUE, ...) { beta_full <- coef_CS(obj) beta_NA <- is.na(beta_full) which_beta <- get_which_coef(beta_full, coefs) beta <- beta_full[which_beta & !beta_NA] if (is.character(vcov)) vcov <- vcovCR(obj, type = vcov, ...) if (!inherits(vcov, "clubSandwich")) stop("Variance-covariance matrix must be a clubSandwich.") all_tests <- c("z","naive-t","Satterthwaite","saddlepoint") if (all(test == "All")) test <- all_tests test <- match.arg(test, all_tests, several.ok = TRUE) SE <- sqrt(diag(vcov))[which_beta[!beta_NA]] if (any(c("Satterthwaite","saddlepoint") %in% test)) { P_array <- get_P_array(get_GH(obj, vcov))[,,which_beta[!beta_NA],drop=FALSE] } result <- data.frame(beta = beta) result$SE <- SE result$tstat <- beta / SE if ("z" %in% test) { result$p_z <- 2 * pnorm(abs(result$tstat), lower.tail = FALSE) } if ("naive-t" %in% test) { J <- nlevels(attr(vcov, "cluster")) result$p_t <- 2 * pt(abs(result$tstat), df = J - 1, lower.tail = FALSE) } if ("Satterthwaite" %in% test) { Satt <- Satterthwaite(beta = beta, SE = SE, P_array = P_array) result$df <- Satt$df result$p_Satt <- Satt$p_Satt } if ("saddlepoint" %in% test) { saddle <- saddlepoint(t_stats = beta / SE, P_array = P_array) result$saddlepoint <- saddle$saddlepoint result$p_saddle <-saddle$p_saddle } class(result) <- c("coef_test_clubSandwich", class(result)) attr(result, "type") <- attr(vcov, "type") if (p_values) { result } else { which_vars 
<- !grepl("p_", names(result)) result[which_vars] } } #--------------------------------------------- # print method for coef_test #--------------------------------------------- #' @export print.coef_test_clubSandwich <- function(x, digits = 3, ...) { res <- data.frame("Coef." = rownames(x), "Estimate" = x$beta, "SE" = x$SE) res <- cbind(res, "t-stat" = x$tstat) if ("p_z" %in% names(x)) { p_z <- format.pval(x$p_z, digits = digits, eps = 10^-digits) Sig_z <- cut(x$p_z, breaks = c(0, 0.001, 0.01, 0.05, 0.1, 1), labels = c("***", "**", "*", ".", " "), include.lowest = TRUE) res <- cbind(res, "p-val (z)" = p_z, "Sig." = Sig_z) } if ("p_t" %in% names(x)) { p_t <- format.pval(x$p_t, digits = digits, eps = 10^-digits) Sig_t <- cut(x$p_t, breaks = c(0, 0.001, 0.01, 0.05, 0.1, 1), labels = c("***", "**", "*", ".", " "), include.lowest = TRUE) res <- cbind(res, "p-val (naive-t)" = p_t, "Sig." = Sig_t) } if ("df" %in% names(x)) { res <- cbind(res, "d.f." = x$df) } if ("p_Satt" %in% names(x)) { p_Satt <- format.pval(x$p_Satt, digits = digits, eps = 10^-digits) Sig_Satt <- cut(x$p_Satt, breaks = c(0, 0.001, 0.01, 0.05, 0.1, 1), labels = c("***", "**", "*", ".", " "), include.lowest = TRUE) res <- cbind(res, "p-val (Satt)" = p_Satt, "Sig." = Sig_Satt) } if ("p_saddle" %in% names(x)) { p_saddle <- format.pval(x$p_saddle, digits = digits, eps = 10^-digits) Sig_saddle <- cut(x$p_saddle, breaks = c(0, 0.001, 0.01, 0.05, 0.1, 1), labels = c("***", "**", "*", ".", " "), include.lowest = TRUE) res <- cbind(res, "s.p." = x$saddlepoint, "p-val (Saddle)" = p_saddle, "Sig." 
= Sig_saddle) } print(format(res, digits = 3)) }
clubSandwich/R/CR-adjustments.R0000644000176200001440000000605013500611662016050 0ustar liggesusers#---------------------------------------------
# Auxiliary functions for CR* functions
#---------------------------------------------

# One matrix per cluster: I - X_j M (X_j W_j)', the within-cluster
# "identity minus hat" matrix used by the CR2 and CR3 adjustments.
IH_jj_list <- function(M, X_list, XW_list) {
  Map(function(x, xw) diag(nrow = nrow(x)) - x %*% M %*% xw,
      x = X_list, xw = XW_list)
}

#---------------------------------------------
# Estimating function adjustments
#---------------------------------------------

# NOTE: vcov_CR() invokes these via do.call(type, mget(names(formals(type)))),
# so each function's formal argument names determine exactly which quantities
# it receives (J = number of clusters, N = observations, p = covariates).

# CR0: no adjustment.
CR0 <- function(J) NULL

# CR1: scalar multiplier sqrt(J / (J - 1)).
CR1 <- function(J) sqrt(J / (J - 1))

# CR1p: scalar multiplier sqrt(J / (J - p)).
CR1p <- function(J, p) sqrt(J / (J - p))

# CR1S: Stata-style scalar multiplier.
CR1S <- function(J, N, p) sqrt(J * (N - 1) / ((J - 1) * (N - p)))

# CR2: bias-reduced linearization adjustment matrices, one per cluster.
# Takes the (possibly augmented) model matrix blocks U_j, their weighted
# versions, the working covariance blocks Theta_j, and M_U = (sum U'WU)^-1.
CR2 <- function(M_U, U_list, UW_list, Theta_list, inverse_var = FALSE) {

  Theta_chol <- lapply(Theta_list, chol)

  if (inverse_var) {
    # Inverse-variance weights: simplified form based on (I - H_jj).
    IH_jj <- IH_jj_list(M_U, U_list, UW_list)
    G_list <- Map(function(a, b, ih) as.matrix(a %*% ih %*% b %*% t(a)),
                  a = Theta_chol, b = Theta_list, ih = IH_jj)
  } else {
    # General weights: full expression involving H_jj and the cross-cluster
    # term M_U (sum UW Theta W'U') M_U.
    H_jj <- Map(function(u, uw) u %*% M_U %*% uw, u = U_list, uw = UW_list)
    uwTwu <- Map(function(uw, th) uw %*% th %*% t(uw),
                 uw = UW_list, th = Theta_list)
    MUWTWUM <- M_U %*% Reduce("+", uwTwu) %*% M_U
    G_list <- Map(function(thet, h, u, v)
                    as.matrix(v %*% (thet - h %*% thet - thet %*% t(h) +
                                       u %*% MUWTWUM %*% t(u)) %*% t(v)),
                  thet = Theta_list, h = H_jj, u = U_list, v = Theta_chol)
  }

  # Adjustment matrix A_j = V_j' G_j^(-1/2) V_j per cluster.
  Map(function(v, g) as.matrix(t(v) %*% matrix_power(g, -1/2) %*% v),
      v = Theta_chol, g = G_list)
}

# CR3: jackknife-style adjustment, (I - H_jj)^(-1) per cluster.
CR3 <- function(X_list, XW_list) {
  XWX_list <- Map(function(xw, x) xw %*% x, xw = XW_list, x = X_list)
  M <- chol2inv(chol(Reduce("+", XWX_list)))
  IH_jj <- IH_jj_list(M, X_list, XW_list)
  lapply(IH_jj, solve)
}

CR4 <- function(M_U, U_list, UW_list, X_list, XW_list, Theta_list, inverse_var = FALSE) {
  if (inverse_var) {
    F_list <- Map(function(xw, x) xw %*% x, xw = XW_list, x = X_list)
    UWX_list <- Map(function(uw, x) uw %*% x, uw = UW_list, x = X_list)
    F_chol <- lapply(F_list, chol_psd)
    G_list <-
Map(function(fc, fm, uwx) fc %*% (fm - t(uwx) %*% M_U %*% uwx) %*% t(fc), fc = F_chol, fm = F_list, uwx = UWX_list) } else { F_list <- Map(function(xw, theta) xw %*% theta %*% t(xw), xw = XW_list, theta = Theta_list) F_chol <- lapply(F_list, chol_psd) UWX_list <- Map(function(uw, x) uw %*% x, uw = UW_list, x = X_list) UWTWX_list <- Map(function(uw, xw, theta) uw %*% theta %*% t(xw), uw = UW_list, xw = XW_list, theta = Theta_list) UWTWU_list <- Map(function(uw, theta) uw %*% theta %*% t(uw), uw = UW_list, theta = Theta_list) MUWTWUM <- M_U %*% Reduce("+", UWTWU_list) %*% M_U G_list <- Map(function(fc, fm, uwx, uwtwx) as.matrix(fc %*% (fm - t(uwx) %*% M_U %*% uwtwx - t(uwtwx) %*% M_U %*% uwx + t(uwx) %*% MUWTWUM %*% uwx) %*% t(fc)), fc = F_chol, fm = F_list, uwx = UWX_list, uwtwx = UWTWX_list) } Map(function(fc, g) as.matrix(t(fc) %*% matrix_power(g, -1/2) %*% fc), fc = F_chol, g = G_list) } clubSandwich/R/ivreg.R0000644000176200001440000000466213500611662014330 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for an ivreg object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from an \code{\link[AER]{ivreg}} object. #' #' @param cluster Expression or vector indicating which observations belong to #' the same cluster. Required for \code{ivreg} objects. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If a vector, the target matrix is assumed to be #' diagonal. If not specified, the target is taken to be an identity matrix. #' @param inverse_var Not used for \code{ivreg} objects. 
#' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. #' #' @seealso \code{\link{vcovCR}} #' #' @examples #' library(AER) #' data("CigarettesSW") #' Cigs <- within(CigarettesSW, { #' rprice <- price/cpi #' rincome <- income/population/cpi #' tdiff <- (taxs - tax)/cpi #' }) #' #' iv_fit <- ivreg(log(packs) ~ log(rprice) + log(rincome) | #' log(rincome) + tdiff + I(tax/cpi), data = Cigs) #' vcovCR(iv_fit, cluster = Cigs$state, type = "CR2") #' coef_test(iv_fit, vcov = "CR2", cluster = Cigs$state) #' #' @export vcovCR.ivreg <- function(obj, cluster, type, target = NULL, inverse_var = FALSE, form = "sandwich", ...) { if (missing(cluster)) stop("You must specify a clustering variable.") if (inverse_var != FALSE) stop("Unfortunately, the inverse_var option is not available for ivreg models.") vcov_CR(obj, cluster = cluster, type = type, target = target, inverse_var = inverse_var, form = form) } # residuals_CS() # coef() # targetVariance() # weightMatrix() # v_scale() #---------------------------------------------- # get X matrix #---------------------------------------------- model_matrix.ivreg <- function(obj) { model.matrix(obj, component = "projected") } #--------------------------------------- # Get bread matrix and scaling constant #--------------------------------------- # bread.ivreg() is in AER package # use default v_scale()clubSandwich/R/glm.R0000644000176200001440000000656113500611662013773 0ustar liggesusers#------------------------------------- # vcovCR with defaults #------------------------------------- #' Cluster-robust variance-covariance matrix for a glm object. #' #' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix #' of a set of regression coefficient estimates from an \code{\link{glm}} object. 
#' #' @param cluster Expression or vector indicating which observations belong to #' the same cluster. Required for \code{glm} objects. #' @param target Optional matrix or vector describing the working #' variance-covariance model used to calculate the \code{CR2} and \code{CR4} #' adjustment matrices. If a vector, the target matrix is assumed to be #' diagonal. If not specified, the target is taken to be the estimated variance function. #' @inheritParams vcovCR #' #' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists #' of a matrix of the estimated variance of and covariances between the #' regression coefficient estimates. #' #' @seealso \code{\link{vcovCR}} #' #' @examples #' #' data(dietox, package = "geepack") #' dietox$Cu <- as.factor(dietox$Cu) #' weight_fit <- glm(Weight ~ Cu * poly(Time, 3), data=dietox, family = "quasipoisson") #' V_CR <- vcovCR(weight_fit, cluster = dietox$Pig, type = "CR2") #' coef_test(weight_fit, vcov = V_CR, test = "Satterthwaite") #' #' @export vcovCR.glm <- function(obj, cluster, type, target = NULL, inverse_var = NULL, form = "sandwich", ...) 
{

  if (missing(cluster)) stop("You must specify a clustering variable.")

  # Default: treat the model weights as inverse-variance weights only when
  # no explicit target matrix has been supplied.
  if (is.null(inverse_var)) inverse_var <- is.null(target)

  vcov_CR(obj, cluster = cluster, type = type,
          target = target, inverse_var = inverse_var, form = form)
}

# coef()
# nobs()

#-----------------------------------------------
# Model matrix
#-----------------------------------------------

# Design matrix scaled by d(mu)/d(eta), evaluated at the fitted linear predictor.
model_matrix.glm <- function(obj) {
  X <- model.matrix(obj)
  deriv <- obj$family$mu.eta(obj$linear.predictors)
  deriv * X
}

#-------------------------------------
# residuals
#-------------------------------------

# Response-scale residuals.
residuals_CS.glm <- function(obj) {
  residuals(obj, type = "response")
}

#-----------------------------------------------
# Get (model-based) working variance matrix
#-----------------------------------------------

targetVariance.glm <- function(obj, cluster) {
  # Family variance function at the fitted means, divided by the prior weights.
  v <- obj$family$variance(fitted.values(obj))
  w <- weights(obj, type = "prior")
  matrix_list(v / w, cluster, "both")
}

#-------------------------------------
# Get weighting matrix
#-------------------------------------

weightMatrix.glm <- function(obj, cluster) {
  # Inverse of the working variances: prior weights over V(mu).
  v <- obj$family$variance(fitted.values(obj))
  w <- weights(obj, type = "prior")
  matrix_list(w / v, cluster, "both")
}

#---------------------------------------
# Get bread matrix and scaling constant
#---------------------------------------

# bread.glm() is in sandwich package

v_scale.glm <- function(obj) {
  # Dispersion is fixed at 1 for poisson, binomial, and Negative Binomial
  # families (substr to 17 characters so "Negative Binomial(theta)" names
  # match); otherwise it is estimated from the working residuals.
  fam <- substr(obj$family$family, 1, 17)
  if (fam %in% c("poisson", "binomial", "Negative Binomial")) {
    dispersion <- 1
  } else {
    w_work <- weights(obj, "working")
    wres <- as.vector(residuals(obj, "working")) * w_work
    dispersion <- sum(wres^2) / sum(w_work)
  }
  as.vector(sum(summary(obj)$df[1:2])) * dispersion
}
clubSandwich/R/rma-uni.R0000644000176200001440000000752413500611662014564 0ustar liggesusers#-------------------------------------
# vcovCR with defaults
#-------------------------------------

#' Cluster-robust variance-covariance matrix for a rma.uni object.
#'
#' \code{vcovCR} returns a sandwich estimate of the variance-covariance matrix
#' of a set of regression coefficient estimates from a
#' \code{\link[metafor]{rma.uni}} object.
#'
#' @param cluster Expression or vector indicating which observations
#'   belong to the same cluster. Required for \code{rma.uni} objects.
#' @param target Optional matrix or vector describing the working
#'   variance-covariance model used to calculate the \code{CR2} and \code{CR4}
#'   adjustment matrices. If not specified, the target is taken to be diagonal
#'   with entries equal to the estimated marginal variance of the effect sizes.
#' @inheritParams vcovCR
#'
#' @return An object of class \code{c("vcovCR","clubSandwich")}, which consists
#'   of a matrix of the estimated variance of and covariances between the
#'   regression coefficient estimates.
#'
#' @seealso \code{\link{vcovCR}}
#'
#' @export
#'
#' @examples
#' library(metafor)
#' data(corrdat, package = "robumeta")
#'
#' mfor_fit <- rma.uni(effectsize ~ males + college + binge,
#'                     vi = var, data = corrdat, method = "FE")
#' mfor_fit
#' mfor_CR2 <- vcovCR(mfor_fit, type = "CR2", cluster = corrdat$studyid)
#' mfor_CR2
#' coef_test(mfor_fit, vcov = mfor_CR2, test = c("Satterthwaite", "saddlepoint"))
#' Wald_test(mfor_fit, constraints = 2:4, vcov = mfor_CR2)
#'
vcovCR.rma.uni <- function(obj, cluster, type, target, inverse_var, form = "sandwich", ...)
{

  if (missing(cluster)) stop("You must specify a clustering variable.")

  # If the clustering variable covers the full data set (including rows
  # dropped due to missingness), subset it down to the estimation sample.
  if (length(cluster) != nrow(model_matrix(obj))) {
    cluster <- droplevels(as.factor(cluster[obj$not.na]))
  }
  if (length(cluster) != nrow(model_matrix(obj))) {
    stop("Clustering variable must have length equal to nrow(model_matrix(obj)).")
  }

  if (missing(target)) {
    target <- NULL
    if (missing(inverse_var)) inverse_var <- is.null(obj$weights) & obj$weighted
  } else if (missing(inverse_var)) {
    inverse_var <- FALSE
  }

  vcov_CR(obj, cluster = cluster, type = type,
          target = target, inverse_var = inverse_var, form = form)
}

# residuals_CS()

# Residuals restricted to the estimation sample.
residuals_CS.rma <- function(obj) {
  res <- residuals(obj)
  keep <- obj$not.na
  if (length(res) != length(keep)) return(res)
  res[keep]
}

# coef()
# vcov()
# model_matrix()

# na.action

na.action.rma <- function(object, ...) {
  # Positions of observations omitted due to missingness, tagged "omit".
  omitted <- which(!object$not.na)
  structure(omitted, class = "omit")
}

#-------------------------------------
# Get (model-based) working variance matrix
#-------------------------------------

targetVariance.rma.uni <- function(obj, cluster) {
  # Marginal variance: sampling variance plus estimated tau-squared.
  marg_var <- obj$vi + obj$tau2
  matrix_list(marg_var, cluster, "both")
}

#-------------------------------------
# Get weighting matrix
#-------------------------------------

weightMatrix.rma.uni <- function(obj, cluster) {
  # Weights are user-supplied, inverse marginal variance, or unit weights,
  # rescaled to have mean one (the scale is stored in the "w_scale" attribute).
  if (!obj$weighted) {
    wts <- rep(1, obj$k)
  } else if (is.null(obj$weights)) {
    wts <- 1 / (obj$vi + obj$tau2)
  } else {
    wts <- obj$weights
  }
  w_scale <- mean(wts)
  W_list <- matrix_list(wts / w_scale, cluster, "both")
  attr(W_list, "w_scale") <- w_scale
  W_list
}

#---------------------------------------
# Get bread matrix and scaling constant
#---------------------------------------

bread.rma.uni <- function(x, ...)
{
  # Bread matrix: (X'WX)^{-1}, scaled by the number of observations.
  X_mat <- model_matrix(x)
  if (x$weighted) {
    if (is.null(x$weights)) {
      wi <- 1 / (x$vi + x$tau2)
    } else {
      wi <- x$weights
    }
    XWX <- crossprod(X_mat, wi * X_mat)
  } else {
    XWX <- crossprod(X_mat)
  }
  B <- chol2inv(chol(XWX)) * nobs(x)
  rownames(B) <- colnames(B) <- colnames(X_mat)
  B
}

# Scaling constant for robu objects: the number of observations.
v_scale.robu <- function(obj) {
  nobs(obj)
}
clubSandwich/R/get_arrays.R0000644000176200001440000001402613500611662015347 0ustar liggesusers
#--------------------------
# get G list and H array
#--------------------------

# Build the list of G matrices and the H array from a fitted model and its
# vcovCR object; these feed the degrees-of-freedom calculations downstream.
get_GH <- function(obj, vcov) {

  cluster <- attr(vcov, "cluster")
  M <- attr(vcov, "bread") / attr(vcov, "v_scale")
  E_list <- adjust_est_mats(type = attr(vcov, "type"),
                            est_mats = attr(vcov, "est_mats"),
                            adjustments = attr(vcov, "adjustments"))
  target <- attr(vcov, "target")
  inverse_var <- attr(vcov, "inverse_var")
  ignore_FE <- attr(vcov, "ignore_FE")

  N <- length(cluster)
  J <- nlevels(cluster)
  X <- model_matrix(obj)
  # Drop aliased (NA) coefficients from the design matrix.
  alias <- is.na(coef_CS(obj))
  if (any(alias)) X <- X[, !alias, drop = FALSE]
  p <- ncol(X)

  W_list <- weightMatrix(obj, cluster)
  w_scale <- attr(W_list, "w_scale")
  if (is.null(w_scale)) w_scale <- 1

  # S is an augmented design (e.g. absorbed fixed effects), or NULL if none.
  S <- augmented_model_matrix(obj, cluster, inverse_var, ignore_FE)

  if (is.null(S)) {
    U_list <- matrix_list(X, cluster, "row")
    rm(X, S)
    u <- p
    UW_list <- Map(function(u, w) as.matrix(t(u) %*% w), u = U_list, w = W_list)
    M_U <- w_scale * M
  } else {
    U_list <- matrix_list(cbind(X, S), cluster, "row")
    rm(X, S)
    u <- ncol(U_list[[1]])
    UW_list <- Map(function(u, w) as.matrix(t(u) %*% w), u = U_list, w = W_list)
    UWU_list <- Map(function(uw, u) uw %*% u, uw = UW_list, u = U_list)
    M_U <- chol2inv(chol(Reduce("+",UWU_list)))
    rm(UWU_list)
  }

  M_U_ct <- t(chol(M_U))

  ME_list <- lapply(E_list, function(e) M %*% e)

  G_list <- Map(function(me, theta) me %*% t(chol(theta)), me = ME_list, theta = target)

  if (inverse_var) {
    # Inverse-variance case: H is a single p x u x J array.
    H_array <- array(unlist(Map(function(me, u) me %*% u %*% M_U_ct, me = ME_list, u = U_list)), dim = c(p, u, J))
  } else {
    # General case: three H components, stored along the first dimension.
    H_array <- array(NA, dim = c(3, p, u, J))
    MEU_list <- Map(function(me, u) me %*% u, me = ME_list, u = U_list)
    H_array[1,,,] <- unlist(lapply(MEU_list, function(meu) meu %*% M_U_ct))
    TWU_list <- Map(function(t, w, u) t %*% w %*% u, t = target, w = W_list, u = U_list)
    MEF_list <- Map(function(me, twu) me %*% twu, me = ME_list, twu = TWU_list)
    H_array[2,,,] <- unlist(lapply(MEF_list, function(mef) mef %*% M_U_ct))
    rm(MEF_list)
    UWTWU_list <- Map(function(uw, twu) uw %*% twu, uw = UW_list, twu = TWU_list)
    Omega_ct <- t(chol(M_U %*% Reduce("+", UWTWU_list) %*% M_U))
    rm(TWU_list, UWTWU_list)
    H_array[3,,,] <- unlist(lapply(MEU_list, function(meu) meu %*% Omega_ct))
    rm(MEU_list, Omega_ct)
  }

  list(G = G_list, H = H_array)
}

#--------------------------
# get P array
#--------------------------

# Assemble the P array from the G/H components; branches on whether H has a
# single component (length(dims)==3) or three components, and on whether all
# pairwise terms are needed.
get_P_array <- function(GH, all_terms = FALSE) {

  dims <- dim(GH$H)

  if (all_terms) {
    if (length(dims)==3) {
      P_array <- array(NA, dim = c(dims[1], dims[1], dims[3], dims[3]))
      # Fill the upper triangle and mirror it by transposition.
      for (i in 1:dims[1]) for (j in i:dims[1]) {
        tmp <- -crossprod(GH$H[i,,], GH$H[j,,])
        diag(tmp) <- diag(tmp) + sapply(GH$G, function(x) sum(x[i,] * x[j,]))
        P_array[i,j,,] <- tmp
        if (j > i) P_array[j,i,,] <- t(tmp)
      }
    } else {
      P_array <- array(NA, dim = c(dims[2], dims[2], dims[4], dims[4]))
      for (i in 1:dims[2]) for (j in i:dims[2]) {
        tmp <- crossprod(GH$H[3,i,,], GH$H[3,j,,]) - crossprod(GH$H[1,i,,], GH$H[2,j,,]) - crossprod(GH$H[2,i,,], GH$H[1,j,,])
        diag(tmp) <- diag(tmp) + sapply(GH$G, function(x) sum(x[i,] * x[j,]))
        P_array[i,j,,] <- tmp
        if (j > i) P_array[j,i,,] <- t(tmp)
      }
    }
  } else {
    if (length(dims)==3) {
      P_array <- array(-apply(GH$H, 1, crossprod), dim = c(dims[3], dims[3], dims[1]))
      P_diag <- matrix(sapply(GH$G, function(x) rowSums(x^2)), nrow = dims[1], ncol = dims[3])
      for (i in 1:dims[1]) diag(P_array[,,i]) <- diag(P_array[,,i]) + P_diag[i,]
    } else {
      P_array <- array(apply(GH$H, 2, function(h) {
        uf <- crossprod(h[1,,], h[2,,])
        crossprod(h[3,,]) - uf - t(uf)
      }), dim = c(dims[4], dims[4], dims[2]))
      P_diag <- matrix(sapply(GH$G, function(x) rowSums(x^2)), nrow = dims[2], ncol = dims[4])
      for (i in 1:dims[2]) diag(P_array[,,i]) <- diag(P_array[,,i]) + P_diag[i,]
    }
  }

  P_array
}

#--------------------------
# get S array
#--------------------------

# Contribution of a single cluster cl to the S array.
Sj <- function(M, e, u, tc, cl, cluster, MUWTheta_cholT) {
  s <- -u %*% MUWTheta_cholT
  s[,cluster==cl] <- tc + s[,cluster==cl]
  M %*% e %*% s
}

# Assemble the p x N x J S array across all clusters.
get_S_array <- function(obj, vcov) {

  cluster <- attr(vcov, "cluster")
  M <- attr(vcov, "bread") / attr(vcov, "v_scale")
  E_list <- adjust_est_mats(type = attr(vcov, "type"),
                            est_mats = attr(vcov, "est_mats"),
                            adjustments = attr(vcov, "adjustments"))
  target <- attr(vcov, "target")
  inverse_var <- attr(vcov, "inverse_var")
  ignore_FE <- attr(vcov, "ignore_FE")

  N <- length(cluster)
  J <- nlevels(cluster)
  X <- model_matrix(obj)
  # Drop aliased (NA) coefficients from the design matrix.
  alias <- is.na(coef_CS(obj))
  if (any(alias)) X <- X[, !alias, drop = FALSE]
  p <- ncol(X)

  S <- augmented_model_matrix(obj, cluster, inverse_var, ignore_FE)

  if (is.null(S)) {
    U <- X
  } else {
    U <- cbind(X, S)
  }

  U_list <- matrix_list(U, cluster, "row")
  W_list <- weightMatrix(obj, cluster)
  UW_list <- Map(function(u, w) as.matrix(t(u) %*% w), u = U_list, w = W_list)
  UWU_list <- Map(function(uw, u) uw %*% u, uw = UW_list, u = U_list)
  M_U <- chol2inv(chol(Reduce("+",UWU_list)))

  Theta_cholT <- lapply(target, function(x) t(chol(x)))
  UWThetaC_list <- Map(function(uw, tc) uw %*% tc, uw = UW_list, tc = Theta_cholT)
  # order(order(cluster)) restores the original observation ordering.
  MUWTheta_cholT <- M_U %*% (matrix(unlist(UWThetaC_list), ncol(U), N)[,order(order(cluster))])

  S_list <- mapply(Sj, e = E_list, u = U_list, tc = Theta_cholT, cl = levels(cluster),
                   MoreArgs = list(M = M, cluster=cluster, MUWTheta_cholT=MUWTheta_cholT),
                   SIMPLIFY = FALSE)

  array(unlist(S_list), dim = c(p, N, J))
}
clubSandwich/MD50000644000176200001440000001101413576443015013175 0ustar liggesusersc2e58ac75c1feabcba3ba90c9e24996e *DESCRIPTION
abfda0d84d00658b87c6850cb4306da5 *NAMESPACE
0741babd75f40c074c69243a59f69abc *NEWS
a1c4db8cc7d1e4f2209e81d9049bccac *R/CR-adjustments.R
750c714948a24ffbcfb1214ab89e7313 *R/S3-methods.R
021ebffbfeb9d5a72cf5a8a936e7c5e7 *R/Wald_test.R de4d76d601c237b01797bbf72ef1dc5f *R/clubSandwich.R 31402e2a7e422c046b7d3ab9417fd453 *R/coef_test.R 6f94a28e786817c8f5dc93352bcadab1 *R/conf_int.R 66c5aa4090f478891b3f6afde01fe078 *R/data-documentation.R cd8f3652ae1e298bed0bd519a3712fa1 *R/get_arrays.R ed27fa133476dd2ba3039739723348c4 *R/glm.R e05f7da65551fbc893a0d58b028735a0 *R/gls.R 9fe40c73d50b4113ae0550ea5320109a *R/ivreg.R 59bc8c24148f371989382d9ae975f4ec *R/lm.R 8548b499e476eeaab6c76d41155108d7 *R/lme.R 67e2c378e08e56d3e84748c7e8b75990 *R/lmer.R f0537c64257f9d2a576d0097ea1fe743 *R/matrix-functions.R 01519df9d9dca55fec42408ed46595dd *R/mlm.R 85bde11f01ff7dc4cfca2c2bb02ff4b2 *R/plm.R 100b26e0fecc0da9741a090dd78f88dd *R/rma-mv.R a14a48efe3dec58403cef76ea7bb8a75 *R/rma-uni.R 70be9c10b05e78de10094e72b3607cce *R/robu.R 8adbfea94a6bbd749a103b28cf41c639 *R/utilities.R a28ccff09980979e3551daef908826d4 *build/partial.rdb 11eb3a0141e043b061a50f6e62dc1f96 *build/vignette.rds 8e550f05dda31b5c40d1817bae9a4363 *data/AchievementAwardsRCT.RData 34360e013336c721ad97d9c3658db4df *data/MortalityRates.RData 320879cf26bc98529a421a78626f4284 *data/SATcoaching.RData 6402f15efe8b3298b8dea6d9811a42ce *data/dropoutPrevention.RData 3573645e49c4c7950b958e2ad8fd1272 *inst/doc/meta-analysis-with-CRVE.R 1ebc34fe0b65d28f4bd116c827c3e00f *inst/doc/meta-analysis-with-CRVE.Rmd 117d56f6056865dc7c45970f957380d4 *inst/doc/meta-analysis-with-CRVE.html d3d5b52fd35e25c5de58d276a6d8e616 *inst/doc/panel-data-CRVE.R 7dd70cc8ee36fcd678135eebf0fb765d *inst/doc/panel-data-CRVE.Rmd 6c26756fc786bae56ecb7e8e2be39fe6 *inst/doc/panel-data-CRVE.html 55617f9d5a1d3435f35f6b8960894e6e *man/AchievementAwardsRCT.Rd a2abbe70b7799177c5e657e04105a5d6 *man/MortalityRates.Rd 8ccf156373eed2718bf8b6f2b7f07ed3 *man/SATcoaching.Rd a79939dc7e5c873dfbeeace8261f8355 *man/Wald_test.Rd 4a41cb944983ef7863fe417431c85739 *man/coef_test.Rd b90a8d1ebb98e8ab948b596d409b466d *man/conf_int.Rd 4a6804edadf2acfdc9fbfbf0be92d296 
*man/dropoutPrevention.Rd 522124ecfbe4be2a5197e0dc35b8c2b0 *man/impute_covariance_matrix.Rd e2c48842373d4077cddd693ace38c9d3 *man/vcovCR.Rd 547b5adb0a23ce57af4fa1a35943ab4d *man/vcovCR.glm.Rd 806613ec6cb0bdae4206f0e715623432 *man/vcovCR.gls.Rd 7b5a42049223c2cb0f3d669282d28592 *man/vcovCR.ivreg.Rd 98004b1622f4ad3ea9ceef2d258d3781 *man/vcovCR.lm.Rd a11f7f0fd82ba50f349b0a1b6035ae18 *man/vcovCR.lme.Rd 473b5eb4fadcb110f07c668c719e8935 *man/vcovCR.lmerMod.Rd 1d7cb5679c42087ba89727bc3916fda4 *man/vcovCR.mlm.Rd 4d24f765ff88ceb5f852296d346c1547 *man/vcovCR.plm.Rd d263d352865c50ab3236a076fa50056b *man/vcovCR.rma.mv.Rd a467e84569c8d495e861a463c698d259 *man/vcovCR.rma.uni.Rd b9f427d85777316850aaa695a993b6f7 *man/vcovCR.robu.Rd 77ee716352046c1d86deb383d1d5497a *tests/testthat.R fde6a1d60a30f78bcc363b2411ea374e *tests/testthat/test_Wald.R 2284036981b9197c55ba06fd421a82d4 *tests/testthat/test_coef.R 1263754d686c24deb0feb663a160a195 *tests/testthat/test_conf_int.R c9057e8329dc976e15120205cbbdd75b *tests/testthat/test_glm_logit.R 15e946ac467919c121eb9e3a7c88342b *tests/testthat/test_gls.R 5ec67b4bc1ad7fa7e42b542581e5d2d1 *tests/testthat/test_ignore_absorption.R b889419f0af0d03fd1fa400d9118ce10 *tests/testthat/test_impute_covariance_matrix.R f31d0900fcf58534bc397aa43473f560 *tests/testthat/test_intercept_formulas.R 6a5046ba326892031decaa13ec27687c *tests/testthat/test_ivreg.R 6caedb1ddcb26f77a13d3727f2ee4013 *tests/testthat/test_lm.R 94b8860c99468004b739da03d3e1a037 *tests/testthat/test_lme_2level.R 5fc1f9485c10727bb7771a85e4375c9a *tests/testthat/test_lme_3level.R 7185b64f85d2221986d4b811f11832b5 *tests/testthat/test_lmerMod.R 13f104560e6dcd2991376e05c08b8f54 *tests/testthat/test_mlm.R bba3a3ffdcc908f1ac7267a230d66ee1 *tests/testthat/test_plm-ID-variables.R d641ff01ea8a58ac4b66d5251abe006d *tests/testthat/test_plm-first-differences.R 1252fc58fa60e47901f17dfe0c94afd2 *tests/testthat/test_plm-fixed-effects.R d73054e81890417694533c0efe2ce2db *tests/testthat/test_plm-random-effects.R 
bb59c13a46856e4552551523f4fc61f8 *tests/testthat/test_plm-unbalanced-fixed-effects.R 1112e802da094ca51076874e70698ca6 *tests/testthat/test_rma-mv.R d272b700109417ff2348c895f2e9455e *tests/testthat/test_rma-uni.R 3b69b05daf52ce79c8337e9330bfcb0f *tests/testthat/test_robu.R 1ebc34fe0b65d28f4bd116c827c3e00f *vignettes/meta-analysis-with-CRVE.Rmd 7dd70cc8ee36fcd678135eebf0fb765d *vignettes/panel-data-CRVE.Rmd clubSandwich/inst/0000755000176200001440000000000013576425047013652 5ustar liggesusersclubSandwich/inst/doc/0000755000176200001440000000000013576425047014417 5ustar liggesusersclubSandwich/inst/doc/meta-analysis-with-CRVE.Rmd0000644000176200001440000001465213574216757021354 0ustar liggesusers--- title: "Meta-analysis with cluster-robust variance estimation" author: "James E. Pustejovsky" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Meta-analysis with cluster-robust variance estimation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- This vignette dimeonstrates how to use the `clubSandwich` package to conduct a meta-analysis of dependent effect sizes with robust variance estimation. Tests of meta-regression coefficients and F-tests of multiple-coefficient hypotheses are calculated using small-sample corrections proposed by Tipton (2015) and Tipton and Pustejovsky (2015). The example uses a dataset of effect sizes from a Campbell Collaboration systematic review of dropout prevention programs, conducted by Sandra Jo Wilson and colleagues (2011). The original analysis included a meta-regression with covariates that capture methodological, participant, and program characteristics. The regression specification used here is similar to Model III from Wilson et al. (2011), but treats the `evaluator_independence` and `implementation_quality` variables as categorical rather than interval-level. 
Also, the original analysis clustered at the level of the sample (some studies reported results from multiple samples), whereas here we cluster at the study level. The meta-regression can be fit in several different ways. We first demonstrate using the `robumeta` package (Fisher & Tipton, 2015) and then using the `metafor` package (Viechtbauer, 2010). ## robumeta model ```{r, include=FALSE} options(width = 100) ``` ```{r, message = FALSE} library(clubSandwich) library(robumeta) data(dropoutPrevention) # clean formatting names(dropoutPrevention)[7:8] <- c("eval","implement") levels(dropoutPrevention$eval) <- c("independent","indirect","planning","delivery") levels(dropoutPrevention$implement) <- c("low","medium","high") levels(dropoutPrevention$program_site) <- c("community","mixed","classroom","school") levels(dropoutPrevention$study_design) <- c("matched","unmatched","RCT") levels(dropoutPrevention$adjusted) <- c("no","yes") m3_robu <- robu(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + eval + male_pct + white_pct + average_age + implement + program_site + duration + service_hrs, data = dropoutPrevention, studynum = studyID, var.eff.size = varLOR, modelweights = "HIER") print(m3_robu) ``` Note that `robumeta` produces small-sample corrected standard errors and t-tests, and so there is no need to repeat those calculations with `clubSandwich`. The `eval` variable has four levels, and it might be of interest to test whether the average program effects differ by the degree of evaluator independence. The null hypothesis in this case is that the 10th, 11th, and 12th regression coefficients are all equal to zero. A small-sample adjusted F-test for this hypothesis can be obtained as follows. The `vcov = "CR2"` option means that the standard errors will be corrected using the bias-reduced linearization estimator described in Tipton and Pustejovsky (2015). 
```{r} Wald_test(m3_robu, constraints = 10:12, vcov = "CR2") ``` By default, the `Wald_test` function provides an F-type test with degrees of freedom estimated using the approximate Hotelling's $T^2_Z$ method. The test has less than 17 degrees of freedom, even though there are 152 independent studies in the data, and has a p-value that is not quite significant at conventional levels. The low degrees of freedom are a consequence of the fact that one of the levels of `evaluator independence` has only a few effect sizes in it: ```{r} table(dropoutPrevention$eval) ``` ## metafor model `clubSandwich` also works with models fit using the `metafor` package. Here we re-fit the same regression specification, but use REML to estimate the variance components (`robumeta` uses a method-of-moments estimator), as well as a somewhat different weighting scheme than that used in `robumeta`. ```{r, message = FALSE} library(metafor) m3_metafor <- rma.mv(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + eval + male_pct + white_pct + average_age + implement + program_site + duration + service_hrs, V = varLOR, random = list(~ 1 | studyID, ~ 1 | studySample), data = dropoutPrevention) summary(m3_metafor) ``` `metafor` produces model-based standard errors, t-tests, and confidence intervals. The `coef_test` function from `clubSandwich` will calculate robust standard errors and robust t-tests for each of the coefficients: ```{r} coef_test(m3_metafor, vcov = "CR2") ``` Note that `coef_test` assumed that it should cluster based on `studyID`, which is the outer-most random effect in the metafor model. This can be specified explicitly by including the option `cluster = dropoutPrevention$studyID` in the call. The F-test for degree of evaluator independence uses the same syntax as before: ```{r} Wald_test(m3_metafor, constraints = 10:12, vcov = "CR2") ``` Despite some differences in weighting schemes, the p-value is very close to the result obtained using `robumeta`. 
## References Fisher, Z., & Tipton, E. (2015). robumeta: An R-package for robust variance estimation in meta-analysis. [arXiv:1503.02220](http://arxiv.org/abs/1503.02220) Tipton, E. (2015). Small sample adjustments for robust variance estimation with meta-regression. _Psychological Methods, 20_(3), 375-393. doi: [10.1037/met0000011](https://doi.org/10.1037/met0000011) Tipton, E., & Pustejovsky, J. E. (2015). Small-sample adjustments for tests of moderators and model fit using robust variance estimation in meta-regression. _Journal of Educational and Behavioral Statistics, 40_(6), 604-634. doi: [10.3102/1076998615606099](https://doi.org/10.3102/1076998615606099) Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. _Journal of Statistical Software, 36_(3), 1-48. URL: http://www.jstatsoft.org/v36/i03/ Wilson, S. J., Lipsey, M. W., Tanner-Smith, E., Huang, C. H., & Steinka-Fry, K. T. (2011). Dropout prevention and intervention programs: Effects on school completion and dropout Among school-aged children and youth: A systematic review. _Campbell Systematic Reviews, 7_(8). 
URL: http://www.campbellcollaboration.org/lib/project/158/clubSandwich/inst/doc/panel-data-CRVE.R0000644000176200001440000000613313576425047017310 0ustar liggesusers## ------------------------------------------------------------------------ library(clubSandwich) data(MortalityRates) # subset for deaths in motor vehicle accidents, 1970-1983 MV_deaths <- subset(MortalityRates, cause=="Motor Vehicle" & year <= 1983 & !is.na(beertaxa)) # fit by OLS lm_unweighted <- lm(mrate ~ 0 + legal + beertaxa + factor(state) + factor(year), data = MV_deaths) ## ------------------------------------------------------------------------ coef_test(lm_unweighted, vcov = "CR1", cluster = MV_deaths$state, test = "naive-t")[1:2,] ## ------------------------------------------------------------------------ coef_test(lm_unweighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")[1:2,] ## ---- message = FALSE---------------------------------------------------- library(plm) plm_unweighted <- plm(mrate ~ legal + beertaxa, data = MV_deaths, effect = "twoways", index = c("state","year")) coef_test(plm_unweighted, vcov = "CR1", cluster = "individual", test = "naive-t") coef_test(plm_unweighted, vcov = "CR2", cluster = "individual", test = "Satterthwaite") ## ------------------------------------------------------------------------ lm_weighted <- lm(mrate ~ 0 + legal + beertaxa + factor(state) + factor(year), weights = pop, data = MV_deaths) coef_test(lm_weighted, vcov = "CR1", cluster = MV_deaths$state, test = "naive-t")[1:2,] coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")[1:2,] ## ------------------------------------------------------------------------ coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, target = 1 / MV_deaths$pop, test = "Satterthwaite")[1:2,] ## ------------------------------------------------------------------------ plm_random <- plm(mrate ~ 0 + legal + beertaxa + year, data = MV_deaths, effect = "individual", 
index = c("state","year"), model = "random") coef_test(plm_random, vcov = "CR1", test = "naive-t")[1:2,] coef_test(plm_random, vcov = "CR2", test = "Satterthwaite")[1:2,] ## ------------------------------------------------------------------------ MV_deaths <- within(MV_deaths, { legal_cent <- legal - tapply(legal, state, mean)[factor(state)] beer_cent <- beertaxa - tapply(beertaxa, state, mean)[factor(state)] }) plm_Hausman <- plm(mrate ~ 0 + legal + beertaxa + legal_cent + beer_cent + factor(year), data = MV_deaths, effect = "individual", index = c("state","year"), model = "random") coef_test(plm_Hausman, vcov = "CR2", test = "Satterthwaite")[1:4,] ## ------------------------------------------------------------------------ Wald_test(plm_Hausman, constraints = c("legal_cent","beer_cent"), vcov = "CR1", test = "chi-sq") ## ------------------------------------------------------------------------ Wald_test(plm_Hausman, constraints = c("legal_cent","beer_cent"), vcov = "CR2") clubSandwich/inst/doc/panel-data-CRVE.Rmd0000644000176200001440000003573213574216757017644 0ustar liggesusers--- title: "Cluster-robust standard errors and hypothesis tests in panel data models" author: "James E. Pustejovsky" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Cluster-robust standard errors and hypothesis tests in panel data models} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The importance of using cluster-robust variance estimators (i.e., "clustered standard errors") in panel models is now widely recognized. Less widely recognized is the fact that standard methods for constructing hypothesis tests and confidence intervals based on CRVE can perform quite poorly in when based on a limited number of independent clusters. 
Furthermore, it can be difficult to determine what counts as a large-enough sample to trust standard CRVE methods, because the finite-sample behavior of the variance estimators and test statistics depends on the configuration of the covariates, not just the total number of clusters. One solution to this problem is to use bias-reduced linearization (BRL), which was proposed by Bell and McCaffrey (2002) and has recently begun to receive attention in the econometrics literature (e.g., Cameron & Miller, 2015; Imbens & Kolesar, 2015). The idea of BRL is to correct the bias of standard CRVE based on a working model, and then to use a degrees-of-freedom correction for Wald tests based on the bias-reduced CRVE. That may seem silly (after all, the whole point of CRVE is to avoid making distributional assumptions about the errors in your model), but it turns out that the correction can help quite a bit, even when the working model is wrong. The degrees-of-freedom correction is based on a standard Satterthwaite-type approximation, and also relies on the working model. A problem with Bell and McCaffrey's original formulation of BRL is that it does not work in some very common models for panel data, such as state-by-year panels that include fixed effects for each state and each year (Angrist and Pischke, 2009, point out this issue in their chapter on "non-standard standard error issues"; see also Young, 2016). However, Pustejovsky and Tipton (2016) proposed a generalization of BRL that works even in models with arbitrary sets of fixed effects, and this generalization is implemented in `clubSandwich` as CRVE type `CR2`. The package also implements small-sample corrections for multiple-constraint hypothesis tests based on an approximation proposed by Pustejovsky and Tipton (2016). For one-parameter constraints, the test reduces to a t-test with Satterthwaite degrees of freedom, and so it is a natural extension of BRL. 
The following example demonstrates how to use `clubSandwich` to do cluster-robust inference for a state-by-year panel model with fixed effects in both dimensions, clustering by states. ## Effects of changing the minimum legal drinking age Carpenter and Dobkin (2011) analyzed the effects of changes in the minimum legal drinking age on rates of motor vehicle fatalies among 18-20 year olds, using state-level panel data from the National Highway Traffic Administration's Fatal Accident Reporting System. In their new textbook, Angrist and Pischke (2014) developed a stylized example based on Carpenter and Dobkin's work. The following example uses Angrist and Pischke's data and follows their analysis because their data are [easily available](http://masteringmetrics.com/resources/). The outcome is the incidence of deaths in motor vehicle crashes among 18-20 year-olds (per 100,000 residents), for each state plus the District of Columbia, over the period 1970 to 1983. There were several changes in the minimum legal drinking age during this time period, with variability in the timing of changes across states. Angrist and Pischke (following Carpenter and Dobkin) use a difference-in-differences strategy to estimate the effects of lowering the minimum legal drinking age from 21 to 18. Their specification is $$y_{it} = \alpha_i + \beta_t + \gamma b_{it} + \delta d_{it} + \epsilon_{it},$$ for $i$ = 1,...,51 and $t$ = 1970,...,1983. In this model, $\alpha_i$ is a state-specific fixed effect, $\beta_t$ is a year-specific fixed effect, $b_{it}$ is the current rate of beer taxation in state $i$ in year $t$, $d_{it}$ is the proportion of 18-20 year-olds in state $i$ in year $t$ who are legally allowed to drink, and $\delta$ captures the effect of shifting the minimum legal drinking age from 21 to 18. 
Following Angrist and Pischke's analysis, we estimate this model both by (unweighted) OLS and by weighted least squares with weights corresponding to population size in a given state and year. We also demonstrate random effects estimation and implement a cluster-robust Hausman specification test.

## Unweighted OLS

The following code does some simple data-munging and then estimates the model by OLS:
Here's how to apply this approach in the example: ```{r} coef_test(lm_unweighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")[1:2,] ``` The Satterthwaite degrees of freedom are different for each coefficient in the model, and so the `coef_test` function reports them right alongside the standard error. For the effect of legal drinking age, the degrees of freedom are about half of what might be expected, given that there are 51 clusters. The p-value for the CR2+Satterthwaite test is about twice as large as the p-value based on the standard Wald test, although the coefficient is still statistically significant at conventional levels. Note, however, that the degrees of freedom on the beer taxation rate are considerably smaller because there are only a few states with substantial variability in taxation rates over time. ## Unweighted "within" estimation The `plm` package in R provides another way to estimate the same model. It is convenient because it absorbs the state and year fixed effects before estimating the effect of `legal`. The `clubSandwich` package works with fitted `plm` models too: ```{r, message = FALSE} library(plm) plm_unweighted <- plm(mrate ~ legal + beertaxa, data = MV_deaths, effect = "twoways", index = c("state","year")) coef_test(plm_unweighted, vcov = "CR1", cluster = "individual", test = "naive-t") coef_test(plm_unweighted, vcov = "CR2", cluster = "individual", test = "Satterthwaite") ``` ## Population-weighted estimation The difference between the standard method and the new method are not terribly exciting in the above example. However, things change quite a bit if the model is estimated using population weights. We go back to fitting in `lm` with dummies for all the fixed effects because `plm` does not handle weighted least squares. 
```{r} lm_weighted <- lm(mrate ~ 0 + legal + beertaxa + factor(state) + factor(year), weights = pop, data = MV_deaths) coef_test(lm_weighted, vcov = "CR1", cluster = MV_deaths$state, test = "naive-t")[1:2,] coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")[1:2,] ``` Using population weights slightly reduces the point estimate of the effect, while also slightly increasing its precision. If you were following the standard approach, you would probably be happy with the weighted estimates and wouldn't think about it any further. However, using the CR2 variance estimator and Satterthwaite correction produces a p-value that is an order of magnitude larger (though still significant at the conventional 5% level). The degrees of freedom are just `r round(coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, test = "Satterthwaite")["legal","df"], 1)`---drastically smaller than would be expected based on the number of clusters. Even with weights, the `coef_test` function uses an "independent, homoskedastic" working model as a default for `lm` objects. In the present example, the outcome is a standardized rate and so a better assumption might be that the error variances are inversely proportional to population size. The following code uses this alternate working model: ```{r} coef_test(lm_weighted, vcov = "CR2", cluster = MV_deaths$state, target = 1 / MV_deaths$pop, test = "Satterthwaite")[1:2,] ``` The new working model leads to slightly smaller standard errors and a couple of additional degrees of freedom, though they remain in small-sample territory. ## Random effects estimation If the unobserved effects $\alpha_1,...,\alpha_{51}$ are uncorrelated with the regressors, then a more efficient way to estimate $\gamma,\delta$ is by weighted least squares, with weights based on a random effects model. We still treat the year effects as fixed. 
```{r} plm_random <- plm(mrate ~ 0 + legal + beertaxa + year, data = MV_deaths, effect = "individual", index = c("state","year"), model = "random") coef_test(plm_random, vcov = "CR1", test = "naive-t")[1:2,] coef_test(plm_random, vcov = "CR2", test = "Satterthwaite")[1:2,] ``` With random effects estimation, the effect of legal drinking age is smaller by about 1 death per 100,000. As a procedural aside, note that `coef_test` infers that `state` is the clustering variable because the call to plm includes only one type of effects (random state effects). ## Robust Hausman test CRVE is also used in specification tests, as in the artificial Hausman-type test for endogeneity of unobserved effects (Arellano, 1993). As noted above, random effects estimation is more efficient than fixed effects estimation, but requires the assumption that the unobserved effects are uncorrelated with the regressors. However, if the unobserved effects covary with $\mathbf{b}_i, \mathbf{d}_i$, then the random-effects estimator will be biased. We can test for whether endogeneity is a problem by including group-centered covariates as additional regressors. Let $\tilde{d}_{it} = d_{it} - \frac{1}{T}\sum_t d_{it}$, with $\tilde{b}_{it}$ defined analogously. Now estimate the regression $$y_{it} = \beta_t + \gamma_1 b_{it} + \gamma_2 \tilde{b}_{it} + \delta_1 d_{it} + \delta_2 \tilde{d}_{it} + \epsilon_{it},$$ which does not include state fixed effects. The parameters $\gamma_2,\delta_2$ represent the differences between the within-groups and between-groups estimands of $\gamma_1, \delta_1$. If these are both zero, then the random effects estimator is unbiased. Thus, the joint test for $H_0: \gamma_2 = \delta_2 = 0$ amounts to a test for exogeneity of the unobserved effects. 
For efficiency, we estimate this specification using weighted least squares (although OLS would be valid too): ```{r} MV_deaths <- within(MV_deaths, { legal_cent <- legal - tapply(legal, state, mean)[factor(state)] beer_cent <- beertaxa - tapply(beertaxa, state, mean)[factor(state)] }) plm_Hausman <- plm(mrate ~ 0 + legal + beertaxa + legal_cent + beer_cent + factor(year), data = MV_deaths, effect = "individual", index = c("state","year"), model = "random") coef_test(plm_Hausman, vcov = "CR2", test = "Satterthwaite")[1:4,] ``` To conduct a joint test on the centered covariates, we can use the `Wald_test` function. The usual way to test this hypothesis would be to use the `CR1` variance estimator to calculate the robust Wald statistic, then use a $\chi^2_2$ reference distribution (or equivalently, compare a re-scaled Wald statistic to an $F(2,\infty)$ distribution). The `Wald_test` function reports the latter version: ```{r} Wald_test(plm_Hausman, constraints = c("legal_cent","beer_cent"), vcov = "CR1", test = "chi-sq") ``` The test is just shy of significance at the 5% level. If we instead use the `CR2` variance estimator and our newly proposed approximate F-test (which is the default in `Wald_test`), then we get: ```{r} Wald_test(plm_Hausman, constraints = c("legal_cent","beer_cent"), vcov = "CR2") ``` The low degrees of freedom of the test indicate that we're definitely in small-sample territory and should not trust the asymptotic $\chi^2$ approximation. ## References Angrist, J. D., & Pischke, J. (2009). _Mostly harmless econometrics: An empiricist’s companion_. Princeton, NJ: Princeton University Press. Angrist, J. D., and Pischke, J. S. (2014). _Mastering'metrics: the path from cause to effect_. Princeton, NJ: Princeton University Press. Arellano, M. (1993). On the testing of correlated effects with panel data. Journal of Econometrics, 59(1-2), 87-97. 
doi: [10.1016/0304-4076(93)90040-C](http://www.sciencedirect.com/science/article/pii/030440769390040C) Bell, R. M., & McCaffrey, D. F. (2002). Bias reduction in standard errors for linear regression with multi-stage samples. _Survey Methodology, 28_(2), 169-181. Cameron, A. C., & Miller, D. L. (2015). A practitioner’s guide to cluster-robust inference. URL: http://cameron.econ.ucdavis.edu/research/Cameron_Miller_JHR_2015_February.pdf Carpenter, C., & Dobkin, C. (2011). The minimum legal drinking age and public health. _Journal of Economic Perspectives, 25_(2), 133-156. doi: [10.1257/jep.25.2.133](https://doi.org/10.1257/jep.25.2.133) Imbens, G. W., & Kolesar, M. (2015). Robust standard errors in small samples: Some practical advice. URL: https://www.princeton.edu/~mkolesar/papers/small-robust.pdf Pustejovsky, J. E. & Tipton, E. (2016). Small sample methods for cluster-robust variance estimation and hypothesis testing in fixed effects models. arXiv: [1601.01981](https://arxiv.org/abs/1601.01981) [stat.ME] Young, A. (2016). Improved, nearly exact, statistical inference with robust and clustered covariance matrices using effective degrees of freedom corrections. 
clubSandwich/inst/doc/meta-analysis-with-CRVE.R0000644000176200001440000000430013576425045021012 0ustar liggesusers## ---- include=FALSE------------------------------------------------------------------------------- options(width = 100) ## ---- message = FALSE----------------------------------------------------------------------------- library(clubSandwich) library(robumeta) data(dropoutPrevention) # clean formatting names(dropoutPrevention)[7:8] <- c("eval","implement") levels(dropoutPrevention$eval) <- c("independent","indirect","planning","delivery") levels(dropoutPrevention$implement) <- c("low","medium","high") levels(dropoutPrevention$program_site) <- c("community","mixed","classroom","school") levels(dropoutPrevention$study_design) <- c("matched","unmatched","RCT") levels(dropoutPrevention$adjusted) <- c("no","yes") m3_robu <- robu(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + eval + male_pct + white_pct + average_age + implement + program_site + duration + service_hrs, data = dropoutPrevention, studynum = studyID, var.eff.size = varLOR, modelweights = "HIER") print(m3_robu) ## ------------------------------------------------------------------------------------------------- Wald_test(m3_robu, constraints = 10:12, vcov = "CR2") ## ------------------------------------------------------------------------------------------------- table(dropoutPrevention$eval) ## ---- message = FALSE----------------------------------------------------------------------------- library(metafor) m3_metafor <- rma.mv(LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + eval + male_pct + white_pct + average_age + implement + program_site + duration + service_hrs, V = varLOR, random = list(~ 1 | studyID, ~ 1 | studySample), data = dropoutPrevention) summary(m3_metafor) ## ------------------------------------------------------------------------------------------------- coef_test(m3_metafor, vcov = "CR2") ## 
------------------------------------------------------------------------------------------------- Wald_test(m3_metafor, constraints = 10:12, vcov = "CR2") clubSandwich/inst/doc/meta-analysis-with-CRVE.html0000644000176200001440000006743713576425045021601 0ustar liggesusers Meta-analysis with cluster-robust variance estimation

Meta-analysis with cluster-robust variance estimation

James E. Pustejovsky

2019-12-18

This vignette demonstrates how to use the clubSandwich package to conduct a meta-analysis of dependent effect sizes with robust variance estimation. Tests of meta-regression coefficients and F-tests of multiple-coefficient hypotheses are calculated using small-sample corrections proposed by Tipton (2015) and Tipton and Pustejovsky (2015). The example uses a dataset of effect sizes from a Campbell Collaboration systematic review of dropout prevention programs, conducted by Sandra Jo Wilson and colleagues (2011).

The original analysis included a meta-regression with covariates that capture methodological, participant, and program characteristics. The regression specification used here is similar to Model III from Wilson et al. (2011), but treats the evaluator_independence and implementation_quality variables as categorical rather than interval-level. Also, the original analysis clustered at the level of the sample (some studies reported results from multiple samples), whereas here we cluster at the study level. The meta-regression can be fit in several different ways. We first demonstrate using the robumeta package (Fisher & Tipton, 2015) and then using the metafor package (Viechtbauer, 2010).

robumeta model

## RVE: Hierarchical Effects Model with Small-Sample Corrections 
## 
## Model: LOR1 ~ study_design + attrition + group_equivalence + adjusted + outcome + eval + male_pct + white_pct + average_age + implement + program_site + duration + service_hrs 
## 
## Number of clusters = 152 
## Number of outcomes = 385 (min = 1 , mean = 2.53 , median = 1 , max = 30 )
## Omega.sq = 0.24907 
## Tau.sq = 0.1024663 
## 
##                           Estimate   StdErr t-value  dfs    P(|t|>) 95% CI.L 95% CI.U Sig
## 1           X.Intercept.  0.016899 0.615399  0.0275 16.9 0.97841541 -1.28228  1.31608    
## 2  study_designunmatched -0.002626 0.185142 -0.0142 40.5 0.98875129 -0.37667  0.37141    
## 3        study_designRCT -0.086872 0.140044 -0.6203 38.6 0.53869676 -0.37024  0.19650    
## 4              attrition  0.118889 0.247228  0.4809 15.5 0.63732597 -0.40666  0.64444    
## 5      group_equivalence  0.502463 0.195838  2.5657 28.7 0.01579282  0.10174  0.90318  **
## 6            adjustedyes -0.322480 0.125413 -2.5713 33.8 0.01470796 -0.57741 -0.06755  **
## 7        outcomeenrolled  0.097059 0.139842  0.6941 16.5 0.49727848 -0.19862  0.39274    
## 8      outcomegraduation  0.147643 0.134938  1.0942 30.2 0.28253825 -0.12786  0.42315    
## 9  outcomegraduation.ged  0.258034 0.169134  1.5256 16.3 0.14632629 -0.10006  0.61613    
## 10          evalindirect -0.765085 0.399109 -1.9170  6.2 0.10212896 -1.73406  0.20389    
## 11          evalplanning -0.920874 0.346536 -2.6574  5.6 0.04027061 -1.78381 -0.05794  **
## 12          evaldelivery -0.916673 0.304303 -3.0124  4.7 0.03212299 -1.71432 -0.11903  **
## 13              male_pct  0.167965 0.181538  0.9252 16.4 0.36824526 -0.21609  0.55202    
## 14             white_pct  0.022915 0.149394  0.1534 21.8 0.87950385 -0.28704  0.33287    
## 15           average_age  0.037102 0.027053  1.3715 21.2 0.18458247 -0.01913  0.09333    
## 16       implementmedium  0.411779 0.128898  3.1946 26.7 0.00358205  0.14714  0.67642 ***
## 17         implementhigh  0.658570 0.123874  5.3164 34.6 0.00000635  0.40699  0.91015 ***
## 18     program_sitemixed  0.444384 0.172635  2.5741 28.6 0.01550504  0.09109  0.79768  **
## 19 program_siteclassroom  0.426658 0.159773  2.6704 37.4 0.01115192  0.10303  0.75028  **
## 20    program_siteschool  0.262517 0.160519  1.6354 30.1 0.11236814 -0.06525  0.59028    
## 21              duration  0.000427 0.000873  0.4895 36.7 0.62736846 -0.00134  0.00220    
## 22           service_hrs -0.003434 0.005012 -0.6852 36.7 0.49752503 -0.01359  0.00672    
## ---
## Signif. codes: < .01 *** < .05 ** < .10 *
## ---
## Note: If df < 4, do not trust the results

Note that robumeta produces small-sample corrected standard errors and t-tests, and so there is no need to repeat those calculations with clubSandwich. The eval variable has four levels, and it might be of interest to test whether the average program effects differ by the degree of evaluator independence. The null hypothesis in this case is that the 10th, 11th, and 12th regression coefficients are all equal to zero. A small-sample adjusted F-test for this hypothesis can be obtained as follows. The vcov = "CR2" option means that the standard errors will be corrected using the bias-reduced linearization estimator described in Tipton and Pustejovsky (2015).

##  Test    F d.f.  p.val
##   HTZ 2.78 16.8 0.0732

By default, the Wald_test function provides an F-type test with degrees of freedom estimated using the approximate Hotelling’s \(T^2_Z\) method. The test has less than 17 degrees of freedom, even though there are 152 independent studies in the data, and has a p-value that is not quite significant at conventional levels. The low degrees of freedom are a consequence of the fact that one of the levels of evaluator independence has only a few effect sizes in it:

## 
## independent    indirect    planning    delivery 
##           6          33          43         303

metafor model

clubSandwich also works with models fit using the metafor package. Here we re-fit the same regression specification, but use REML to estimate the variance components (robumeta uses a method-of-moments estimator), as well as a somewhat different weighting scheme than that used in robumeta.

## 
## Multivariate Meta-Analysis Model (k = 385; method: REML)
## 
##    logLik   Deviance        AIC        BIC       AICc 
## -489.0357   978.0714  1026.0714  1119.5371  1029.6217   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed       factor 
## sigma^2.1  0.2274  0.4769    152     no      studyID 
## sigma^2.2  0.1145  0.3384    317     no  studySample 
## 
## Test for Residual Heterogeneity:
## QE(df = 363) = 1588.4397, p-val < .0001
## 
## Test of Moderators (coefficients 2:22):
## QM(df = 21) = 293.8694, p-val < .0001
## 
## Model Results:
## 
##                        estimate      se     zval    pval    ci.lb    ci.ub 
## intrcpt                  0.5296  0.7250   0.7304  0.4651  -0.8915   1.9506      
## study_designunmatched   -0.0494  0.1722  -0.2871  0.7741  -0.3870   0.2881      
## study_designRCT          0.0653  0.1628   0.4010  0.6884  -0.2538   0.3843      
## attrition               -0.1366  0.2429  -0.5623  0.5739  -0.6126   0.3395      
## group_equivalence        0.4071  0.1573   2.5877  0.0097   0.0988   0.7155   ** 
## adjustedyes             -0.3581  0.1532  -2.3371  0.0194  -0.6585  -0.0578    * 
## outcomeenrolled         -0.2831  0.0771  -3.6709  0.0002  -0.4343  -0.1320  *** 
## outcomegraduation       -0.0913  0.0657  -1.3896  0.1646  -0.2201   0.0375      
## outcomegraduation/ged    0.6983  0.0805   8.6750  <.0001   0.5406   0.8561  *** 
## evalindirect            -0.7530  0.4949  -1.5214  0.1282  -1.7230   0.2171      
## evalplanning            -0.7700  0.4869  -1.5814  0.1138  -1.7242   0.1843      
## evaldelivery            -1.0016  0.4600  -2.1774  0.0294  -1.9033  -0.1000    * 
## male_pct                 0.1021  0.1715   0.5951  0.5518  -0.2341   0.4382      
## white_pct                0.1223  0.1804   0.6777  0.4979  -0.2313   0.4758      
## average_age              0.0061  0.0291   0.2091  0.8344  -0.0509   0.0631      
## implementmedium          0.4738  0.1609   2.9445  0.0032   0.1584   0.7892   ** 
## implementhigh            0.6318  0.1471   4.2965  <.0001   0.3436   0.9201  *** 
## program_sitemixed        0.3289  0.2413   1.3631  0.1729  -0.1440   0.8019      
## program_siteclassroom    0.2920  0.1736   1.6821  0.0926  -0.0482   0.6321    . 
## program_siteschool       0.1616  0.1898   0.8515  0.3945  -0.2104   0.5337      
## duration                 0.0013  0.0009   1.3423  0.1795  -0.0006   0.0031      
## service_hrs             -0.0003  0.0047  -0.0654  0.9478  -0.0096   0.0090      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

metafor produces model-based standard errors, t-tests, and confidence intervals. The coef_test function from clubSandwich will calculate robust standard errors and robust t-tests for each of the coefficients:

##                    Coef.  Estimate       SE  t-stat  d.f. p-val (Satt) Sig.
## 1                intrcpt  0.529569 0.724851  0.7306 20.08      0.47347     
## 2  study_designunmatched -0.049434 0.204152 -0.2421 58.42      0.80952     
## 3        study_designRCT  0.065272 0.149146  0.4376 53.17      0.66342     
## 4              attrition -0.136575 0.306429 -0.4457 10.52      0.66485     
## 5      group_equivalence  0.407108 0.210917  1.9302 23.10      0.06595    .
## 6            adjustedyes -0.358124 0.136132 -2.6307 43.20      0.01176    *
## 7        outcomeenrolled -0.283124 0.237199 -1.1936  7.08      0.27108     
## 8      outcomegraduation -0.091295 0.091465 -0.9981  9.95      0.34188     
## 9  outcomegraduation/ged  0.698328 0.364882  1.9138  8.02      0.09188    .
## 10          evalindirect -0.752994 0.447670 -1.6820  6.56      0.13929     
## 11          evalplanning -0.769968 0.403898 -1.9063  6.10      0.10446     
## 12          evaldelivery -1.001648 0.355989 -2.8137  4.89      0.03834    *
## 13              male_pct  0.102055 0.148410  0.6877  9.68      0.50782     
## 14             white_pct  0.122255 0.141470  0.8642 16.88      0.39961     
## 15           average_age  0.006084 0.033387  0.1822 15.79      0.85772     
## 16       implementmedium  0.473789 0.148660  3.1871 22.44      0.00419   **
## 17         implementhigh  0.631842 0.138073  4.5761 28.68      < 0.001  ***
## 18     program_sitemixed  0.328941 0.196848  1.6710 27.47      0.10607     
## 19 program_siteclassroom  0.291952 0.146014  1.9995 42.70      0.05195    .
## 20    program_siteschool  0.161640 0.171700  0.9414 29.27      0.35420     
## 21              duration  0.001270 0.000978  1.2988 31.96      0.20332     
## 22           service_hrs -0.000309 0.004828 -0.0641 49.63      0.94915

Note that coef_test assumed that it should cluster based on studyID, which is the outer-most random effect in the metafor model. This can be specified explicitly by including the option cluster = dropoutPrevention$studyID in the call.

The F-test for degree of evaluator independence uses the same syntax as before:

##  Test    F d.f.  p.val
##   HTZ 2.71 18.3 0.0753

Despite some differences in weighting schemes, the p-value is very close to the result obtained using robumeta.

References

Fisher, Z., & Tipton, E. (2015). robumeta: An R-package for robust variance estimation in meta-analysis. arXiv:1503.02220

Tipton, E. (2015). Small sample adjustments for robust variance estimation with meta-regression. Psychological Methods, 20(3), 375-393. doi: 10.1037/met0000011

Tipton, E., & Pustejovsky, J. E. (2015). Small-sample adjustments for tests of moderators and model fit using robust variance estimation in meta-regression. Journal of Educational and Behavioral Statistics, 40(6), 604-634. doi: 10.3102/1076998615606099

Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. Journal of Statistical Software, 36(3), 1-48. URL: http://www.jstatsoft.org/v36/i03/

Wilson, S. J., Lipsey, M. W., Tanner-Smith, E., Huang, C. H., & Steinka-Fry, K. T. (2011). Dropout prevention and intervention programs: Effects on school completion and dropout among school-aged children and youth: A systematic review. Campbell Systematic Reviews, 7(8). URL: http://www.campbellcollaboration.org/lib/project/158/

clubSandwich/inst/doc/panel-data-CRVE.html0000644000176200001440000011003613576425047020051 0ustar liggesusers Cluster-robust standard errors and hypothesis tests in panel data models

Cluster-robust standard errors and hypothesis tests in panel data models

James E. Pustejovsky

2019-12-18

The importance of using cluster-robust variance estimators (i.e., “clustered standard errors”) in panel models is now widely recognized. Less widely recognized is the fact that standard methods for constructing hypothesis tests and confidence intervals based on CRVE can perform quite poorly when based on a limited number of independent clusters. Furthermore, it can be difficult to determine what counts as a large-enough sample to trust standard CRVE methods, because the finite-sample behavior of the variance estimators and test statistics depends on the configuration of the covariates, not just the total number of clusters.

One solution to this problem is to use bias-reduced linearization (BRL), which was proposed by Bell and McCaffrey (2002) and has recently begun to receive attention in the econometrics literature (e.g., Cameron & Miller, 2015; Imbens & Kolesar, 2015). The idea of BRL is to correct the bias of standard CRVE based on a working model, and then to use a degrees-of-freedom correction for Wald tests based on the bias-reduced CRVE. That may seem silly (after all, the whole point of CRVE is to avoid making distributional assumptions about the errors in your model), but it turns out that the correction can help quite a bit, even when the working model is wrong. The degrees-of-freedom correction is based on a standard Satterthwaite-type approximation, and also relies on the working model.

A problem with Bell and McCaffrey’s original formulation of BRL is that it does not work in some very common models for panel data, such as state-by-year panels that include fixed effects for each state and each year (Angrist and Pischke, 2009, point out this issue in their chapter on “non-standard standard error issues”; see also Young, 2016). However, Pustejovsky and Tipton (2016) proposed a generalization of BRL that works even in models with arbitrary sets of fixed effects, and this generalization is implemented in clubSandwich as CRVE type CR2. The package also implements small-sample corrections for multiple-constraint hypothesis tests based on an approximation proposed by Pustejovsky and Tipton (2016). For one-parameter constraints, the test reduces to a t-test with Satterthwaite degrees of freedom, and so it is a natural extension of BRL.

The following example demonstrates how to use clubSandwich to do cluster-robust inference for a state-by-year panel model with fixed effects in both dimensions, clustering by states.

Unweighted OLS

The following code does some simple data-munging and then estimates the model by OLS:

The coef_test function from clubSandwich can then be used to test the hypothesis that changing the minimum legal drinking age has no effect on motor vehicle deaths in this cohort (i.e., \(H_0: \delta = 0\)). The usual way to test this is to cluster the standard errors by state, calculate the robust Wald statistic, and compare that to a standard normal reference distribution. The code and results are as follows:

##      Coef. Estimate   SE t-stat p-val (naive-t) Sig.
## 1    legal     7.59 2.44  3.108         0.00313   **
## 2 beertaxa     3.82 5.14  0.743         0.46128

A better approach would be to use the generalized, bias-reduced linearization CRVE, together with Satterthwaite degrees of freedom. In the clubSandwich package, the BRL adjustment is called “CR2” because it is directly analogous to the HC2 correction used in heteroskedasticity-robust variance estimation. When applied to an OLS model estimated by lm, the default working model is an identity matrix, which amounts to the “working” assumption that the errors are all uncorrelated and homoskedastic. Here’s how to apply this approach in the example:

##      Coef. Estimate   SE t-stat  d.f. p-val (Satt) Sig.
## 1    legal     7.59 2.51  3.019 24.58      0.00583   **
## 2 beertaxa     3.82 5.27  0.725  5.77      0.49663

The Satterthwaite degrees of freedom are different for each coefficient in the model, and so the coef_test function reports them right alongside the standard error. For the effect of legal drinking age, the degrees of freedom are about half of what might be expected, given that there are 51 clusters. The p-value for the CR2+Satterthwaite test is about twice as large as the p-value based on the standard Wald test, although the coefficient is still statistically significant at conventional levels. Note, however, that the degrees of freedom on the beer taxation rate are considerably smaller because there are only a few states with substantial variability in taxation rates over time.

Unweighted “within” estimation

The plm package in R provides another way to estimate the same model. It is convenient because it absorbs the state and year fixed effects before estimating the effect of legal. The clubSandwich package works with fitted plm models too:

##      Coef. Estimate   SE t-stat p-val (naive-t) Sig.
## 1    legal     7.59 2.44  3.108         0.00313   **
## 2 beertaxa     3.82 5.14  0.743         0.46128
##      Coef. Estimate   SE t-stat  d.f. p-val (Satt) Sig.
## 1    legal     7.59 2.51  3.019 24.58      0.00583   **
## 2 beertaxa     3.82 5.27  0.725  5.77      0.49663

Population-weighted estimation

The difference between the standard method and the new method are not terribly exciting in the above example. However, things change quite a bit if the model is estimated using population weights. We go back to fitting in lm with dummies for all the fixed effects because plm does not handle weighted least squares.

##      Coef. Estimate   SE t-stat p-val (naive-t) Sig.
## 1    legal     7.78 2.01   3.87          <0.001  ***
## 2 beertaxa    11.16 4.20   2.66          0.0106    *
##      Coef. Estimate   SE t-stat d.f. p-val (Satt) Sig.
## 1    legal     7.78 2.13   3.64 8.52      0.00588   **
## 2 beertaxa    11.16 4.37   2.55 6.85      0.03854    *

Using population weights slightly reduces the point estimate of the effect, while also slightly increasing its precision. If you were following the standard approach, you would probably be happy with the weighted estimates and wouldn’t think about it any further. However, using the CR2 variance estimator and Satterthwaite correction produces a p-value that is an order of magnitude larger (though still significant at the conventional 5% level). The degrees of freedom are just 8.5—drastically smaller than would be expected based on the number of clusters.

Even with weights, the coef_test function uses an “independent, homoskedastic” working model as a default for lm objects. In the present example, the outcome is a standardized rate and so a better assumption might be that the error variances are inversely proportional to population size. The following code uses this alternate working model:

##      Coef. Estimate   SE t-stat  d.f. p-val (Satt) Sig.
## 1    legal     7.78 2.03   3.83 12.64      0.00221   **
## 2 beertaxa    11.16 4.17   2.68  5.06      0.04333    *

The new working model leads to slightly smaller standard errors and a couple of additional degrees of freedom, though they remain in small-sample territory.

Random effects estimation

If the unobserved effects \(\alpha_1,...,\alpha_{51}\) are uncorrelated with the regressors, then a more efficient way to estimate \(\gamma,\delta\) is by weighted least squares, with weights based on a random effects model. We still treat the year effects as fixed.

##      Coef. Estimate   SE t-stat p-val (naive-t) Sig.
## 1    legal     7.31 2.39  3.054         0.00364   **
## 2 beertaxa     3.37 5.11  0.661         0.51202
##      Coef. Estimate   SE t-stat  d.f. p-val (Satt) Sig.
## 1    legal     7.31 2.46  2.966 25.18      0.00652   **
## 2 beertaxa     3.37 5.22  0.647  5.78      0.54258

With random effects estimation, the effect of legal drinking age is smaller by about 1 death per 100,000. As a procedural aside, note that coef_test infers that state is the clustering variable because the call to plm includes only one type of effects (random state effects).

Robust Hausman test

CRVE is also used in specification tests, as in the artificial Hausman-type test for endogeneity of unobserved effects (Arellano, 1993). As noted above, random effects estimation is more efficient than fixed effects estimation, but requires the assumption that the unobserved effects are uncorrelated with the regressors. However, if the unobserved effects covary with \(\mathbf{b}_i, \mathbf{d}_i\), then the random-effects estimator will be biased.

We can test for whether endogeneity is a problem by including group-centered covariates as additional regressors. Let \(\tilde{d}_{it} = d_{it} - \frac{1}{T}\sum_t d_{it}\), with \(\tilde{b}_{it}\) defined analogously. Now estimate the regression

\[y_{it} = \beta_t + \gamma_1 b_{it} + \gamma_2 \tilde{b}_{it} + \delta_1 d_{it} + \delta_2 \tilde{d}_{it} + \epsilon_{it},\]

which does not include state fixed effects. The parameters \(\gamma_2,\delta_2\) represent the differences between the within-groups and between-groups estimands of \(\gamma_1, \delta_1\). If these are both zero, then the random effects estimator is unbiased. Thus, the joint test for \(H_0: \gamma_2 = \delta_2 = 0\) amounts to a test for exogeneity of the unobserved effects.

For efficiency, we estimate this specification using weighted least squares (although OLS would be valid too):

##        Coef. Estimate   SE  t-stat  d.f. p-val (Satt) Sig.
## 1      legal   -9.180 7.62 -1.2042 24.94       0.2398     
## 2   beertaxa    3.395 9.40  0.3613  6.44       0.7295     
## 3 legal_cent   16.768 8.53  1.9665 25.44       0.0602    .
## 4  beer_cent    0.424 9.25  0.0458  6.42       0.9648

To conduct a joint test on the centered covariates, we can use the Wald_test function. The usual way to test this hypothesis would be to use the CR1 variance estimator to calculate the robust Wald statistic, then use a \(\chi^2_2\) reference distribution (or equivalently, compare a re-scaled Wald statistic to an \(F(2,\infty)\) distribution). The Wald_test function reports the latter version:

##    Test    F d.f.  p.val
##  chi-sq 2.93  Inf 0.0534

The test is just shy of significance at the 5% level. If we instead use the CR2 variance estimator and our newly proposed approximate F-test (which is the default in Wald_test), then we get:

##  Test    F d.f. p.val
##   HTZ 2.56 11.7  0.12

The low degrees of freedom of the test indicate that we’re definitely in small-sample territory and should not trust the asymptotic \(\chi^2\) approximation.

References

Angrist, J. D., & Pischke, J. (2009). Mostly harmless econometrics: An empiricist’s companion. Princeton, NJ: Princeton University Press.

Angrist, J. D., and Pischke, J. S. (2014). Mastering’metrics: the path from cause to effect. Princeton, NJ: Princeton University Press.

Arellano, M. (1993). On the testing of correlated effects with panel data. Journal of Econometrics, 59(1-2), 87-97. doi: 10.1016/0304-4076(93)90040-C

Bell, R. M., & McCaffrey, D. F. (2002). Bias reduction in standard errors for linear regression with multi-stage samples. Survey Methodology, 28(2), 169-181.

Cameron, A. C., & Miller, D. L. (2015). A practitioner’s guide to cluster-robust inference. URL: http://cameron.econ.ucdavis.edu/research/Cameron_Miller_JHR_2015_February.pdf

Carpenter, C., & Dobkin, C. (2011). The minimum legal drinking age and public health. Journal of Economic Perspectives, 25(2), 133-156. doi: 10.1257/jep.25.2.133

Imbens, G. W., & Kolesar, M. (2015). Robust standard errors in small samples: Some practical advice. URL: https://www.princeton.edu/~mkolesar/papers/small-robust.pdf

Pustejovsky, J. E. & Tipton, E. (2016). Small sample methods for cluster-robust variance estimation and hypothesis testing in fixed effects models. arXiv: 1601.01981 [stat.ME]

Young, A. (2016). Improved, nearly exact, statistical inference with robust and clustered covariance matrices using effective degrees of freedom corrections.